test_logging.py
|
# -*- coding: utf-8 -*-
"""Stupid tests that ensure logging works as expected"""
from __future__ import division, absolute_import, print_function
import sys
import threading
import logging as log
from StringIO import StringIO
import beets.logging as blog
from beets import plugins, ui
import beetsplug
from test import _common
from test._common import unittest, TestCase
from test import helper
class LoggingTest(TestCase):
def test_logging_management(self):
l1 = log.getLogger("foo123")
l2 = blog.getLogger("foo123")
self.assertEqual(l1, l2)
self.assertEqual(l1.__class__, log.Logger)
l3 = blog.getLogger("bar123")
l4 = log.getLogger("bar123")
self.assertEqual(l3, l4)
self.assertEqual(l3.__class__, blog.BeetsLogger)
self.assertIsInstance(l3, (blog.StrFormatLogger,
blog.ThreadLocalLevelLogger))
l5 = l3.getChild("shalala")
self.assertEqual(l5.__class__, blog.BeetsLogger)
l6 = blog.getLogger()
self.assertNotEqual(l1, l6)
def test_str_format_logging(self):
l = blog.getLogger("baz123")
stream = StringIO()
handler = log.StreamHandler(stream)
l.addHandler(handler)
l.propagate = False
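# Editor's note: "baz123" comes from blog.getLogger(), whose loggers (see the
# assertions in test_logging_management above) accept str.format-style
# templates, so "{0}" and "{bar}" below are filled from the positional and
# keyword arguments, producing "foo oof baz".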
l.warning(u"foo {0} {bar}", "oof", bar=u"baz")
handler.flush()
self.assertEqual(stream.getvalue(), u"foo oof baz\n")
class LoggingLevelTest(unittest.TestCase, helper.TestHelper):
class DummyModule(object):
class DummyPlugin(plugins.BeetsPlugin):
def __init__(self):
plugins.BeetsPlugin.__init__(self, 'dummy')
self.import_stages = [self.import_stage]
self.register_listener('dummy_event', self.listener)
def log_all(self, name):
self._log.debug(u'debug ' + name)
self._log.info(u'info ' + name)
self._log.warning(u'warning ' + name)
def commands(self):
cmd = ui.Subcommand('dummy')
cmd.func = lambda _, __, ___: self.log_all('cmd')
return (cmd,)
def import_stage(self, session, task):
self.log_all('import_stage')
def listener(self):
self.log_all('listener')
def setUp(self):
sys.modules['beetsplug.dummy'] = self.DummyModule
beetsplug.dummy = self.DummyModule
self.setup_beets()
self.load_plugins('dummy')
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
del beetsplug.dummy
sys.modules.pop('beetsplug.dummy')
self.DummyModule.DummyPlugin.listeners = None
self.DummyModule.DummyPlugin._raw_listeners = None
def test_command_level0(self):
self.config['verbose'] = 0
with helper.capture_log() as logs:
self.run_command('dummy')
self.assertIn(u'dummy: warning cmd', logs)
self.assertIn(u'dummy: info cmd', logs)
self.assertNotIn(u'dummy: debug cmd', logs)
def test_command_level1(self):
self.config['verbose'] = 1
with helper.capture_log() as logs:
self.run_command('dummy')
self.assertIn(u'dummy: warning cmd', logs)
self.assertIn(u'dummy: info cmd', logs)
self.assertIn(u'dummy: debug cmd', logs)
def test_command_level2(self):
self.config['verbose'] = 2
with helper.capture_log() as logs:
self.run_command('dummy')
self.assertIn(u'dummy: warning cmd', logs)
self.assertIn(u'dummy: info cmd', logs)
self.assertIn(u'dummy: debug cmd', logs)
def test_listener_level0(self):
self.config['verbose'] = 0
with helper.capture_log() as logs:
plugins.send('dummy_event')
self.assertIn(u'dummy: warning listener', logs)
self.assertNotIn(u'dummy: info listener', logs)
self.assertNotIn(u'dummy: debug listener', logs)
def test_listener_level1(self):
self.config['verbose'] = 1
with helper.capture_log() as logs:
plugins.send('dummy_event')
self.assertIn(u'dummy: warning listener', logs)
self.assertIn(u'dummy: info listener', logs)
self.assertNotIn(u'dummy: debug listener', logs)
def test_listener_level2(self):
self.config['verbose'] = 2
with helper.capture_log() as logs:
plugins.send('dummy_event')
self.assertIn(u'dummy: warning listener', logs)
self.assertIn(u'dummy: info listener', logs)
self.assertIn(u'dummy: debug listener', logs)
def test_import_stage_level0(self):
self.config['verbose'] = 0
with helper.capture_log() as logs:
importer = self.create_importer()
importer.run()
self.assertIn(u'dummy: warning import_stage', logs)
self.assertNotIn(u'dummy: info import_stage', logs)
self.assertNotIn(u'dummy: debug import_stage', logs)
def test_import_stage_level1(self):
self.config['verbose'] = 1
with helper.capture_log() as logs:
importer = self.create_importer()
importer.run()
self.assertIn(u'dummy: warning import_stage', logs)
self.assertIn(u'dummy: info import_stage', logs)
self.assertNotIn(u'dummy: debug import_stage', logs)
def test_import_stage_level2(self):
self.config['verbose'] = 2
with helper.capture_log() as logs:
importer = self.create_importer()
importer.run()
self.assertIn(u'dummy: warning import_stage', logs)
self.assertIn(u'dummy: info import_stage', logs)
self.assertIn(u'dummy: debug import_stage', logs)
@_common.slow_test()
class ConcurrentEventsTest(TestCase, helper.TestHelper):
"""Similar to LoggingLevelTest but lower-level and focused on multiple
events interaction. Since this is a bit heavy we don't do it in
LoggingLevelTest.
"""
class DummyPlugin(plugins.BeetsPlugin):
def __init__(self, test_case):
plugins.BeetsPlugin.__init__(self, 'dummy')
self.register_listener('dummy_event1', self.listener1)
self.register_listener('dummy_event2', self.listener2)
self.lock1 = threading.Lock()
self.lock2 = threading.Lock()
self.test_case = test_case
self.exc_info = None
self.t1_step = self.t2_step = 0
def log_all(self, name):
self._log.debug(u'debug ' + name)
self._log.info(u'info ' + name)
self._log.warning(u'warning ' + name)
def listener1(self):
try:
self.test_case.assertEqual(self._log.level, log.INFO)
self.t1_step = 1
self.lock1.acquire()
self.test_case.assertEqual(self._log.level, log.INFO)
self.t1_step = 2
except Exception:
import sys
self.exc_info = sys.exc_info()
def listener2(self):
try:
self.test_case.assertEqual(self._log.level, log.DEBUG)
self.t2_step = 1
self.lock2.acquire()
self.test_case.assertEqual(self._log.level, log.DEBUG)
self.t2_step = 2
except Exception:
import sys
self.exc_info = sys.exc_info()
def setUp(self):
self.setup_beets(disk=True)
def tearDown(self):
self.teardown_beets()
def test_concurrent_events(self):
dp = self.DummyPlugin(self)
def check_dp_exc():
if dp.exc_info:
raise dp.exc_info[1], None, dp.exc_info[2]
try:
dp.lock1.acquire()
dp.lock2.acquire()
self.assertEqual(dp._log.level, log.NOTSET)
self.config['verbose'] = 1
t1 = threading.Thread(target=dp.listeners['dummy_event1'][0])
t1.start() # blocked. t1 tested its log level
while dp.t1_step != 1:
check_dp_exc()
self.assertTrue(t1.is_alive())
self.assertEqual(dp._log.level, log.NOTSET)
self.config['verbose'] = 2
t2 = threading.Thread(target=dp.listeners['dummy_event2'][0])
t2.start() # blocked. t2 tested its log level
while dp.t2_step != 1:
check_dp_exc()
self.assertTrue(t2.is_alive())
self.assertEqual(dp._log.level, log.NOTSET)
dp.lock1.release() # dummy_event1 tests its log level + finishes
while dp.t1_step != 2:
check_dp_exc()
t1.join(.1)
self.assertFalse(t1.is_alive())
self.assertTrue(t2.is_alive())
self.assertEqual(dp._log.level, log.NOTSET)
dp.lock2.release() # dummy_event2 tests its log level + finishes
while dp.t2_step != 2:
check_dp_exc()
t2.join(.1)
self.assertFalse(t2.is_alive())
except:
print(u"Alive threads:", threading.enumerate())
if dp.lock1.locked():
print(u"Releasing lock1 after exception in test")
dp.lock1.release()
if dp.lock2.locked():
print(u"Releasing lock2 after exception in test")
dp.lock2.release()
print(u"Alive threads:", threading.enumerate())
raise
def test_root_logger_levels(self):
"""Root logger level should be shared between threads.
"""
self.config['threaded'] = True
blog.getLogger('beets').set_global_level(blog.WARNING)
with helper.capture_log() as logs:
importer = self.create_importer()
importer.run()
self.assertEqual(logs, [])
blog.getLogger('beets').set_global_level(blog.INFO)
with helper.capture_log() as logs:
importer = self.create_importer()
importer.run()
for l in logs:
self.assertIn(u"import", l)
self.assertIn(u"album", l)
blog.getLogger('beets').set_global_level(blog.DEBUG)
with helper.capture_log() as logs:
importer = self.create_importer()
importer.run()
self.assertIn(u"Sending event: database_change", logs)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
|
queues.py
|
# This is a simple unit-test designed to see whether multiprocessing Queues are
# leaking on Linux. They don't seem to be, which is good for most people, but
# leaves me still with an unexplained leak.
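# Methodology used below (editor's summary of this script): a worker process
# enqueues Reading objects into a bounded Manager queue while the parent
# drains it and prints its own RSS (via psutil) once per second; steady RSS
# growth in that number would point at a leak.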
import os
import sys
import time
import Queue
import psutil
import random
import datetime
import multiprocessing
max_readings = 10000
class Reading(object):
count = 0
def __init__(self):
Reading.count += 1
self.id = Reading.count
self.timestamp = datetime.datetime.now()
self.spectrum = [int(65536 * random.random()) for x in range(1024)]
self.laser_temperature_raw = int(self.rand(0, 4096))
self.laser_temperature_degC = int(self.rand(20, 40))
self.detector_temperature_raw = int(self.rand(0, 4096))
self.detector_temperature_degC = int(self.rand(-20, 60))
self.secondary_adc_raw = int(self.rand(0, 4096))
self.secondary_adc_calibrated = float(self.rand(0, 100))
self.laser_status = None
self.laser_power = int(self.rand(0, 100))
self.laser_power_in_mW = True
self.failure = None
self.averaged = False
self.session_count = self.id
self.area_scan_row_count = int(self.rand(0, 1024))
self.battery_raw = int(self.rand(0, 4096))
self.battery_percentage = int(self.rand(0, 100))
self.battery_charging = None
def rand(self, lo, hi):
return lo + (hi - lo) * random.random()
class SubprocessArgs(object):
def __init__(self, response_queue):
self.response_queue = response_queue
class Wrapper(object):
def __init__(self):
self.manager = multiprocessing.Manager()
self.response_queue = self.manager.Queue(100)
self.poller = None
def connect(self):
subprocessArgs = SubprocessArgs(response_queue = self.response_queue)
self.poller = multiprocessing.Process(target=self.continuous_poll, args=(subprocessArgs,))
self.poller.start()
def acquire_data(self):
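# Drains whatever is currently queued without blocking: a bool is the
# worker's poison-pill and is returned as-is, an explicit None is returned
# as "no data", otherwise only the newest Reading is kept; the loop ends
# when get_nowait() raises Queue.Empty.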
reading = None
last_reading = None
dequeue_count = 0
while True:
try:
reading = self.response_queue.get_nowait()
if reading is None:
# an explicit None was enqueued (an empty queue raises Queue.Empty below)
return None
elif isinstance(reading, bool):
return reading
else:
print "acquire_data: read Reading %d" % reading.id
dequeue_count += 1
last_reading = reading
except Queue.Empty:
break
if dequeue_count > 1:
print "acquire_data: discarded %d readings" % (dequeue_count - 1)
return last_reading
def continuous_poll(self, args):
pid = os.getpid()
print "worker: entering loop in process %d" % pid
count = 0
while True:
# sleep_sec = 0.01 + (.01 * random.random())
# print "worker: sleeping %.2f sec" % sleep_sec
# time.sleep(sleep_sec)
reading = Reading()
print "worker: enqueuing reading %d" % reading.id
args.response_queue.put(reading, timeout=1)
count += 1
if count >= max_readings:
print "worker: enqueued %d readings, quitting" % count
break
print "worker: sending poison-pill"
args.response_queue.put(True, timeout=1)
print "worker: exiting"
sys.exit()
parent_pid = os.getpid()
print "Main: Running from pid %d" % parent_pid
print "Main: instantiating Wrapper"
wrapper = Wrapper()
print "Main: connecting to background process"
wrapper.connect()
print "Main: reading spectra"
while True:
reading = wrapper.acquire_data()
if reading is None:
print "Main: no reading available"
elif isinstance(reading, bool) and reading == True:
print("Main: received poison-pill, exiting")
break
else:
print "Main: received reading %d (%s)" % (reading.id, reading.spectrum[:10])
size_in_bytes = psutil.Process(parent_pid).memory_info().rss
print "Main: memory = %d bytes" % size_in_bytes
print "Main: sleeping 1 sec"
time.sleep(1)
print "Main: exiting"
|
sdk_worker_main.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK Fn Harness entry point."""
# pytype: skip-file
from __future__ import absolute_import
import http.server
import json
import logging
import os
import re
import sys
import threading
import traceback
from builtins import object
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import ProfilingOptions
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.internal import names
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
from apache_beam.runners.worker.worker_status import thread_dump
from apache_beam.utils import profiler
# This module is experimental. No backwards-compatibility guarantees.
_LOGGER = logging.getLogger(__name__)
class StatusServer(object):
def start(self, status_http_port=0):
"""Executes the serving loop for the status server.
Args:
status_http_port(int): Binding port for the debug server.
Default is 0 which means any free unsecured port
"""
class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
"""HTTP handler for serving stacktraces of all threads."""
def do_GET(self): # pylint: disable=invalid-name
"""Return all thread stacktraces information for GET request."""
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(thread_dump().encode('utf-8'))
def log_message(self, f, *args):
"""Do not log any messages."""
pass
self.httpd = httpd = http.server.HTTPServer(
('localhost', status_http_port), StatusHttpHandler)
_LOGGER.info('Status HTTP server running at %s:%s', httpd.server_name,
httpd.server_port)
httpd.serve_forever()
def main(unused_argv):
"""Main entry point for SDK Fn Harness."""
if 'LOGGING_API_SERVICE_DESCRIPTOR' in os.environ:
try:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'],
logging_service_descriptor)
# Send all logs to the runner.
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
# TODO(BEAM-5468): This should be picked up from pipeline options.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(fn_log_handler)
_LOGGER.info('Logging handler created.')
except Exception:
_LOGGER.error("Failed to set up logging handler, continuing without.",
exc_info=True)
fn_log_handler = None
else:
fn_log_handler = None
# Start status HTTP server thread.
thread = threading.Thread(name='status_http_server',
target=StatusServer().start)
thread.daemon = True
thread.setName('status-server-daemon')
thread.start()
if 'PIPELINE_OPTIONS' in os.environ:
sdk_pipeline_options = _parse_pipeline_options(
os.environ['PIPELINE_OPTIONS'])
else:
sdk_pipeline_options = PipelineOptions.from_dictionary({})
if 'SEMI_PERSISTENT_DIRECTORY' in os.environ:
semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
else:
semi_persistent_directory = None
_LOGGER.info('semi_persistent_directory: %s', semi_persistent_directory)
_worker_id = os.environ.get('WORKER_ID', None)
try:
_load_main_session(semi_persistent_directory)
except Exception: # pylint: disable=broad-except
exception_details = traceback.format_exc()
_LOGGER.error(
'Could not load main session: %s', exception_details, exc_info=True)
try:
_LOGGER.info('Python sdk harness started with pipeline_options: %s',
sdk_pipeline_options.get_all_options(drop_default=True))
control_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
status_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'],
control_service_descriptor)
if 'STATUS_API_SERVICE_DESCRIPTOR' in os.environ:
text_format.Merge(os.environ['STATUS_API_SERVICE_DESCRIPTOR'],
status_service_descriptor)
# TODO(robertwb): Support credentials.
assert not control_service_descriptor.oauth2_client_credentials_grant.url
SdkHarness(
control_address=control_service_descriptor.url,
status_address=status_service_descriptor.url,
worker_id=_worker_id,
state_cache_size=_get_state_cache_size(sdk_pipeline_options),
data_buffer_time_limit_ms=_get_data_buffer_time_limit_ms(
sdk_pipeline_options),
profiler_factory=profiler.Profile.factory_from_options(
sdk_pipeline_options.view_as(ProfilingOptions))
).run()
_LOGGER.info('Python sdk harness exiting.')
except: # pylint: disable=broad-except
_LOGGER.exception('Python sdk harness failed: ')
raise
finally:
if fn_log_handler:
fn_log_handler.close()
def _parse_pipeline_options(options_json):
options = json.loads(options_json)
# Check the options field first for backward compatibility.
if 'options' in options:
return PipelineOptions.from_dictionary(options.get('options'))
else:
# Remove extra urn part from the key.
portable_option_regex = r'^beam:option:(?P<key>.*):v1$'
return PipelineOptions.from_dictionary({
re.match(portable_option_regex, k).group('key')
if re.match(portable_option_regex, k) else k: v
for k, v in options.items()
})
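# Illustrative sketch (editor's example, values made up): the regex above
# strips the portable-option URN wrapping, so an options JSON such as
# '{"beam:option:job_name:v1": "demo-job"}' is treated as if it were
# '{"job_name": "demo-job"}' before being handed to PipelineOptions.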
def _get_state_cache_size(pipeline_options):
"""Defines the upper number of state items to cache.
Note: state_cache_size is an experimental flag and might not be available in
future releases.
Returns:
an int indicating the maximum number of items to cache.
Default is 0 (disabled)
"""
experiments = pipeline_options.view_as(DebugOptions).experiments
experiments = experiments if experiments else []
for experiment in experiments:
# There should be only one match, so we return from inside the loop.
if re.match(r'state_cache_size=', experiment):
return int(
re.match(r'state_cache_size=(?P<state_cache_size>.*)',
experiment).group('state_cache_size'))
return 0
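# Illustrative example (hypothetical flag value): with the experiment string
# 'state_cache_size=100' present in DebugOptions, the loop above returns
# int('100') == 100; with no matching experiment it falls through to 0.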
def _get_data_buffer_time_limit_ms(pipeline_options):
"""Defines the time limt of the outbound data buffering.
Note: data_buffer_time_limit_ms is an experimental flag and might
not be available in future releases.
Returns:
an int indicating the time limit in milliseconds of the outbound
data buffering. Default is 0 (disabled)
"""
experiments = pipeline_options.view_as(DebugOptions).experiments
experiments = experiments if experiments else []
for experiment in experiments:
# There should be only one match, so we return from inside the loop.
if re.match(r'data_buffer_time_limit_ms=', experiment):
return int(
re.match(
r'data_buffer_time_limit_ms=(?P<data_buffer_time_limit_ms>.*)',
experiment).group('data_buffer_time_limit_ms'))
return 0
def _load_main_session(semi_persistent_directory):
"""Loads a pickled main session from the path specified."""
if semi_persistent_directory:
session_file = os.path.join(semi_persistent_directory, 'staged',
names.PICKLED_MAIN_SESSION_FILE)
if os.path.isfile(session_file):
pickler.load_session(session_file)
else:
_LOGGER.warning(
'No session file found: %s. Functions defined in __main__ '
'(interactive session) may fail.', session_file)
else:
_LOGGER.warning(
'No semi_persistent_directory found: Functions defined in __main__ '
'(interactive session) may fail.')
if __name__ == '__main__':
main(sys.argv)
|
failure_handler_test.py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PreemptionCheckpointHandler."""
import os
import random
import re
import signal
import sys
import threading
import time
from absl.testing import parameterized
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.checkpoint import checkpoint as tracking_util
from tensorflow.python.checkpoint import checkpoint_management
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute.failure_handling import failure_handling
from tensorflow.python.distribute.failure_handling import gce_util
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.module import module
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
mock = test.mock
CLUSTER_SIZE = 4
EPOCHS_TO_RUN = 8
STEPS_PER_EPOCH = 15
MAX_WAIT_TIME = 40
def _is_oss():
"""Returns whether the test is run under OSS."""
return len(sys.argv) >= 1 and 'bazel' in sys.argv[0]
def _make_checkpoint_manager(checkpoint, checkpoint_dir, cluster_resolver):
if not cluster_resolver.cluster_spec().as_dict() or (
multi_worker_util.is_chief(
cluster_spec=cluster_resolver.cluster_spec(),
task_type=cluster_resolver.task_type,
task_id=cluster_resolver.task_id)):
return checkpoint_management.CheckpointManager(
checkpoint, directory=checkpoint_dir, max_to_keep=1)
else:
return checkpoint_management.CheckpointManager(
checkpoint,
directory=failure_handling._non_chief_checkpoint_dir(
checkpoint_dir, cluster_resolver.task_id),
max_to_keep=1)
def raise_if_not_all_exit(grace_period, mpr):
"""Wait for all cluster to exit with a time out."""
waiting_time = 0
exit_process_count = 0
# This addition mitigates the fact that our step time is too short in tests.
while exit_process_count != CLUSTER_SIZE and waiting_time < max(
grace_period + 15, MAX_WAIT_TIME):
exit_process_count = 0
for worker_id in range(CLUSTER_SIZE):
if not mpr.process_exists('worker', worker_id):
exit_process_count += 1
waiting_time += 1
time.sleep(1)
if waiting_time == max(grace_period + 5, 40):
raise RuntimeError('Waited a long time, but at least one worker still '
'exists. Considering the size of our model, this '
'should not happen.')
class PreemptionCheckpointTest(test.TestCase, parameterized.TestCase):
"""Integration test for PreemptionCheckpointHandler."""
def _maybe_trigger_a_preemption(self, training_started_event,
trigger_it=False):
if not training_started_event:
return
clear_events = [
event for event in training_started_event if not event.is_set()
]
if clear_events:
if trigger_it:
logging.info('Set preemption signal')
clear_events[0].set()
elif random.randrange(0, 9) > 6:
clear_events[0].set()
def worker_fn(self,
checkpoint_dir,
cluster_spec,
input_arg='checkpoint',
training_started_event=None,
raise_app_error_on_worker=None,
training_restarted=None,
training_finished=None,
termination_config=failure_handling.TerminationConfig()):
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
class Model(module.Module):
def __init__(self):
self.v = variables_lib.Variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_WRITE,
aggregation=variables_lib.VariableAggregation.SUM)
@def_function.function(input_signature=[])
def __call__(self):
return self.v.read_value()
with mock.patch.object(gce_util, 'on_gcp', lambda: False):
with strategy.scope():
model = Model()
# Named fh_ckpt because the user should keep their regular checkpoint
# separate from the checkpoint used by PreemptionCheckpointHandler: we
# will create a CheckpointManager to manage it, and only one
# CheckpointManager should be active in a particular directory at a time.
fh_ckpt = tracking_util.Checkpoint(model=model)
if input_arg == 'checkpoint':
checkpoint_or_manager = fh_ckpt
else:
checkpoint_or_manager = _make_checkpoint_manager(
fh_ckpt, checkpoint_dir, strategy.cluster_resolver)
preemption_handler = (
failure_handling.PreemptionCheckpointHandler(
strategy.cluster_resolver, checkpoint_or_manager,
checkpoint_dir, termination_config))
def distributed_train_step(current_epoch, current_step):
@def_function.function
def train_step():
if cluster_spec and (
distribution_strategy_context.get_distribution_strategy(
).cluster_resolver.task_id == raise_app_error_on_worker):
raise errors_impl.ResourceExhaustedError(
node_def=None, op=None, message='Running out of resources')
model.v.assign_add(constant_op.constant(1.))
strategy.run(train_step)
if current_step == STEPS_PER_EPOCH - 1:
logging.info('epoch %d finished', current_epoch)
logging.info('Start training at %d',
preemption_handler.total_run_calls)
# If the training process has been restarted, verify that the expected
# number of checkpoints has been written.
# We also want to check training_finished, because there's a corner case
# where the signal is sent quite late and training finishes before the
# grace period ends.
if training_restarted and training_restarted.is_set(
) and not training_finished.is_set():
logging.info('training restarted')
match_group = [
re.search(r'.*ckpt-(\d+).index', a_file)
for a_file in gfile.ListDirectory(checkpoint_dir)
]
checkpoint_index = [
a_match.group(1) for a_match in match_group if a_match
]
if getattr(termination_config, 'grace_period', 0):
# Two checkpoints were saved for the extended grace period.
self.assertEqual(int(checkpoint_index[0]), 2)
else:
self.assertEqual(int(checkpoint_index[0]), 1)
for epoch in range(
preemption_handler.total_run_calls // STEPS_PER_EPOCH,
EPOCHS_TO_RUN):
for step in range(
preemption_handler.total_run_calls % STEPS_PER_EPOCH,
STEPS_PER_EPOCH):
preemption_handler.run(distributed_train_step, epoch, step)
# Add some randomness to when preemption actually happens. We should
# trigger it for sure if the training is coming to an end and it hasn't
# been triggered yet.
if epoch >= EPOCHS_TO_RUN - 2:
trigger_it = True
else:
trigger_it = False
self._maybe_trigger_a_preemption(training_started_event, trigger_it)
training_finished.set()
logging.info('Training finished.')
self.assertEqual(
model.v.numpy(),
strategy.num_replicas_in_sync * EPOCHS_TO_RUN * STEPS_PER_EPOCH)
@combinations.generate(
combinations.combine(input_arg=['checkpoint', 'manager'],
mwms_mode=['local', 'multi_worker'],))
def test_preemption_checkpointing(self, input_arg, mwms_mode):
has_chief = False
if _is_oss():
rpc_layer = 'grpc'
else:
rpc_layer = 'grpc+loas'
checkpoint_dir = os.path.join(self.get_temp_dir(), 'fh_ckpt')
if mwms_mode == 'multi_worker':
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=has_chief,
num_workers=CLUSTER_SIZE)
training_started_event = multi_process_runner.manager().Event()
training_restarted = multi_process_runner.manager().Event()
training_finished = multi_process_runner.manager().Event()
mpr = multi_process_runner.MultiProcessRunner(
self.worker_fn,
cluster_spec,
args=(checkpoint_dir, cluster_spec, input_arg,
[training_started_event
], None, training_restarted, training_finished),
rpc_layer=rpc_layer,
return_output=True,
dependence_on_chief=has_chief)
logging.info('Cluster starting.')
mpr.start()
while not training_started_event.is_set():
time.sleep(1)
logging.info('sending sigterm')
killed_worker = random.randrange(0, CLUSTER_SIZE)
os.kill(mpr.get_process_id('worker', killed_worker), signal.SIGTERM)
logging.info('sigterm sent')
raise_if_not_all_exit(0, mpr)
logging.info('restarting workers')
training_restarted.set()
for worker_id in range(CLUSTER_SIZE):
mpr.start_single_process('worker', worker_id, cluster_spec)
logging.info('workers restarted')
mpr.join(timeout=270)
else:
cluster_spec = server_lib.ClusterSpec({})
training_started_event = threading.Event()
training_restarted = threading.Event()
training_finished = threading.Event()
def sending_sigterm(training_started_event):
while not training_started_event.is_set():
time.sleep(1)
logging.info('sending sigterm')
training_started_event.set()
os.kill(os.getpid(), signal.SIGTERM)
preemption_sender_thread = threading.Thread(
target=sending_sigterm, args=(training_started_event,))
preemption_sender_thread.start()
caught_exit = False
try:
self.worker_fn(checkpoint_dir, cluster_spec, input_arg,
[training_started_event], None, training_restarted,
training_finished)
except SystemExit as exit_error:
caught_exit = True
# We cannot use assertRaises instead, since termination is not always
# triggered.
self.assertEqual(exit_error.code, 42) # pylint: disable=g-assert-in-except
preemption_sender_thread.join(10)
if not training_finished.is_set():
self.assertTrue(caught_exit)
logging.info('restarting workers')
training_restarted.set()
self.worker_fn(checkpoint_dir, cluster_spec, input_arg,
[training_started_event], None, training_restarted,
training_finished)
def test_error_propagation(self):
error_worker = random.randint(0, CLUSTER_SIZE)
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=False, num_workers=CLUSTER_SIZE)
checkpoint_dir = self.get_temp_dir()
def assert_raise_error():
# Asserts that an error raised during a training step on one of the workers
# is caught on all workers.
with self.assertRaises(errors_impl.ResourceExhaustedError) as error:
self.worker_fn(
checkpoint_dir,
cluster_spec,
raise_app_error_on_worker=error_worker)
self.assertIn('Running out of resources', str(error.exception))
if _is_oss():
rpc_layer = 'grpc'
else:
rpc_layer = 'grpc+loas'
mpr = multi_process_runner.MultiProcessRunner(
assert_raise_error,
cluster_spec,
rpc_layer=rpc_layer,
return_output=True,
dependence_on_chief=False)
logging.info('Cluster starting.')
mpr.start()
mpr.join(timeout=250)
@combinations.generate(
combinations.combine(input_arg=['checkpoint', 'manager'],
mwms_mode=['local', 'multi_worker'],))
def test_grace_period_continue_training(self, input_arg, mwms_mode):
if _is_oss():
rpc_layer = 'grpc'
else:
rpc_layer = 'grpc+loas'
checkpoint_dir = os.path.join(self.get_temp_dir(), 'fh_ckpt')
if mwms_mode == 'multi_worker':
grace_period = 5
termination_config = failure_handling.TerminationConfig(
grace_period=grace_period)
has_chief = False
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=has_chief,
num_workers=CLUSTER_SIZE)
training_started_event = multi_process_runner.manager().Event()
training_restarted = multi_process_runner.manager().Event()
training_finished = multi_process_runner.manager().Event()
mpr = multi_process_runner.MultiProcessRunner(
self.worker_fn,
cluster_spec,
args=(checkpoint_dir, cluster_spec, input_arg,
[training_started_event], None, training_restarted,
training_finished, termination_config),
rpc_layer=rpc_layer,
return_output=True,
dependence_on_chief=has_chief)
logging.info('Cluster starting.')
mpr.start()
while not training_started_event.is_set():
time.sleep(1)
killed_worker = random.randrange(0, CLUSTER_SIZE)
logging.info('sending SIGTERM')
os.kill(mpr.get_process_id('worker', killed_worker), signal.SIGTERM)
logging.info('SIGTERM sent')
raise_if_not_all_exit(grace_period, mpr)
logging.info('restarting workers')
training_restarted.set()
for worker_id in range(CLUSTER_SIZE):
mpr.start_single_process('worker', worker_id, cluster_spec)
logging.info('workers restarted')
mpr.join(timeout=250)
else:
# This is because a single worker trains very fast relative to the size
# of the "model" here. With a longer grace period, training simply finishes
# within the grace period, so we can't verify the exit behavior.
grace_period = 1
termination_config = failure_handling.TerminationConfig(
grace_period=grace_period)
cluster_spec = server_lib.ClusterSpec({})
training_started_event = threading.Event()
training_restarted = threading.Event()
training_finished = threading.Event()
def sending_sigterm(training_started_event):
while not training_started_event.is_set():
time.sleep(1)
logging.info('sending sigterm')
training_started_event.set()
os.kill(os.getpid(), signal.SIGTERM)
preemption_sender_thread = threading.Thread(
target=sending_sigterm, args=(training_started_event,))
preemption_sender_thread.start()
caught_exit = False
try:
self.worker_fn(checkpoint_dir, cluster_spec, input_arg,
[training_started_event], None, training_restarted,
training_finished, termination_config)
except SystemExit as exit_error:
caught_exit = True
# We cannot use assertRaises instead, since termination is not always
# triggered.
self.assertEqual(exit_error.code, 42) # pylint: disable=g-assert-in-except
preemption_sender_thread.join(10)
if not training_finished.is_set():
self.assertTrue(caught_exit)
logging.info('restarting workers')
training_restarted.set()
self.worker_fn(checkpoint_dir, cluster_spec, input_arg,
[training_started_event], None, training_restarted,
training_finished, termination_config)
if __name__ == '__main__':
test_util.main()
|
cli.py
|
# encoding: utf-8
from __future__ import print_function
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import itertools
import json
import logging
from optparse import OptionConflictError
import traceback
from six import text_type
from six.moves import input, xrange
from six.moves.urllib.error import HTTPError
from six.moves.urllib.parse import urljoin, urlparse
from six.moves.urllib.request import urlopen
import sqlalchemy as sa
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
import click
from ckan.config.middleware import make_app
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.plugins as p
from ckan.common import config
# This is a test Flask request context to be used internally.
# Do not use it!
_cli_test_request_context = None
# NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def deprecation_warning(message=None):
'''
Print a deprecation warning to STDERR.
If ``message`` is given it is also printed to STDERR.
'''
sys.stderr.write(u'WARNING: This function is deprecated.')
if message:
sys.stderr.write(u' ' + message.strip())
sys.stderr.write(u'\n')
def error(msg):
'''
Print an error message to STDERR and exit with return code 1.
'''
sys.stderr.write(msg)
if not msg.endswith('\n'):
sys.stderr.write('\n')
sys.exit(1)
def parse_db_config(config_key='sqlalchemy.url'):
''' Takes a config key for a database connection url and parses it into
a dictionary. Expects a url like:
'postgres://tester:pass@localhost/ckantest3'
'''
from ckan.common import config
url = config[config_key]
regex = [
'^\s*(?P<db_type>\w*)',
'://',
'(?P<db_user>[^:]*)',
':?',
'(?P<db_pass>[^@]*)',
'@',
'(?P<db_host>[^/:]*)',
':?',
'(?P<db_port>[^/]*)',
'/',
'(?P<db_name>[\w.-]*)'
]
db_details_match = re.match(''.join(regex), url)
if not db_details_match:
raise Exception('Could not extract db details from url: %r' % url)
db_details = db_details_match.groupdict()
return db_details
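# Illustrative result, derived from the regex above (editor's note, shown only
# as a comment): parsing 'postgres://tester:pass@localhost/ckantest3' yields
# {'db_type': 'postgres', 'db_user': 'tester', 'db_pass': 'pass',
#  'db_host': 'localhost', 'db_port': '', 'db_name': 'ckantest3'}.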
def user_add(args):
'''Add a new user. Used by ``paster sysadmin add``
and ``paster user add``.
'''
if len(args) < 1:
error('Error: you need to specify the user name.')
username = args[0]
# parse args into data_dict
data_dict = {'name': username}
for arg in args[1:]:
try:
field, value = arg.split('=', 1)
data_dict[field] = value
except ValueError:
raise ValueError(
'Could not parse arg: %r (expected "<option>=<value>")' % arg
)
# Required
while '@' not in data_dict.get('email', ''):
data_dict['email'] = input('Email address: ').strip()
if 'password' not in data_dict:
data_dict['password'] = UserCmd.password_prompt()
# Optional
if 'fullname' in data_dict:
data_dict['fullname'] = data_dict['fullname'].decode(
sys.getfilesystemencoding()
)
print('Creating user: %r' % username)
try:
import ckan.logic as logic
import ckan.model as model
site_user = logic.get_action('get_site_user')({
'model': model,
'ignore_auth': True},
{}
)
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError as e:
error(traceback.format_exc())
## from http://code.activestate.com/recipes/577058/ MIT licence.
## Written by Trent Mick
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = input().strip().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
def _get_config(config=None):
from paste.deploy import appconfig
if config:
filename = os.path.abspath(config)
config_source = '-c parameter'
elif os.environ.get('CKAN_INI'):
filename = os.environ.get('CKAN_INI')
config_source = '$CKAN_INI'
else:
default_filename = 'development.ini'
filename = os.path.join(os.getcwd(), default_filename)
if not os.path.exists(filename):
# give really clear error message for this common situation
msg = 'ERROR: You need to specify the CKAN config (.ini) '\
'file path.'\
'\nUse the --config parameter or set environment ' \
'variable CKAN_INI or have {}\nin the current directory.' \
.format(default_filename)
exit(msg)
if not os.path.exists(filename):
msg = 'Config file not found: %s' % filename
msg += '\n(Given by: %s)' % config_source
exit(msg)
fileConfig(filename)
return appconfig('config:' + filename)
def load_config(config, load_site_user=True):
conf = _get_config(config)
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
# Set this internal test request context with the configured environment so
# it can be used when calling url_for from the CLI.
global _cli_test_request_context
app = make_app(conf.global_conf, **conf.local_conf)
flask_app = app.apps['flask_app']._wsgi_app
_cli_test_request_context = flask_app.test_request_context()
registry = Registry()
registry.prepare()
import pylons
registry.register(pylons.translator, MockTranslator())
site_user = None
if model.user_table.exists() and load_site_user:
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
registry.register(pylons.c, c)
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
pylons.c.user = site_user['name']
pylons.c.userobj = model.User.get(site_user['name'])
## give routes enough information to run url_for
parsed = urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
return site_user
def paster_click_group(summary):
'''Return a paster command click.Group for paster subcommands
:param command: the paster command linked to this function from
setup.py, used in help text (e.g. "datastore")
:param summary: summary text used in paster's help/command listings
(e.g. "Perform commands to set up the datastore")
'''
class PasterClickGroup(click.Group):
'''A click.Group that may be called like a paster command'''
def __call__(self, ignored_command):
sys.argv.remove(ignored_command)
return super(PasterClickGroup, self).__call__(
prog_name=u'paster ' + ignored_command,
help_option_names=[u'-h', u'--help'],
obj={})
@click.group(cls=PasterClickGroup)
@click.option(
'--plugin',
metavar='ckan',
help='paster plugin (when run outside ckan directory)')
@click_config_option
@click.pass_context
def cli(ctx, plugin, config):
ctx.obj['config'] = config
cli.summary = summary
cli.group_name = u'ckan'
return cli
# common definition for paster ... --config
click_config_option = click.option(
'-c',
'--config',
default=None,
metavar='CONFIG',
help=u'Config file to use (default: development.ini)')
class CkanCommand(paste.script.command.Command):
'''Base class for classes that implement CKAN paster commands.'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _load_config(self, load_site_user=True):
self.site_user = load_config(self.options.config, load_site_user)
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean - clears db (including dropping tables) and
search index
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file [DEPRECATED]
db load FILE_PATH - load a pg_dump from a file [DEPRECATED]
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing [DEPRECATED]
db create-from-model - create database from the model (indexes not made)
db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
cmd = self.args[0]
self._load_config(cmd!='upgrade')
import ckan.model as model
import ckan.lib.search as search
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print('Initialising DB: SUCCESS')
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear_all()
if self.verbose:
print('Cleaning DB: SUCCESS')
elif cmd == 'upgrade':
if len(self.args) > 1:
model.repo.upgrade_db(self.args[1])
else:
model.repo.upgrade_db()
elif cmd == 'version':
self.version()
elif cmd == 'dump':
self.dump()
elif cmd == 'load':
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print('Creating DB: SUCCESS')
elif cmd == 'migrate-filestore':
self.migrate_filestore()
else:
error('Command %s not recognized' % cmd)
def _get_db_config(self):
return parse_db_config()
def _get_postgres_cmd(self, command):
self.db_details = self._get_db_config()
if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
pg_cmd = command
pg_cmd += ' -U %(db_user)s' % self.db_details
if self.db_details.get('db_pass') not in (None, ''):
pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
if self.db_details.get('db_host') not in (None, ''):
pg_cmd += ' -h %(db_host)s' % self.db_details
if self.db_details.get('db_port') not in (None, ''):
pg_cmd += ' -p %(db_port)s' % self.db_details
return pg_cmd
def _get_psql_cmd(self):
psql_cmd = self._get_postgres_cmd('psql')
psql_cmd += ' -d %(db_name)s' % self.db_details
return psql_cmd
def _postgres_dump(self, filepath):
pg_dump_cmd = self._get_postgres_cmd('pg_dump')
pg_dump_cmd += ' %(db_name)s' % self.db_details
pg_dump_cmd += ' > %s' % filepath
self._run_cmd(pg_dump_cmd)
print('Dumped database to: %s' % filepath)
def _postgres_load(self, filepath):
import ckan.model as model
assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
self._run_cmd(pg_cmd)
print('Loaded CKAN database: %s' % filepath)
def _run_cmd(self, command_line):
import subprocess
retcode = subprocess.call(command_line, shell=True)
if retcode != 0:
raise SystemError('Command exited with errorcode: %i' % retcode)
def dump(self):
deprecation_warning(u"Use PostgreSQL's pg_dump instead.")
if len(self.args) < 2:
print('Need pg_dump filepath')
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_dump(dump_path)
def load(self, only_load=False):
deprecation_warning(u"Use PostgreSQL's pg_restore instead.")
if len(self.args) < 2:
print('Need pg_dump filepath')
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_load(dump_path)
if not only_load:
print('Upgrading DB')
import ckan.model as model
model.repo.upgrade_db()
print('Rebuilding search index')
import ckan.lib.search
ckan.lib.search.rebuild()
else:
print('Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.')
print('Done')
def migrate_filestore(self):
from ckan.model import Session
import requests
from ckan.lib.uploader import ResourceUpload
results = Session.execute("select id, revision_id, url from resource "
"where resource_type = 'file.upload' "
"and (url_type <> 'upload' or url_type is null)"
"and url like '%storage%'")
for id, revision_id, url in results:
response = requests.get(url, stream=True)
if response.status_code != 200:
print("failed to fetch %s (code %s)" % (url,
response.status_code))
continue
resource_upload = ResourceUpload({'id': id})
assert resource_upload.storage_path, "no storage configured aborting"
directory = resource_upload.get_directory(id)
filepath = resource_upload.get_path(id)
try:
os.makedirs(directory)
except OSError as e:
## errno 17 is file already exists
if e.errno != 17:
raise
with open(filepath, 'wb+') as out:
for chunk in response.iter_content(1024):
if chunk:
out.write(chunk)
Session.execute("update resource set url_type = 'upload'"
"where id = :id", {'id': id})
Session.execute("update resource_revision set url_type = 'upload'"
"where id = :id and "
"revision_id = :revision_id",
{'id': id, 'revision_id': revision_id})
Session.commit()
print("Saved url %s" % url)
def version(self):
from ckan.model import Session
print(Session.execute('select version from '
'migrate_version;').fetchall())
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] [-q] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
search-index rebuild_fast - reindex using multiprocessing using all cores.
This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self, name):
super(SearchIndexCommand, self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False,
help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False,
help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False,
help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-q', '--quiet', dest='quiet',
action='store_true', default=False,
help='Do not output index rebuild progress')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
immediately available in the search index, but significantly slows down the process.
Default is false.''')
def command(self):
if not self.args:
# default to printing help
print(self.usage)
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print('Command %s not recognized' % cmd)
def rebuild(self):
from ckan.lib.search import rebuild, commit
# By default we don't commit after each request to Solr, as it is
# a really heavy operation and slows things a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each),
quiet=self.options.quiet)
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print('Missing parameter: dataset-name')
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear, clear_all
package_id = self.args[1] if len(self.args) > 1 else None
if not package_id:
clear_all()
else:
clear(package_id)
def rebuild_fast(self):
### Get our config but without starting the pylons environment ####
conf = self._get_config()
### Get ids using our own engine, otherwise multiprocessing will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
## load the actual environment for each subprocess, so each has its own
## sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
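# Illustrative behaviour, derived from the arithmetic above: for 10 package
# ids and n=3, newn is 3, so chunks() yields ids[0:3], ids[3:6] and finally
# ids[6:], i.e. the last chunk absorbs any leftover items.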
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print('Command %s not recognized' % cmd)
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
# default to run
print(RDFExport.__doc__)
else:
self.export_datasets(self.args[0])
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
from ckan.common import config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id': dataset_name})
if not dd['state'] == 'active':
continue
url = h.url_for(controller='package', action='read', id=dd['name'])
url = urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join(out_folder, dd['name']) + ".rdf"
try:
r = urlopen(url).read()
except HTTPError as e:
if e.code == 404:
error('Please install ckanext-dcat and enable the ' +
'`dcat` plugin to use the RDF serializations')
with open(fname, 'wb') as f:
f.write(r)
except IOError as ioe:
sys.stderr.write(str(ioe) + "\n")
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - make an existing user into a sysadmin
sysadmin add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- creates a new user that is a sysadmin
(prompts for password and email if not
supplied).
Field can be: apikey
password
email
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
cmd = self.args[0] if self.args else None
if cmd is None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print('Command %s not recognized' % cmd)
def list(self):
import ckan.model as model
print('Sysadmins:')
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True,
state='active')
print('count = %i' % sysadmins.count())
for sysadmin in sysadmins:
print('%s name=%s email=%s id=%s' % (
sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.email,
sysadmin.id))
def add(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user to be made sysadmin.')
return
username = self.args[1]
user = model.User.by_name(text_type(username))
if not user:
print('User "%s" not found' % username)
makeuser = input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
user_add(self.args[1:])
user = model.User.by_name(text_type(username))
else:
print('Exiting ...')
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print('Added %s as sysadmin' % username)
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user to be removed from sysadmins.')
return
username = self.args[1]
user = model.User.by_name(text_type(username))
if not user:
print('Error: user "%s" not found!' % username)
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for email and
password if not supplied).
Field can be: apikey
password
email
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print('Users:')
users = model.Session.query(model.User).filter_by(state='active')
print('count = %i' % users.count())
for user in users:
print(self.get_user_str(user))
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(text_type(username))
print('User: \n', user)
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user.')
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print('Done')
def search(self):
import ckan.model as model
if len(self.args) < 2:
print('Need user name query string.')
return
query_str = self.args[1]
query = model.User.search(query_str)
print('%i users matching %r:' % (query.count(), query_str))
for user in query.all():
print(self.get_user_str(user))
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
error('Passwords do not match')
return password1
def add(self):
user_add(self.args[1:])
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user.')
return
username = self.args[1]
p.toolkit.get_action('user_delete')(
{'model': model, 'ignore_auth': True},
{'id': username})
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
if not self.args:
print(self.usage)
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print('Datasets:')
datasets = model.Session.query(model.Package)
print('count = %i' % datasets.count())
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' else ''
print('%s %s %s' % (dataset.id, dataset.name, state))
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(text_type(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
rev = model.repo.new_revision()
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print('%s %s -> %s' % (dataset.name, old_state, dataset.state))
def purge(self, dataset_ref):
import ckan.logic as logic
dataset = self._get_dataset(dataset_ref)
name = dataset.name
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': site_user['name']}
logic.get_action('dataset_purge')(
context, {'id': dataset_ref})
print('%s purged' % name)
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print('Command %s not recognized' % cmd)
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print("%i ratings" % q.count())
        q = q.filter(model.Rating.user_id.is_(None))
print("of which %i are anonymous ratings" % q.count())
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print("%i ratings" % q.count())
if not user_ratings:
            q = q.filter(model.Rating.user_id.is_(None))
print("of which %i are anonymous ratings" % q.count())
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
error(self.__class__.__doc__)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
error(self.__class__.__doc__)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
# No date given. See when we last have data for and get data
# from 2 days before then in case new data is available.
# If no date here then use 2011-01-01 as the start date
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
start_date_solrsync = start_date
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print('tracking updated for %s' % start_date)
start_date = stop_date
self.update_tracking_solr(engine, start_date_solrsync)
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql, measure_from=str(measure_from)).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '/dataset/'
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)=%s;
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;'''
engine.execute(sql, summary_date)
# get ids for dataset urls
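        # Reading of the regexp below (example URLs are illustrative only): the URL
        # column is prefixed with a single space and then stripped of an optional
        # two-letter locale segment plus PACKAGE_URL, so e.g. ' /fr/dataset/my-data'
        # and ' /dataset/my-data' both resolve to the package name 'my-data'.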
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE p.name = regexp_replace(' ' || t.url, '^[ ]{1}(/\w{2}){0,1}' || %s, ''))
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
def update_tracking_solr(self, engine, start_date):
sql = '''SELECT package_id FROM tracking_summary
where package_id!='~~not~found~~'
and tracking_date >= %s;'''
results = engine.execute(sql, start_date)
package_ids = set()
for row in results:
package_ids.add(row['package_id'])
total = len(package_ids)
not_found = 0
print('%i package index%s to be rebuilt starting from %s' % (total, '' if total < 2 else 'es', start_date))
from ckan.lib.search import rebuild
for package_id in package_ids:
try:
rebuild(package_id)
except logic.NotFound:
print("Error: package %s not found." % (package_id))
not_found += 1
except KeyboardInterrupt:
print("Stopped.")
return
except:
raise
print('search index rebuilding done.' + (' %i not found.' % (not_found) if not_found else ""))
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class': item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc': plugin.__doc__,
'class': plugin,
'implements': []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print(plugin + ':')
print('-' * (len(plugin) + 1))
if p['doc']:
print(p['doc'])
print('Implements:')
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print(' %s' % i)
if extra:
print(extra)
            print()
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
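        # Illustrative example (hypothetical helper, not taken from any plugin): a
        # helper defined as `def dataset_count(group=None)` would be rendered by the
        # loop below as 'dataset_count(group=None)', followed by its docstring lines.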
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
            # if this is a classmethod, drop the implicit first parameter
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
    create-test-data vocabs - annakarenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan import plugins
from create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print('Creating %s test data' % cmd)
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print('Created user %r with password %r and apikey %r' %
('tester', 'tester', 'tester'))
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print('Command %s not recognized' % cmd)
raise NotImplementedError
if self.verbose:
print('Creating %s test data: Complete!' % cmd)
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by pstats.Stats (command-line) or runsnakerun (gui).
Usage:
profile URL [username]
e.g. profile /data/search
The result is saved in profile.data.search
To view the profile in runsnakerun:
runsnakerun ckan.data.search.profile
    You may need to install the cProfile python module.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
if self.args[1:]:
user = self.args[1]
else:
user = 'visitor'
def profile_url(url):
try:
res = self.app.get(url, status=[200],
extra_environ={'REMOTE_USER': user})
except paste.fixture.AppError:
print('App error: ', url.strip())
except KeyboardInterrupt:
raise
except Exception:
error(traceback.format_exc())
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
import pstats
stats = pstats.Stats(output_filename)
stats.sort_stats('cumulative')
stats.print_stats(0.1) # show only top 10% of lines
print('Only top 10% of lines shown')
print('Written profile to: %s' % output_filename)
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
    color <'HEX'> - use the given hex value as the base color, eg '#ff00ff' (must be quoted).
color <VALUE> - a float between 0.0 and 1.0 used as base hue
color <COLOR_NAME> - html color name used for base color eg lightblue
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
        'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
        'indianred': '#cd5c5c',
        'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370d8',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#d87093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
    def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
        ''' Create n related colours '''
if saturation is None:
saturation = 0.9
if lightness is None:
lightness = 40
else:
lightness *= 100
import math
saturation -= math.trunc(saturation)
print(hue, saturation)
import colorsys
colors = []
for i in xrange(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
# check and remove any bad values
if not re.match('^\#[0-9a-f]{6}$', hex_color):
hex_color = '#FFFFFF'
colors.append(hex_color)
return colors
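        # Worked example (a sketch, not executed here): with the defaults
        # (saturation=0.9, lightness=40) and hue=0.5, the first iteration computes
        # _lightness = 0.4, colorsys.hls_to_rgb(0.5, 0.4, 0.9) ~= (0.04, 0.76, 0.76),
        # and the resulting hex string is roughly '#0ac1c1' (a teal).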
def command(self):
hue = None
saturation = None
lightness = None
public = config.get(u'ckan.base_public_folder')
path = os.path.dirname(__file__)
path = os.path.join(path, '..', public, 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print('custom colors removed.')
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
                    rgb = [int(x, 16) * 17 for x in color]  # expand shorthand, e.g. 'f' -> 0xff
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print('ERROR: invalid color')
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print('ERROR argument `%s` not recognised' % arg)
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
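            # Note on the scaling above: rgb_to_hls is fed 0-255 channel values
            # rather than 0-1, so the returned lightness is on a 0-255-ish scale;
            # dividing by 340 (rather than 255) presumably keeps the derived
            # lightness below ~0.75 so generated schemes never wash out to white.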
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in xrange(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print('%s: %s;\n' % (self.rules[i], colors[i]))
            f.close()
print('Color scheme has been created.')
print('Make sure less is run for changes to take effect.')
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from ckan.common import config
from ckan.lib.i18n import build_js_translations
ckan_path = os.path.join(os.path.dirname(__file__), '..')
self.i18n_path = config.get('ckan.i18n_directory',
os.path.join(ckan_path, 'i18n'))
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
build_js_translations()
else:
print('command not recognised')
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
        NOTE: This will destroy the current translations for zh_TW.
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
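        # Illustration (example msgid, not taken from the pot file): with the regexes
        # above a msgid such as u'Hello %(name)s!' is mangled to u'------%(name)s-',
        # i.e. every character outside the placeholders becomes '-'.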
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print('zh_TW has been mangled')
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
if the --clean option is provided any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False,
help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
                    dirs[:] = [d for d in dirs if d not in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
                # Path is neither a file nor a directory, so skip it.
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print('removing %s' % path)
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
        minified.
:param path: The path to the .js or .css file to minify
'''
import ckan.lib.fanstatic_resources as fanstatic_resources
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
        with open(path, 'r') as source_file:
            source = source_file.read()
        with open(path_min, 'w') as f:
            if path.endswith('.css'):
                f.write(rcssmin.cssmin(source))
            elif path.endswith('.js'):
                f.write(rjsmin.jsmin(source))
print("Minified file '{0}'".format(path))
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
def less(self):
''' Compile less files '''
import subprocess
command = 'npm bin'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
less_bin = os.path.join(directory, 'lessc')
public = config.get(u'ckan.base_public_folder')
root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
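        # Summary of the loop above: each predefined scheme overwrites custom.less
        # with its variables and compiles less/main.less into css/<color>.css; the
        # final pass resets custom.less to a stub and rebuilds the default main.css.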
def compile_less(self, root, less_bin, color):
print('compile %s.css' % color)
import subprocess
main_less = os.path.join(root, 'less', 'main.less')
main_css = os.path.join(root, 'css', '%s.css' % color)
command = '%s %s %s' % (less_bin, main_less, main_css)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
print(output)
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.options = self.options
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
public = config.get(u'ckan.base_public_folder')
root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
class ViewsCommand(CkanCommand):
'''Manage resource views.
Usage:
paster views create [options] [type1] [type2] ...
Create views on relevant resources. You can optionally provide
specific view types (eg `recline_view`, `image_view`). If no types
are provided, the default ones will be used. These are generally
the ones defined in the `ckan.views.default_views` config option.
            Note that in either case, plugins must be loaded (ie added to
`ckan.plugins`), otherwise the command will stop.
paster views clear [options] [type1] [type2] ...
Permanently delete all views or the ones with the provided types.
paster views clean
Permanently delete views for all types no longer present in the
`ckan.plugins` configuration option.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
def __init__(self, name):
super(ViewsCommand, self).__init__(name)
self.parser.add_option('-y', '--yes', dest='assume_yes',
action='store_true',
default=False,
help='''Automatic yes to prompts. Assume "yes"
as answer to all prompts and run non-interactively''')
self.parser.add_option('-d', '--dataset', dest='dataset_id',
action='append',
help='''Create views on a particular dataset.
You can use the dataset id or name, and it can be defined multiple times.''')
self.parser.add_option('--no-default-filters',
dest='no_default_filters',
action='store_true',
default=False,
help='''Do not add default filters for relevant
resource formats for the view types provided. Note that filters are not added
by default anyway if an unsupported view type is provided or when using the
`-s` or `-d` options.''')
self.parser.add_option('-s', '--search', dest='search_params',
action='store',
default=False,
help='''Extra search parameters that will be
used for getting the datasets to create the resource views on. It must be a
JSON object like the one used by the `package_search` API call. Supported
fields are `q`, `fq` and `fq_list`. Check the documentation for examples.
Not used when using the `-d` option.''')
def command(self):
self._load_config()
if not self.args:
print(self.usage)
elif self.args[0] == 'create':
view_plugin_types = self.args[1:]
self.create_views(view_plugin_types)
elif self.args[0] == 'clear':
view_plugin_types = self.args[1:]
self.clear_views(view_plugin_types)
elif self.args[0] == 'clean':
self.clean_views()
else:
print(self.usage)
_page_size = 100
def _get_view_plugins(self, view_plugin_types,
get_datastore_views=False):
'''
        Returns the view plugins that were successfully loaded
Views are provided as a list of ``view_plugin_types``. If no types are
provided, the default views defined in the ``ckan.views.default_views``
will be created. Only in this case (when the default view plugins are
        used) can the `get_datastore_views` parameter be used to also return
        view plugins that require data to be in the DataStore.
If any of the provided plugins could not be loaded (eg it was not added
to `ckan.plugins`) the command will stop.
Returns a list of loaded plugin names.
'''
from ckan.lib.datapreview import (get_view_plugins,
get_default_view_plugins
)
log = logging.getLogger(__name__)
view_plugins = []
if not view_plugin_types:
log.info('No view types provided, using default types')
view_plugins = get_default_view_plugins()
if get_datastore_views:
view_plugins.extend(
get_default_view_plugins(get_datastore_views=True))
else:
view_plugins = get_view_plugins(view_plugin_types)
loaded_view_plugins = [view_plugin.info()['name']
for view_plugin in view_plugins]
plugins_not_found = list(set(view_plugin_types) -
set(loaded_view_plugins))
if plugins_not_found:
            error('View plugin(s) not found: {0}. '.format(plugins_not_found)
+ 'Have they been added to the `ckan.plugins` configuration'
+ ' option?')
return loaded_view_plugins
def _add_default_filters(self, search_data_dict, view_types):
'''
Adds extra filters to the `package_search` dict for common view types
It basically adds `fq` parameters that filter relevant resource formats
for the view types provided. For instance, if one of the view types is
`pdf_view` the following will be added to the final query:
fq=res_format:"pdf" OR res_format:"PDF"
This obviously should only be used if all view types are known and can
be filtered, otherwise we want all datasets to be returned. If a
non-filterable view type is provided, the search params are not
modified.
Returns the provided data_dict for `package_search`, optionally
modified with extra filters.
'''
from ckanext.imageview.plugin import DEFAULT_IMAGE_FORMATS
from ckanext.textview.plugin import get_formats as get_text_formats
from ckanext.datapusher.plugin import DEFAULT_FORMATS as \
datapusher_formats
filter_formats = []
for view_type in view_types:
if view_type == 'image_view':
for _format in DEFAULT_IMAGE_FORMATS:
filter_formats.extend([_format, _format.upper()])
elif view_type == 'text_view':
formats = get_text_formats(config)
for _format in itertools.chain.from_iterable(formats.values()):
filter_formats.extend([_format, _format.upper()])
elif view_type == 'pdf_view':
filter_formats.extend(['pdf', 'PDF'])
elif view_type in ['recline_view', 'recline_grid_view',
'recline_graph_view', 'recline_map_view']:
if datapusher_formats[0] in filter_formats:
continue
for _format in datapusher_formats:
if '/' not in _format:
filter_formats.extend([_format, _format.upper()])
else:
# There is another view type provided so we can't add any
# filter
return search_data_dict
filter_formats_query = ['+res_format:"{0}"'.format(_format)
for _format in filter_formats]
search_data_dict['fq_list'].append(' OR '.join(filter_formats_query))
return search_data_dict
def _update_search_params(self, search_data_dict):
'''
Update the `package_search` data dict with the user provided parameters
Supported fields are `q`, `fq` and `fq_list`.
If the provided JSON object can not be parsed the process stops with
an error.
Returns the updated data dict
'''
log = logging.getLogger(__name__)
if not self.options.search_params:
return search_data_dict
try:
user_search_params = json.loads(self.options.search_params)
except ValueError as e:
error('Unable to parse JSON search parameters: {0}'.format(e))
if user_search_params.get('q'):
search_data_dict['q'] = user_search_params['q']
if user_search_params.get('fq'):
if search_data_dict['fq']:
search_data_dict['fq'] += ' ' + user_search_params['fq']
else:
search_data_dict['fq'] = user_search_params['fq']
if (user_search_params.get('fq_list') and
isinstance(user_search_params['fq_list'], list)):
            search_data_dict['fq_list'].extend(user_search_params['fq_list'])
        return search_data_dict
def _search_datasets(self, page=1, view_types=[]):
'''
Perform a query with `package_search` and return the result
Results can be paginated using the `page` parameter
'''
n = self._page_size
search_data_dict = {
'q': '',
'fq': '',
'fq_list': [],
'include_private': True,
'rows': n,
'start': n * (page - 1),
}
if self.options.dataset_id:
search_data_dict['q'] = ' OR '.join(
['id:{0} OR name:"{0}"'.format(dataset_id)
for dataset_id in self.options.dataset_id]
)
elif self.options.search_params:
self._update_search_params(search_data_dict)
elif not self.options.no_default_filters:
self._add_default_filters(search_data_dict, view_types)
if not search_data_dict.get('q'):
search_data_dict['q'] = '*:*'
query = p.toolkit.get_action('package_search')(
{}, search_data_dict)
return query
def create_views(self, view_plugin_types=[]):
from ckan.lib.datapreview import add_views_to_dataset_resources
log = logging.getLogger(__name__)
datastore_enabled = 'datastore' in config['ckan.plugins'].split()
loaded_view_plugins = self._get_view_plugins(view_plugin_types,
datastore_enabled)
context = {'user': self.site_user['name']}
page = 1
while True:
query = self._search_datasets(page, loaded_view_plugins)
if page == 1 and query['count'] == 0:
error('No datasets to create resource views on, exiting...')
elif page == 1 and not self.options.assume_yes:
msg = ('\nYou are about to check {0} datasets for the ' +
'following view plugins: {1}\n' +
' Do you want to continue?')
confirm = query_yes_no(msg.format(query['count'],
loaded_view_plugins))
if confirm == 'no':
error('Command aborted by user')
if query['results']:
for dataset_dict in query['results']:
if not dataset_dict.get('resources'):
continue
views = add_views_to_dataset_resources(
context,
dataset_dict,
view_types=loaded_view_plugins)
if views:
view_types = list(set([view['view_type']
for view in views]))
msg = ('Added {0} view(s) of type(s) {1} to ' +
'resources from dataset {2}')
log.debug(msg.format(len(views),
', '.join(view_types),
dataset_dict['name']))
if len(query['results']) < self._page_size:
break
page += 1
else:
break
log.info('Done')
def clear_views(self, view_plugin_types=[]):
log = logging.getLogger(__name__)
if not self.options.assume_yes:
if view_plugin_types:
msg = 'Are you sure you want to delete all resource views ' + \
'of type {0}?'.format(', '.join(view_plugin_types))
else:
msg = 'Are you sure you want to delete all resource views?'
result = query_yes_no(msg, default='no')
if result == 'no':
error('Command aborted by user')
context = {'user': self.site_user['name']}
logic.get_action('resource_view_clear')(
context, {'view_types': view_plugin_types})
log.info('Done')
def clean_views(self):
names = []
for plugin in p.PluginImplementations(p.IResourceView):
names.append(str(plugin.info()['name']))
results = model.ResourceView.get_count_not_in_view_types(names)
if not results:
print('No resource views to delete')
return
        print('This command will delete the following resource views:\n')
for row in results:
print('%s of type %s' % (row[1], row[0]))
result = query_yes_no('Do you want to delete these resource views:', default='no')
if result == 'no':
print('Not Deleting.')
return
model.ResourceView.delete_not_in_view_types(names)
model.Session.commit()
print('Deleted resource views.')
class ConfigToolCommand(paste.script.command.Command):
'''Tool for editing options in a CKAN config file
paster config-tool <default.ini> <key>=<value> [<key>=<value> ...]
paster config-tool <default.ini> -f <custom_options.ini>
Examples:
paster config-tool default.ini sqlalchemy.url=123 'ckan.site_title=ABC'
paster config-tool default.ini -s server:main -e port=8080
paster config-tool default.ini -f custom_options.ini
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
default_verbosity = 1
group_name = 'ckan'
usage = __doc__
summary = usage.split('\n')[0]
parser.add_option('-s', '--section', dest='section',
default='app:main', help='Section of the config file')
parser.add_option(
'-e', '--edit', action='store_true', dest='edit', default=False,
help='Checks the option already exists in the config file')
parser.add_option(
'-f', '--file', dest='merge_filepath', metavar='FILE',
help='Supply an options file to merge in')
def command(self):
import config_tool
if len(self.args) < 1:
self.parser.error('Not enough arguments (got %i, need at least 1)'
% len(self.args))
config_filepath = self.args[0]
if not os.path.exists(config_filepath):
self.parser.error('Config filename %r does not exist.' %
config_filepath)
if self.options.merge_filepath:
config_tool.config_edit_using_merge_file(
config_filepath, self.options.merge_filepath)
options = self.args[1:]
if not (options or self.options.merge_filepath):
self.parser.error('No options provided')
if options:
for option in options:
if '=' not in option:
error(
'An option does not have an equals sign: %r '
'It should be \'key=value\'. If there are spaces '
'you\'ll need to quote the option.\n' % option)
try:
config_tool.config_edit_using_option_strings(
config_filepath, options, self.options.section,
edit=self.options.edit)
except config_tool.ConfigToolError as e:
error(traceback.format_exc())
class JobsCommand(CkanCommand):
'''Manage background jobs
Usage:
paster jobs worker [--burst] [QUEUES]
Start a worker that fetches jobs from queues and executes
them. If no queue names are given then the worker listens
            to the default queue; this is equivalent to
paster jobs worker default
If queue names are given then the worker listens to those
queues and only those:
paster jobs worker my-custom-queue
Hence, if you want the worker to listen to the default queue
and some others then you must list the default queue explicitly:
paster jobs worker default my-custom-queue
If the `--burst` option is given then the worker will exit
as soon as all its queues are empty.
paster jobs list [QUEUES]
List currently enqueued jobs from the given queues. If no queue
names are given then the jobs from all queues are listed.
paster jobs show ID
Show details about a specific job.
paster jobs cancel ID
Cancel a specific job. Jobs can only be canceled while they are
enqueued. Once a worker has started executing a job it cannot
be aborted anymore.
paster jobs clear [QUEUES]
Cancel all jobs on the given queues. If no queue names are
given then ALL queues are cleared.
paster jobs test [QUEUES]
Enqueue a test job. If no queue names are given then the job is
added to the default queue. If queue names are given then a
separate test job is added to each of the queues.
'''
summary = __doc__.split(u'\n')[0]
usage = __doc__
min_args = 0
def __init__(self, *args, **kwargs):
super(JobsCommand, self).__init__(*args, **kwargs)
try:
self.parser.add_option(u'--burst', action='store_true',
default=False,
help=u'Start worker in burst mode.')
except OptionConflictError:
# Option has already been added in previous call
pass
def command(self):
self._load_config()
try:
cmd = self.args.pop(0)
except IndexError:
print(self.__doc__)
sys.exit(0)
if cmd == u'worker':
self.worker()
elif cmd == u'list':
self.list()
elif cmd == u'show':
self.show()
elif cmd == u'cancel':
self.cancel()
elif cmd == u'clear':
self.clear()
elif cmd == u'test':
self.test()
else:
error(u'Unknown command "{}"'.format(cmd))
def worker(self):
from ckan.lib.jobs import Worker
Worker(self.args).work(burst=self.options.burst)
def list(self):
data_dict = {
u'queues': self.args,
}
jobs = p.toolkit.get_action(u'job_list')({}, data_dict)
for job in jobs:
if job[u'title'] is None:
job[u'title'] = ''
else:
job[u'title'] = u'"{}"'.format(job[u'title'])
print(u'{created} {id} {queue} {title}'.format(**job))
def show(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
job = p.toolkit.get_action(u'job_show')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'ID: {}'.format(job[u'id']))
if job[u'title'] is None:
title = u'None'
else:
title = u'"{}"'.format(job[u'title'])
print(u'Title: {}'.format(title))
print(u'Created: {}'.format(job[u'created']))
print(u'Queue: {}'.format(job[u'queue']))
def cancel(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
p.toolkit.get_action(u'job_cancel')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'Cancelled job {}'.format(id))
def clear(self):
data_dict = {
u'queues': self.args,
}
queues = p.toolkit.get_action(u'job_clear')({}, data_dict)
queues = (u'"{}"'.format(q) for q in queues)
print(u'Cleared queue(s) {}'.format(u', '.join(queues)))
def test(self):
from ckan.lib.jobs import DEFAULT_QUEUE_NAME, enqueue, test_job
for queue in (self.args or [DEFAULT_QUEUE_NAME]):
job = enqueue(test_job, [u'A test job'], title=u'A test job', queue=queue)
print(u'Added test job {} to queue "{}"'.format(job.id, queue))
# dhcp.py
#!/usr/bin/python3
import time
import threading
import struct
import queue
import collections
import traceback
import random
import socket
import heapq
from listener import *
def get_host_ip_addresses():
return gethostbyname_ex(gethostname())[2]
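# Note (assumption about the wildcard import above): names such as gethostbyname_ex,
# gethostname, inet_aton, socket, SOCK_DGRAM, SOL_SOCKET and select are not imported
# explicitly in this file; they are presumed to be re-exported by `listener`
# (e.g. via `from socket import *` and `import select`), which is not shown here.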
class PriorityQueue(object):
def __init__(self):
self._queue = []
self._index = 0
def put(self, item):
heapq.heappush(self._queue, (self._index, item))
self._index += 1
def get(self):
return heapq.heappop(self._queue)[-1]
def qsize(self):
return len(self._queue)
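    # Despite its name, this queue ignores any priority carried by the items:
    # heapq is keyed on a monotonically increasing index, so get() simply
    # returns items in FIFO order.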
class WriteBootProtocolPacket(object):
message_type = 2 # 1 for client -> server 2 for server -> client
hardware_type = 1
hardware_address_length = 6
hops = 0
transaction_id = None
seconds_elapsed = 0
bootp_flags = 0 # unicast
client_ip_address = '0.0.0.0'
your_ip_address = '0.0.0.0'
next_server_ip_address = '0.0.0.0'
relay_agent_ip_address = '0.0.0.0'
client_mac_address = None
magic_cookie = '99.130.83.99'
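    # '99.130.83.99' is the dotted-quad form of the DHCP magic cookie 0x63825363
    # (RFC 2131); inet_aton() turns it back into the 4 raw bytes in to_bytes().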
parameter_order = []
def __init__(self, configuration):
for i in range(256):
names = ['option_{}'.format(i)]
if i < len(options) and hasattr(configuration, options[i][0]):
names.append(options[i][0])
for name in names:
if hasattr(configuration, name):
setattr(self, name, getattr(configuration, name))
def to_bytes(self):
result = bytearray(236)
result[0] = self.message_type
result[1] = self.hardware_type
result[2] = self.hardware_address_length
result[3] = self.hops
result[4:8] = struct.pack('>I', self.transaction_id)
result[ 8:10] = shortpack(self.seconds_elapsed)
result[10:12] = shortpack(self.bootp_flags)
result[12:16] = inet_aton(self.client_ip_address)
result[16:20] = inet_aton(self.your_ip_address)
result[20:24] = inet_aton(self.next_server_ip_address)
result[24:28] = inet_aton(self.relay_agent_ip_address)
result[28:28 + self.hardware_address_length] = macpack(self.client_mac_address)
result += inet_aton(self.magic_cookie)
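        # DHCP options follow as type/length/value triplets and the packet is
        # terminated with option 255 (END), as emitted below.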
for option in self.options:
value = self.get_option(option)
#print(option, value)
if value is None:
continue
result += bytes([option, len(value)]) + value
result += bytes([255])
return bytes(result)
def get_option(self, option):
if option < len(options) and hasattr(self, options[option][0]):
value = getattr(self, options[option][0])
elif hasattr(self, 'option_{}'.format(option)):
value = getattr(self, 'option_{}'.format(option))
else:
return None
function = options[option][2]
if function and value is not None:
value = function(value)
return value
@property
def options(self):
done = list()
# fulfill wishes
for option in self.parameter_order:
if option < len(options) and hasattr(self, options[option][0]) or hasattr(self, 'option_{}'.format(option)):
# this may break with the specification because we must try to fulfill the wishes
if option not in done:
done.append(option)
# add my stuff
for option, o in enumerate(options):
if o[0] and hasattr(self, o[0]):
if option not in done:
done.append(option)
for option in range(256):
if hasattr(self, 'option_{}'.format(option)):
if option not in done:
done.append(option)
return done
def __str__(self):
return str(ReadBootProtocolPacket(self.to_bytes()))
class DelayWorker(object):
def __init__(self):
self.closed = False
self.queue = PriorityQueue()
self.thread = threading.Thread(target = self._delay_response_thread)
self.thread.start()
def _delay_response_thread(self):
while not self.closed:
if self.closed:
break
if self.queue.qsize() > 0:
p = self.queue.get()
t, func, args, kw = p
now = time.time()
if now < t:
time.sleep(0.01)
self.queue.put(p)
else:
                    func(*args, **kw)
            else:
                # avoid a busy loop while the queue is empty
                time.sleep(0.01)
def do_after(self, seconds, func, args = (), kw = {}):
self.queue.put((time.time() + seconds, func, args, kw))
def close(self):
self.closed = True
class Transaction(object):
def __init__(self, server):
self.server = server
self.configuration = server.configuration
self.packets = []
self.done_time = time.time() + self.configuration.length_of_transaction
self.done = False
self.do_after = self.server.delay_worker.do_after
def is_done(self):
return self.done or self.done_time < time.time()
def close(self):
self.done = True
def receive(self, packet):
# packet from client <-> packet.message_type == 1
if packet.message_type == 1 and packet.dhcp_message_type == 'DHCPDISCOVER':
self.do_after(self.configuration.dhcp_offer_after_seconds,
self.received_dhcp_discover, (packet,), )
elif packet.message_type == 1 and packet.dhcp_message_type == 'DHCPREQUEST':
self.do_after(self.configuration.dhcp_acknowledge_after_seconds,
self.received_dhcp_request, (packet,), )
elif packet.message_type == 1 and packet.dhcp_message_type == 'DHCPINFORM':
self.received_dhcp_inform(packet)
else:
return False
return True
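        # Handler summary: DHCPDISCOVER is answered with a delayed DHCPOFFER
        # (send_offer), DHCPREQUEST with a delayed DHCPACK (acknowledge), and
        # DHCPINFORM only records the client; no offer is sent back for it.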
def received_dhcp_discover(self, discovery):
if self.is_done(): return
self.configuration.debug('discover:\n {}'.format(str(discovery).replace('\n', '\n\t')))
self.send_offer(discovery)
def send_offer(self, discovery):
# https://tools.ietf.org/html/rfc2131
offer = WriteBootProtocolPacket(self.configuration)
offer.parameter_order = discovery.parameter_request_list
mac = discovery.client_mac_address
ip = offer.your_ip_address = self.server.get_ip_address(discovery)
# offer.client_ip_address =
offer.transaction_id = discovery.transaction_id
# offer.next_server_ip_address =
offer.relay_agent_ip_address = discovery.relay_agent_ip_address
offer.client_mac_address = mac
offer.client_ip_address = discovery.client_ip_address or '0.0.0.0'
offer.bootp_flags = discovery.bootp_flags
offer.dhcp_message_type = 'DHCPOFFER'
offer.client_identifier = mac
self.server.broadcast(offer)
def received_dhcp_request(self, request):
if self.is_done(): return
self.server.client_has_chosen(request)
self.acknowledge(request)
self.close()
def acknowledge(self, request):
ack = WriteBootProtocolPacket(self.configuration)
ack.parameter_order = request.parameter_request_list
ack.transaction_id = request.transaction_id
# ack.next_server_ip_address =
ack.bootp_flags = request.bootp_flags
ack.relay_agent_ip_address = request.relay_agent_ip_address
mac = request.client_mac_address
ack.client_mac_address = mac
requested_ip_address = request.requested_ip_address
ack.client_ip_address = request.client_ip_address or '0.0.0.0'
ack.your_ip_address = self.server.get_ip_address(request)
ack.dhcp_message_type = 'DHCPACK'
self.server.broadcast(ack)
def received_dhcp_inform(self, inform):
self.close()
self.server.client_has_chosen(inform)
class DHCPServerConfiguration(object):
dhcp_offer_after_seconds = 10
dhcp_acknowledge_after_seconds = 10
length_of_transaction = 40
network = '192.168.173.0'
broadcast_address = '255.255.255.255'
subnet_mask = '255.255.255.0'
router = None # list of ips
# 1 day is 86400
ip_address_lease_time = 300 # seconds
domain_name_server = None # list of ips
host_file = 'hosts.csv'
debug = lambda *args, **kw: None
def load(self, file):
with open(file) as f:
exec(f.read(), self.__dict__)
def adjust_if_this_computer_is_a_router(self):
ip_addresses = get_host_ip_addresses()
for ip in reversed(ip_addresses):
if ip.split('.')[-1] == '1':
self.router = [ip]
self.domain_name_server = [ip]
self.network = '.'.join(ip.split('.')[:-1] + ['0'])
self.broadcast_address = '.'.join(ip.split('.')[:-1] + ['255'])
#self.ip_forwarding_enabled = True
#self.non_local_source_routing_enabled = True
#self.perform_mask_discovery = True
def all_ip_addresses(self):
ips = ip_addresses(self.network, self.subnet_mask)
for i in range(5):
next(ips)
return ips
def network_filter(self):
return NETWORK(self.network, self.subnet_mask)
def ip_addresses(network, subnet_mask):
import socket, struct
subnet_mask = struct.unpack('>I', socket.inet_aton(subnet_mask))[0]
network = struct.unpack('>I', socket.inet_aton(network))[0]
network = network & subnet_mask
start = network + 1
end = (network | (~subnet_mask & 0xffffffff))
return (socket.inet_ntoa(struct.pack('>I', i)) for i in range(start, end))
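# Worked example (illustrative values): ip_addresses('192.168.173.0', '255.255.255.0')
# yields '192.168.173.1' through '192.168.173.254'; DHCPServerConfiguration.
# all_ip_addresses() then skips the first five, so leases effectively start at .6.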
class ALL(object):
def __eq__(self, other):
return True
def __repr__(self):
return self.__class__.__name__
ALL = ALL()
class GREATER(object):
def __init__(self, value):
self.value = value
def __eq__(self, other):
return type(self.value)(other) > self.value
class NETWORK(object):
def __init__(self, network, subnet_mask):
self.subnet_mask = struct.unpack('>I', inet_aton(subnet_mask))[0]
self.network = struct.unpack('>I', inet_aton(network))[0]
def __eq__(self, other):
ip = struct.unpack('>I', inet_aton(other))[0]
return ip & self.subnet_mask == self.network and \
ip - self.network and \
ip - self.network != ~self.subnet_mask & 0xffffffff
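    # __eq__ above treats an address as "in" the network only if it lies inside the
    # subnet and is neither the network address itself nor the broadcast address.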
class CASEINSENSITIVE(object):
def __init__(self, s):
self.s = s.lower()
def __eq__(self, other):
return self.s == other.lower()
class CSVDatabase(object):
delimiter = ';'
def __init__(self, file_name):
self.file_name = file_name
self.file('a').close() # create file
def file(self, mode = 'r'):
return open(self.file_name, mode)
def get(self, pattern):
pattern = list(pattern)
return [line for line in self.all() if pattern == line]
def add(self, line):
with self.file('a') as f:
f.write(self.delimiter.join(line) + '\n')
def delete(self, pattern):
lines = self.all()
lines_to_delete = self.get(pattern)
self.file('w').close() # empty file
for line in lines:
if line not in lines_to_delete:
self.add(line)
def all(self):
with self.file() as f:
return [list(line.strip().split(self.delimiter)) for line in f]
class Host(object):
def __init__(self, mac, ip, hostname, last_used):
self.mac = mac.upper()
self.ip = ip
self.hostname = hostname
self.last_used = int(last_used)
@classmethod
def from_tuple(cls, line):
mac, ip, hostname, last_used = line
last_used = int(last_used)
return cls(mac, ip, hostname, last_used)
@classmethod
def from_packet(cls, packet):
return cls(packet.client_mac_address,
packet.requested_ip_address or packet.client_ip_address,
packet.host_name or '',
int(time.time()))
@staticmethod
def get_pattern(mac = ALL, ip = ALL, hostname = ALL, last_used = ALL):
return [mac, ip, hostname, last_used]
def to_tuple(self):
return [self.mac, self.ip, self.hostname, str(int(self.last_used))]
def to_pattern(self):
return self.get_pattern(ip = self.ip, mac = self.mac)
def __hash__(self):
        return hash(tuple(self.to_tuple()))  # hash on the same fields that __eq__ compares
def __eq__(self, other):
return self.to_tuple() == other.to_tuple()
def has_valid_ip(self):
return self.ip and self.ip != '0.0.0.0'
class HostDatabase(object):
def __init__(self, file_name):
self.db = CSVDatabase(file_name)
def get(self, **kw):
pattern = Host.get_pattern(**kw)
return list(map(Host.from_tuple, self.db.get(pattern)))
def add(self, host):
self.db.add(host.to_tuple())
def delete(self, host = None, **kw):
if host is None:
pattern = Host.get_pattern(**kw)
else:
pattern = host.to_pattern()
self.db.delete(pattern)
def all(self):
return list(map(Host.from_tuple, self.db.all()))
def replace(self, host):
self.delete(host)
self.add(host)
def sorted_hosts(hosts):
hosts = list(hosts)
hosts.sort(key = lambda host: (host.hostname.lower(), host.mac.lower(), host.ip.lower()))
return hosts
class DHCPServer(object):
def __init__(self, configuration = None):
        if configuration is None:
configuration = DHCPServerConfiguration()
self.configuration = configuration
self.socket = socket(type = SOCK_DGRAM)
self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.socket.bind(('', 67))
self.delay_worker = DelayWorker()
self.closed = False
self.transactions = collections.defaultdict(lambda: Transaction(self)) # id: transaction
self.hosts = HostDatabase(self.configuration.host_file)
self.time_started = time.time()
def close(self):
self.socket.close()
self.closed = True
self.delay_worker.close()
for transaction in list(self.transactions.values()):
transaction.close()
def update(self, timeout = 0):
try:
reads = select.select([self.socket], [], [], timeout)[0]
except ValueError:
# ValueError: file descriptor cannot be a negative integer (-1)
return
for socket in reads:
try:
packet = ReadBootProtocolPacket(*socket.recvfrom(4096))
except OSError:
# OSError: [WinError 10038] An operation was attempted on something that is not a socket
pass
else:
self.received(packet)
for transaction_id, transaction in list(self.transactions.items()):
if transaction.is_done():
transaction.close()
self.transactions.pop(transaction_id)
def received(self, packet):
if not self.transactions[packet.transaction_id].receive(packet):
self.configuration.debug('received:\n {}'.format(str(packet).replace('\n', '\n\t')))
def client_has_chosen(self, packet):
self.configuration.debug('client_has_chosen:\n {}'.format(str(packet).replace('\n', '\n\t')))
host = Host.from_packet(packet)
if not host.has_valid_ip():
return
self.hosts.replace(host)
def is_valid_client_address(self, address):
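        # octet-wise check: an octet is free wherever the subnet mask octet is 0,
        # otherwise it must match the corresponding network octet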
if address is None:
return False
a = address.split('.')
s = self.configuration.subnet_mask.split('.')
n = self.configuration.network.split('.')
return all(s[i] == '0' or a[i] == n[i] for i in range(4))
def get_ip_address(self, packet):
mac_address = packet.client_mac_address
requested_ip_address = packet.requested_ip_address
known_hosts = self.hosts.get(mac = CASEINSENSITIVE(mac_address))
ip = None
if known_hosts:
# 1. choose known ip address
for host in known_hosts:
if self.is_valid_client_address(host.ip):
ip = host.ip
print('known ip:', ip)
if ip is None and self.is_valid_client_address(requested_ip_address):
# 2. choose valid requested ip address
ip = requested_ip_address
print('valid ip:', ip)
if ip is None:
# 3. choose new, free ip address
chosen = False
network_hosts = self.hosts.get(ip = self.configuration.network_filter())
for ip in self.configuration.all_ip_addresses():
if not any(host.ip == ip for host in network_hosts):
chosen = True
break
if not chosen:
# 4. reuse old valid ip address
network_hosts.sort(key = lambda host: host.last_used)
ip = network_hosts[0].ip
assert self.is_valid_client_address(ip)
print('new ip:', ip)
if not any([host.ip == ip for host in known_hosts]):
print('add', mac_address, ip, packet.host_name)
self.hosts.replace(Host(mac_address, ip, packet.host_name or '', time.time()))
return ip
@property
def server_identifiers(self):
return get_host_ip_addresses()
def broadcast(self, packet):
self.configuration.debug('broadcasting:\n {}'.format(str(packet).replace('\n', '\n\t')))
for addr in self.server_identifiers:
broadcast_socket = socket(type = SOCK_DGRAM)
broadcast_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
broadcast_socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
packet.server_identifier = addr
broadcast_socket.bind((addr, 67))
try:
data = packet.to_bytes()
broadcast_socket.sendto(data, ('255.255.255.255', 68))
broadcast_socket.sendto(data, (addr, 68))
finally:
broadcast_socket.close()
def run(self):
while not self.closed:
try:
self.update(1)
except KeyboardInterrupt:
break
except:
traceback.print_exc()
def run_in_thread(self):
thread = threading.Thread(target = self.run)
thread.start()
return thread
def debug_clients(self):
        for line in self.hosts.db.all():  # iterate over the raw CSV rows of the host database
line = '\t'.join(line)
if line:
self.configuration.debug(line)
def get_all_hosts(self):
return sorted_hosts(self.hosts.get())
def get_current_hosts(self):
return sorted_hosts(self.hosts.get(last_used = GREATER(self.time_started)))
if __name__ == '__main__':
configuration = DHCPServerConfiguration()
configuration.debug = print
configuration.adjust_if_this_computer_is_a_router()
    # configuration.router += ['192.168.0.1']  # optionally add another router
configuration.ip_address_lease_time = 60
server = DHCPServer(configuration)
for ip in server.configuration.all_ip_addresses():
assert ip == server.configuration.network_filter()
server.run()
|
bot_controllable_talknet.py
|
import sys
import os
import base64
from typing import Text
import torch
import numpy as np
import tensorflow as tf
import crepe
import scipy
from scipy.io import wavfile
import psola
import io
import nemo
from nemo.collections.asr.models import EncDecCTCModel
from nemo.collections.tts.models import TalkNetSpectModel
from nemo.collections.tts.models import TalkNetPitchModel
from nemo.collections.tts.models import TalkNetDursModel
from talknet_singer import TalkNetSingerModel
import json
from tqdm import tqdm
import gdown
import zipfile
import resampy
import traceback
import ffmpeg
from flask import Flask, request, render_template, send_from_directory, Response
import uuid
import re
from argparse import ArgumentParser
import textwrap
from dash.exceptions import PreventUpdate  # generate_audio() below raises this; requires the dash package
sys.path.append("hifi-gan")
from env import AttrDict
from meldataset import mel_spectrogram, MAX_WAV_VALUE
from models import Generator
from denoiser import Denoiser
import transformers
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration, AutoTokenizer, AutoModelForSequenceClassification, AutoConfig, Conversation, ConversationalPipeline
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
import numpy as np
from scipy.special import softmax
import csv
import time
import rtmidi
from twitchio.ext import commands
from dotenv import load_dotenv
import logging
logging.getLogger('nemo_logger').setLevel(logging.ERROR)
transformers.logging.set_verbosity_error()
load_dotenv()
DEVICE = "cpu"
DEVICE2 = "cuda:0" if torch.cuda.is_available() else "cpu"
midiout = rtmidi.MidiOut()
available_ports = midiout.get_ports()
# detect the position of an element in a list (returns None if not found)
def detect(items, element):
    for i in range(len(items)):
        if items[i] == element:
            return i
port = detect(available_ports, 'loopMIDI 1')
midiout.open_port(port) # Select midi port
CPU_PITCH = False
RUN_PATH = os.path.dirname(os.path.realpath(__file__))
UI_MODE = "offline"
torch.set_grad_enabled(False)
if CPU_PITCH:
tf.config.set_visible_devices([], "GPU")
DICT_PATH = os.path.join(RUN_PATH, "horsewords.clean")
# Load models and tokenizer for Blenderbot and sentiment analysis
mname = "facebook/blenderbot-1B-distill"
model_bb = BlenderbotForConditionalGeneration.from_pretrained(mname).to(DEVICE2)
tokenizer_bb = BlenderbotTokenizer.from_pretrained(mname)
nlp = ConversationalPipeline(model=model_bb, tokenizer=tokenizer_bb, device=0)
task='sentiment'
MODEL_S = f"cardiffnlp/twitter-roberta-base-{task}"
MODELP = f"C:\\Users\\nuked\\OneDrive\\Documents\\Script\\TalkNet\\ControllableTalkNet\\sentiment"
MODELPR = f"C:\\Users\\nuked\\OneDrive\\Documents\\Script\\TalkNet\\ControllableTalkNet\\twitter-roberta-base-sentiment"
#DO ONLY THE FIRST TIME
#tokenizer = AutoTokenizer.from_pretrained(MODEL_S)
#tokenizer.save_pretrained(MODELP)
#config.save_pretrained(MODELP)
config_sent = AutoConfig.from_pretrained(MODELP)
tokenizer_sent = AutoTokenizer.from_pretrained(MODELP)
model_sent = AutoModelForSequenceClassification.from_pretrained(MODELPR).to(DEVICE2)
def preprocess(text):
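    # mask user handles and URLs so the tweet-trained sentiment model sees the
    # placeholder tokens it expects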
new_text = []
for t in text.split(" "):
t = '@user' if t.startswith('@') and len(t) > 1 else t
t = 'http' if t.startswith('http') else t
new_text.append(t)
return " ".join(new_text)
def play(note, duration):
midiout.send_message([0x90, note, 0x7f])
time.sleep(duration)
midiout.send_message([0x80, note, 0x7f])
def signals(i):
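    # map a sentiment label to the MIDI note that triggers the matching
    # animation/cue on the receiving end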
switcher={
"negative":40,
"neutral":36,
"positive":38
}
    return switcher.get(i, 36)  # unknown label: fall back to the neutral cue
def list2file(l,f):
with open(f, 'w') as f:
json.dump(l, f, indent = 6)
def file2list(file):
with open(file, 'r') as f:
return json.load(f)
def load_history(f,conversation):
jj = file2list(f)
for j in jj:
if j["is_user"]==False:
conversation.append_response(j["text"])
conversation.mark_processed()
else:
conversation.add_user_input(j["text"])
return conversation
# smart split that wraps on word boundaries instead of cutting words
def smart_split(str,max_lenght):
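    # greedily accumulate words until the running length exceeds max_lenght,
    # then start a new line (separator spaces are not counted toward the length)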
list = []
lenght_tot=0
full_line=""
#print(str.split())
for s in str.split():
lgn_w=len(s)
lenght_tot=lenght_tot+lgn_w
#print(f"current lenght sum: {lenght_tot}")
if lenght_tot < max_lenght:
full_line=full_line+" "+s
else:
list.append(full_line)
lenght_tot=len(s)
full_line=s
#append the last words
list.append(full_line)
if len(list)==0:
list=[str]
return list
def smart_split_list(full_text,max_lenght):
line = full_text.split(". ")
sub_line=[]
for l in line:
sub_line= sub_line + smart_split(l,max_lenght)
return sub_line
def load_hifigan(model_name, conf_name):
# Load HiFi-GAN
conf = os.path.join("hifi-gan", conf_name + ".json")
#print(f"Load HiFi-GAN {model_name} conf {conf_name}")
with open(conf) as f:
json_config = json.loads(f.read())
h = AttrDict(json_config)
torch.manual_seed(h.seed)
hifigan = Generator(h).to(torch.device(DEVICE))
state_dict_g = torch.load(model_name, map_location=torch.device(DEVICE))
hifigan.load_state_dict(state_dict_g["generator"])
hifigan.eval()
hifigan.remove_weight_norm()
denoiser = Denoiser(hifigan, mode="normal")
return hifigan, h, denoiser
def generate_json(input, outpath):
output = ""
sample_rate = 22050
lpath = input.split("|")[0].strip()
size = os.stat(lpath).st_size
x = {
"audio_filepath": lpath,
"duration": size / (sample_rate * 2),
"text": input.split("|")[1].strip(),
}
output += json.dumps(x) + "\n"
with open(outpath, "w", encoding="utf8") as w:
w.write(output)
asr_model = (
EncDecCTCModel.from_pretrained(model_name="asr_talknet_aligner").cpu().eval()
)
def forward_extractor(tokens, log_probs, blank):
"""Computes states f and p."""
n, m = len(tokens), log_probs.shape[0]
# `f[s, t]` -- max sum of log probs for `s` first codes
# with `t` first timesteps with ending in `tokens[s]`.
f = np.empty((n + 1, m + 1), dtype=float)
f.fill(-(10 ** 9))
p = np.empty((n + 1, m + 1), dtype=int)
f[0, 0] = 0.0 # Start
for s in range(1, n + 1):
c = tokens[s - 1]
for t in range((s + 1) // 2, m + 1):
f[s, t] = log_probs[t - 1, c]
# Option #1: prev char is equal to current one.
if s == 1 or c == blank or c == tokens[s - 3]:
options = f[s : (s - 2 if s > 1 else None) : -1, t - 1]
else: # Is not equal to current one.
options = f[s : (s - 3 if s > 2 else None) : -1, t - 1]
f[s, t] += np.max(options)
p[s, t] = np.argmax(options)
return f, p
def backward_extractor(f, p):
"""Computes durs from f and p."""
n, m = f.shape
n -= 1
m -= 1
durs = np.zeros(n, dtype=int)
if f[-1, -1] >= f[-2, -1]:
s, t = n, m
else:
s, t = n - 1, m
while s > 0:
durs[s - 1] += 1
s -= p[s, t]
t -= 1
assert durs.shape[0] == n
assert np.sum(durs) == m
assert np.all(durs[1::2] > 0)
return durs
def preprocess_tokens(tokens, blank):
new_tokens = [blank]
for c in tokens:
new_tokens.extend([c, blank])
tokens = new_tokens
return tokens
parser = (
nemo.collections.asr.data.audio_to_text.AudioToCharWithDursF0Dataset.make_vocab(
notation="phonemes",
punct=True,
spaces=True,
stresses=False,
add_blank_at="last",
)
)
arpadict = None
def load_dictionary(dict_path):
arpadict = dict()
with open(dict_path, "r", encoding="utf8") as f:
for l in f.readlines():
word = l.split(" ")
assert len(word) == 2
arpadict[word[0].strip().upper()] = word[1].strip()
return arpadict
def replace_words(input, dictionary):
regex = re.findall(r"[\w'-]+|[^\w'-]", input)
assert input == "".join(regex)
for i in range(len(regex)):
word = regex[i].upper()
if word in dictionary.keys():
regex[i] = "{" + dictionary[word] + "}"
return "".join(regex)
def arpa_parse(input, model):
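    # replace dictionary words with {ARPAbet} blocks, then walk the string:
    # plain text is tokenized via model.parse(), ARPAbet blocks are mapped to
    # token ids directly through parser.labels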
global arpadict
if arpadict is None:
arpadict = load_dictionary(DICT_PATH)
z = []
space = parser.labels.index(" ")
input = replace_words(input, arpadict)
while "{" in input:
if "}" not in input:
            input = input.replace("{", "")
else:
pre = input[: input.find("{")]
if pre.strip() != "":
x = model.parse(text=pre.strip())
seq_ids = x.squeeze(0).cpu().detach().numpy()
z.extend(seq_ids)
z.append(space)
arpaword = input[input.find("{") + 1 : input.find("}")]
arpaword = (
arpaword.replace("0", "")
.replace("1", "")
.replace("2", "")
.strip()
.split(" ")
)
seq_ids = []
for x in arpaword:
if x == "":
continue
if x.replace("_", " ") not in parser.labels:
continue
seq_ids.append(parser.labels.index(x.replace("_", " ")))
seq_ids.append(space)
z.extend(seq_ids)
input = input[input.find("}") + 1 :]
if input != "":
x = model.parse(text=input.strip())
seq_ids = x.squeeze(0).cpu().detach().numpy()
z.extend(seq_ids)
if z[-1] == space:
z = z[:-1]
if z[0] == space:
z = z[1:]
return [
z[i] for i in range(len(z)) if (i == 0) or (z[i] != z[i - 1]) or (z[i] != space)
]
def to_arpa(input):
arpa = ""
z = []
space = parser.labels.index(" ")
while space in input:
z.append(input[: input.index(space)])
input = input[input.index(space) + 1 :]
z.append(input)
for y in z:
if len(y) == 0:
continue
arpaword = " {"
for s in y:
if parser.labels[s] == " ":
arpaword += "_ "
else:
arpaword += parser.labels[s] + " "
arpaword += "} "
if not arpaword.replace("{", "").replace("}", "").replace(" ", "").isalnum():
arpaword = arpaword.replace("{", "").replace(" }", "")
arpa += arpaword
return arpa.replace(" ", " ").replace(" }", "}").strip()
def get_duration(wav_name, transcript, tokens):
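    # build a one-line NeMo manifest for the reference wav, run the aligner ASR
    # model on it, and extract per-token durations with the forward/backward
    # pass defined above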
if not os.path.exists(os.path.join(RUN_PATH, "temp")):
os.mkdir(os.path.join(RUN_PATH, "temp"))
if "_" not in transcript:
generate_json(
os.path.join(RUN_PATH, "temp", wav_name + "_conv.wav")
+ "|"
+ transcript.strip(),
os.path.join(RUN_PATH, "temp", wav_name + ".json"),
)
else:
generate_json(
os.path.join(RUN_PATH, "temp", wav_name + "_conv.wav") + "|" + "dummy",
os.path.join(RUN_PATH, "temp", wav_name + ".json"),
)
data_config = {
"manifest_filepath": os.path.join(RUN_PATH, "temp", wav_name + ".json"),
"sample_rate": 22050,
"batch_size": 1,
}
dataset = nemo.collections.asr.data.audio_to_text._AudioTextDataset(
manifest_filepath=data_config["manifest_filepath"],
sample_rate=data_config["sample_rate"],
parser=parser,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=data_config["batch_size"],
collate_fn=dataset.collate_fn,
shuffle=False,
)
blank_id = asr_model.decoder.num_classes_with_blank - 1
for sample_idx, test_sample in tqdm(enumerate(dl), total=len(dl)):
log_probs, _, greedy_predictions = asr_model(
input_signal=test_sample[0], input_signal_length=test_sample[1]
)
log_probs = log_probs[0].cpu().detach().numpy()
target_tokens = preprocess_tokens(tokens, blank_id)
f, p = forward_extractor(target_tokens, log_probs, blank_id)
durs = backward_extractor(f, p)
del test_sample
return durs
return None
def crepe_f0(wav_path, hop_length=256):
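    # run CREPE on the reference audio, then interpolate the f0 and confidence
    # curves onto the mel hop grid; low-confidence or near-silent frames are
    # zeroed out below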
# sr, audio = wavfile.read(io.BytesIO(wav_data))
sr, audio = wavfile.read(wav_path)
audio_x = np.arange(0, len(audio)) / 22050.0
f0time, frequency, confidence, activation = crepe.predict(audio, sr, viterbi=True)
x = np.arange(0, len(audio), hop_length) / 22050.0
freq_interp = np.interp(x, f0time, frequency)
conf_interp = np.interp(x, f0time, confidence)
audio_interp = np.interp(x, audio_x, np.absolute(audio)) / 32768.0
weights = [0.5, 0.25, 0.25]
audio_smooth = np.convolve(audio_interp, np.array(weights)[::-1], "same")
conf_threshold = 0.25
audio_threshold = 0.0005
for i in range(len(freq_interp)):
if conf_interp[i] < conf_threshold:
freq_interp[i] = 0.0
if audio_smooth[i] < audio_threshold:
freq_interp[i] = 0.0
# Hack to make f0 and mel lengths equal
if len(audio) % hop_length == 0:
freq_interp = np.pad(freq_interp, pad_width=[0, 1])
return (
torch.from_numpy(freq_interp.astype(np.float32)),
torch.from_numpy(frequency.astype(np.float32)),
)
def f0_to_audio(f0s):
volume = 0.2
sr = 22050
freq = 440.0
base_audio = (
np.sin(2 * np.pi * np.arange(256.0 * len(f0s)) * freq / sr) * volume
).astype(np.float32)
shifted_audio = psola.vocode(base_audio, sr, target_pitch=f0s)
for i in range(len(f0s)):
if f0s[i] == 0.0:
shifted_audio[i * 256 : (i + 1) * 256] = 0.0
print(type(shifted_audio[0]))
buffer = io.BytesIO()
wavfile.write(buffer, sr, shifted_audio.astype(np.float32))
b64 = base64.b64encode(buffer.getvalue())
sound = "data:audio/x-wav;base64," + b64.decode("ascii")
return sound
def update_model(model):
if model is not None and model.split("|")[0] == "Custom":
style = {"margin-bottom": "0.7em", "display": "block"}
else:
style = {"display": "none"}
return style
def update_pitch_options(value):
return ["pf" not in value, "dra" in value, "dra" in value]
def debug_pitch(n_clicks, pitch_clicks, current_f0s):
if not n_clicks or current_f0s is None or n_clicks <= pitch_clicks:
if n_clicks is not None:
pitch_clicks = n_clicks
else:
pitch_clicks = 0
return [
None,
None,
pitch_clicks,
]
pitch_clicks = n_clicks
return [f0_to_audio(current_f0s), playback_style, pitch_clicks]
hifigan_sr, h2, denoiser_sr = None, None, None
def download_model(model, custom_model):
global hifigan_sr, h2, denoiser_sr
d = "https://drive.google.com/uc?id="
if model == "Custom":
drive_id = custom_model
else:
drive_id = model
if drive_id == "" or drive_id is None:
return ("Missing Drive ID", None, None)
if not os.path.exists(os.path.join(RUN_PATH, "models")):
os.mkdir(os.path.join(RUN_PATH, "models"))
if not os.path.exists(os.path.join(RUN_PATH, "models", drive_id)):
os.mkdir(os.path.join(RUN_PATH, "models", drive_id))
zip_path = os.path.join(RUN_PATH, "models", drive_id, "model.zip")
gdown.download(
d + drive_id,
zip_path,
quiet=False,
)
if not os.path.exists(zip_path):
os.rmdir(os.path.join(RUN_PATH, "models", drive_id))
return ("Model download failed", None, None)
if os.stat(zip_path).st_size < 16:
os.remove(zip_path)
os.rmdir(os.path.join(RUN_PATH, "models", drive_id))
return ("Model zip is empty", None, None)
with zipfile.ZipFile(zip_path, "r") as zip_ref:
zip_ref.extractall(os.path.join(RUN_PATH, "models", drive_id))
os.remove(zip_path)
#print("Download super-resolution HiFi-GAN")
# Download super-resolution HiFi-GAN
sr_path = "hifi-gan/hifisr"
if not os.path.exists(sr_path):
gdown.download(
d + "14fOprFAIlCQkVRxsfInhEPG0n-xN4QOa", sr_path, quiet=False
)
if not os.path.exists(sr_path):
raise Exception("HiFI-GAN model failed to download!")
if hifigan_sr is None:
hifigan_sr, h2, denoiser_sr = load_hifigan(sr_path, "config_32k")
#print("END DOWNLOAD")
return (
None,
os.path.join(RUN_PATH, "models", drive_id, "TalkNetSpect.nemo"),
os.path.join(RUN_PATH, "models", drive_id, "hifiganmodel"),
)
tnmodel, tnpath, tndurs, tnpitch = None, None, None, None
hifigan, h, denoiser, hifipath = None, None, None, None
def getSentiment(text,DEVICE2,model_sent,tokenizer_sent):
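    # classify `text` with the RoBERTa sentiment model; only accept a label
    # whose softmax score exceeds 0.8, otherwise fall back to "neutral"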
# Transform input tokens
# Tasks:
# emoji, emotion, hate, irony, offensive, sentiment
# stance/abortion, stance/atheism, stance/climate, stance/feminist, stance/hillary
# download label mapping
    # label order must match the model's output classes: 0 negative, 1 neutral, 2 positive
    mapping = "0 negative\n1 neutral\n2 positive\n"
    labels = [row.split(" ")[1] for row in mapping.split("\n") if row]
#text = preprocess(output_bb)
#react to the question not at the answer
text = preprocess(text)
encoded_input = tokenizer_sent(text, return_tensors='pt').to(DEVICE2)
outputs = model_sent(**encoded_input)
scores = outputs[0][0].cpu().detach().numpy()
scores = softmax(scores)
ranking = np.argsort(scores)
ranking = ranking[::-1]
    label = None
    for i in range(scores.shape[0]):
        l = labels[ranking[i]]
        s = scores[ranking[i]]
        if s > 0.8:
            label = l
    if label is None:
        label = "neutral"
return label
def blande_sentiment(UTTERANCE,DEVICE2,model_sent,tokenizer_sent,name="test"):
#UTTERANCE= input(f"sss{DEVICE}: ")
try:
conversation = Conversation()
fname_base="conversations/base_message_conv.json"
fname=f"conversations/{name}_messages.json"
if os.path.exists(fname):
conversation= load_history(fname,conversation)
else:
print("loading base conversation")
conversation= load_history(fname_base,conversation)
conversation.add_user_input(UTTERANCE)
result = nlp([conversation], do_sample=False, max_length=1000)
messages = []
for is_user, text in result.iter_texts():
messages.append({
'is_user': is_user,
'text': text
})
output_bb =messages[len(messages)-1]["text"].strip()
list2file(messages,fname)
label = getSentiment(UTTERANCE,DEVICE2,model_sent,tokenizer_sent)
print(f"Sentiment detected: {label}")
return label,str(output_bb)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def pad_audio(data, fs, T):
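    # prepend zeros so the clip lasts at least T seconds at sample rate fs;
    # if the clip is already long enough it is returned unchanged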
# Calculate target number of samples
N_tar = int(fs * T)
    # Current shape of the data
    shape = data.shape
    # Number of zero samples to prepend
    N_pad = N_tar - shape[0]
    print("Padding with %s seconds of silence" % str(N_pad / fs))
    shape = (N_pad,) + shape[1:]
# Stack only if there is something to append
if shape[0] > 0:
if len(shape) > 1:
return np.vstack((np.zeros(shape),
data))
else:
return np.hstack((np.zeros(shape),
data))
else:
return data
def generate_audio(n_clicks,model,custom_model,transcript,pitch_options,pitch_factor,wav_name="wavname",f0s=None,f0s_wo_silence=None,silence=0):
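    # pipeline: download/cache the requested TalkNet + HiFi-GAN models, convert
    # the transcript to tokens, synthesize a spectrogram, vocode it, optionally
    # auto-tune to the reference pitch, then mix in the 32 kHz super-resolution band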
print(f"Generateing audio...")
global tnmodel, tnpath, tndurs, tnpitch, hifigan, h, denoiser, hifipath
if n_clicks is None:
raise PreventUpdate
if model is None:
return [None, "No character selected", None, None]
if transcript is None or transcript.strip() == "":
return [
None,
"No transcript entered",
None,
None,
]
if wav_name is None and "dra" not in pitch_options:
return [
None,
"No reference audio selected",
None,
None,
]
load_error, talknet_path, hifigan_path = download_model(
model.split("|")[0], custom_model
)
if load_error is not None:
print(load_error)
return [
None,
load_error,
None,
None,
]
with torch.no_grad():
if tnpath != talknet_path:
singer_path = os.path.join(
os.path.dirname(talknet_path), "TalkNetSinger.nemo"
)
if os.path.exists(singer_path):
tnmodel = TalkNetSingerModel.restore_from(singer_path).to(DEVICE)
else:
tnmodel = TalkNetSpectModel.restore_from(talknet_path).to(DEVICE)
durs_path = os.path.join(
os.path.dirname(talknet_path), "TalkNetDurs.nemo"
)
pitch_path = os.path.join(
os.path.dirname(talknet_path), "TalkNetPitch.nemo"
)
if os.path.exists(durs_path):
tndurs = TalkNetDursModel.restore_from(durs_path)
tnmodel.add_module("_durs_model", tndurs)
tnpitch = TalkNetPitchModel.restore_from(pitch_path)
tnmodel.add_module("_pitch_model", tnpitch)
else:
tndurs = None
tnpitch = None
tnmodel.to(DEVICE)
tnmodel.eval()
tnpath = talknet_path
token_list = arpa_parse(transcript, tnmodel)
tokens = torch.IntTensor(token_list).view(1, -1).to(DEVICE)
arpa = to_arpa(token_list)
print(arpa)
if "dra" in pitch_options:
if tndurs is None or tnpitch is None:
return [
None,
"Model doesn't support pitch prediction",
None,
None,
]
spect = tnmodel.generate_spectrogram(tokens=tokens)
else:
durs = get_duration(wav_name, transcript, token_list)
# Change pitch
if "pf" in pitch_options:
f0_factor = np.power(np.e, (0.0577623 * float(pitch_factor)))
f0s = [x * f0_factor for x in f0s]
f0s_wo_silence = [x * f0_factor for x in f0s_wo_silence]
spect = tnmodel.force_spectrogram(
tokens=tokens,
durs=torch.from_numpy(durs)
.view(1, -1)
.type(torch.LongTensor)
.to(DEVICE),
f0=torch.FloatTensor(f0s).view(1, -1).to(DEVICE),
)
if hifipath != hifigan_path:
hifigan, h, denoiser = load_hifigan(hifigan_path, "config_v1")
hifipath = hifigan_path
y_g_hat = hifigan(spect.float())
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio_denoised = denoiser(audio.view(1, -1), strength=35)[:, 0]
audio_np = (
audio_denoised.detach().cpu().numpy().reshape(-1).astype(np.int16)
)
# Auto-tuning
if "pc" in pitch_options and "dra" not in pitch_options:
_, output_freq, _, _ = crepe.predict(audio_np, 22050, viterbi=True)
output_pitch = torch.from_numpy(output_freq.astype(np.float32))
target_pitch = torch.FloatTensor(f0s_wo_silence).to(DEVICE)
factor = torch.mean(output_pitch) / torch.mean(target_pitch)
octaves = [0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0]
nearest_octave = min(octaves, key=lambda x: abs(x - factor))
target_pitch *= nearest_octave
if len(target_pitch) < len(output_pitch):
target_pitch = torch.nn.functional.pad(
target_pitch,
(0, list(output_pitch.shape)[0] - list(target_pitch.shape)[0]),
"constant",
0,
)
if len(target_pitch) > len(output_pitch):
target_pitch = target_pitch[0 : list(output_pitch.shape)[0]]
audio_np = psola.vocode(
audio_np, 22050, target_pitch=target_pitch
).astype(np.float32)
normalize = (1.0 / np.max(np.abs(audio_np))) ** 0.9
audio_np = audio_np * normalize * MAX_WAV_VALUE
audio_np = audio_np.astype(np.int16)
# Resample to 32k
wave = resampy.resample(
audio_np,
h.sampling_rate,
h2.sampling_rate,
filter="sinc_window",
window=scipy.signal.windows.hann,
num_zeros=8,
)
wave_out = wave.astype(np.int16)
# HiFi-GAN super-resolution
wave = wave / MAX_WAV_VALUE
wave = torch.FloatTensor(wave).to(DEVICE)
new_mel = mel_spectrogram(
wave.unsqueeze(0),
h2.n_fft,
h2.num_mels,
h2.sampling_rate,
h2.hop_size,
h2.win_size,
h2.fmin,
h2.fmax,
)
y_g_hat2 = hifigan_sr(new_mel)
audio2 = y_g_hat2.squeeze()
audio2 = audio2 * MAX_WAV_VALUE
audio2_denoised = denoiser(audio2.view(1, -1), strength=35)[:, 0]
# High-pass filter, mixing and denormalizing
audio2_denoised = audio2_denoised.detach().cpu().numpy().reshape(-1)
b = scipy.signal.firwin(
101, cutoff=10500, fs=h2.sampling_rate, pass_zero=False
)
y = scipy.signal.lfilter(b, [1.0], audio2_denoised)
y *= 4.0 # superres strength
y_out = y.astype(np.int16)
y_padded = np.zeros(wave_out.shape)
y_padded[: y_out.shape[0]] = y_out
sr_mix = wave_out + y_padded
out_data = pad_audio(sr_mix, 30000, silence)
audio_array = out_data.astype(np.int16)
return audio_array
def sanitize_input(input_str):
stopwords = readListFromFile("Assets/emoticon.lst")
for i in stopwords:
n=input_str.replace(i.strip(),'')
input_str=n
result = input_str.strip()
return result.replace("\n", " ").replace("\r", " ").replace("\t", " ").replace("’", "'").replace("“", "\"").replace("”", "\"").replace("‘","").replace("(",",").replace(")",",")
def sanitize_output(text):
return text.replace("\n", " ").replace("\r", " ").replace("\t", " ").replace("’", "'").replace("“", "\"").replace("”", "\"").replace("?", "?,")
def play_audio_buffer(buffer,rate):
import simpleaudio as sa
play_obj = sa.play_buffer(buffer, 2, 2, rate)
play_obj.wait_done()
# script exit
def play_audio(audio_path):
"""
Play audio
"""
try:
import subprocess
subprocess.call(["ffplay", "-nodisp","-af","atempo=0.8", "-autoexit","-hide_banner","-loglevel","error", audio_path])
#if sys.platform == "win32":
# os.startfile(audio_path)
#else:
# opener = "open" if sys.platform == "darwin" else "xdg-open"
# subprocess.call([opener, audio_path])
except Exception:
return str(traceback.format_exc())
def readListFromFile(file_path):
with open(file_path, 'r') as f:
lines = f.readlines()
return lines
def readFile(file_path):
with open(file_path, 'r') as f:
return f.read()
def writeFile(fileName, text):
f = open(fileName, "w")
f.write(text)
f.close()
def launch_voice(question,author):
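    # overall flow: take a chat message, get a Blenderbot reply plus a sentiment
    # label, synthesize the reply in chunks, send a MIDI cue for the sentiment,
    # and play the resulting audio; a .lock file keeps the bot from answering
    # while it is still speaking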
#create file .lock
writeFile("./.lock", "")
if author == "":
print("NO auth, enter in manual mode")
answer=sanitize_input(question)
l= "neutral" #getSentiment(answer,DEVICE2,model_sent,tokenizer_sent)
delay=0
else:
#get text
req_text = sanitize_input(question)
if req_text!="":
print("Sanitized input: "+req_text)
writeFile("current.txt", f"{author}'s turn!")
#get answer and sentiment
l,answer = blande_sentiment(req_text,DEVICE2,model_sent,tokenizer_sent,author)
answer = sanitize_output(f"{answer}")
        else:
            print("Skip because it's emoticon only")
            os.remove("./.lock")  # release the lock before bailing out
            return
        delay = 15
wav_name="ok"
list_chunks=textwrap.wrap(answer, 700)
for chunk in list_chunks:
#get audio voice
#1KgVnjrnxZTXgjnI56ilkq5G4UJCbbwZZ|default fluttershy
#1QnOliOAmerMUNuo2wXoH-YoainoSjZen|default default
#1_ztAbe5YArCMwyyQ_G9lUiz74ym5xJKC|default luna
#1YkV1VtP1w5XOx3jYYarrCKSzXCB_FLCy|default scootaloo
#1rcPDqgDeCIHGDdvfOo-fxfA1XeM4g3CB|default trixie
#1BBdTHis91MwnHTt7tD_xtZ-nQ9SgvqD6|singing fluttershy
#10CENYWV5ugTXZbnsldN6OKR7wkDEe7V7|singing default singing
        audio_buffer = generate_audio(8, "1QnOliOAmerMUNuo2wXoH-YoainoSjZen|default", None, chunk, "dra", 0, wav_name, silence=delay)
        try:
            audio_numpy = np.concatenate((audio_numpy, audio_buffer), axis=0)
        except NameError:
            # first chunk: nothing to concatenate with yet
            audio_numpy = audio_buffer
#save last audio
wavfile.write(wav_name+".wav", 30000, audio_numpy)
#send midi for control the character
play(signals(l),1.5)
print(f"Playing audio of: {answer}")
play_audio("ok.wav")
writeFile("current.txt", f" ")
#remove file .lock
os.remove("./.lock")
from threading import Thread
b = 1
class Bot(commands.Bot):
def __init__(self):
# Initialise our Bot with our access token, prefix and a list of channels to join on boot...
super().__init__(token=os.environ['TMI_TOKEN'],
client_id=os.environ['CLIENT_ID'],
nick=os.environ['BOT_NICK'],
prefix="!",
initial_channels=[os.environ['CHANNEL']])
async def event_ready(self):
# We are logged in and ready to chat and use commands...
print(f'Logged in as | {self.nick}')
async def event_message(self, message):
print(f"Message received: {message.content} from {message.author.name}")
#check if file .lock exists
if os.path.isfile("./.lock"):
#print("Skip because .lock file exists")
return
else:
# This is where we handle all of our commands...
if message.content.startswith('@aki '):
#await message.channel.send('Hello!')
mess=message.content.replace('@aki ','')
print(f"Message received: {mess} from {message.author.name}")
#launch_voice(mess,message.author.name)
                th = Thread(target=launch_voice, args=(mess, message.author.name))
th.start()
else:
print(f"Message received: {message.content} from {message.author.name}")
#launch_voice(message.content,message.author.name)
th = Thread(target=launch_voice, args=(message.content,message.author.name ))
th.start()
#await self.handle_commands(message)
#create menu
def create_menu(options, width=30):
menu = []
for option in options:
menu.append(option.ljust(width))
return menu
#show menu
def show_menu(menu):
i=0
for item in menu:
i=i+1
print(f"{i} - {item}")
#get choice
def get_choice(menu):
show_menu(menu)
choice = input(">>> ")
return choice
#handle choice
def handle_choice(choice, menu, options):
# handle invalid choice
if choice.isdigit() and (int(choice) in range(1, len(options) + 1)):
return options[int(choice) - 1]
else:
print("Invalid choice!")
return handle_choice(get_choice(menu), menu, options)
#main
def main():
# Remove the lock file if it exists
if os.path.isfile("./.lock"):
os.remove("./.lock")
# Create a list of options
options = ["QA Mode","Input Text","Get From Txt","Test Emotion" ,"Exit"]
# Create a menu from the options list
menu = create_menu(options)
choice = handle_choice(get_choice(menu), menu, options)
# Play the selected audio
if choice == "QA Mode":
bot = Bot()
bot.run()
elif choice == "Input Text":
while True:
text = input("Enter text: ")
            # break the loop on empty input
if text == "":
break
else:
launch_voice(text,"")
elif choice == "Get From Txt":
text = readFile("conversations/read/read.txt")
launch_voice(text,"")
elif choice == "Test Emotion":
play(signals("positive"),1.5)
# Exit the program
elif choice == "Exit":
exit()
#call main
if __name__ == "__main__":
while True:
try:
main()
        except Exception:
            traceback.print_exc()  # keep the loop alive and show what went wrong
|
test_server.py
|
import os
import socketserver
import threading
from http.server import BaseHTTPRequestHandler
class TestHandler(BaseHTTPRequestHandler):
def do_GET(self):
filename = os.path.basename(self.path)
self.send_response(200)
self.send_header('Content-Disposition', 'attachment; filename="{}"'.format(filename))
self.end_headers()
        # serve the requested file from ./tests/resources
        with open('./tests/resources{}'.format(self.path), 'rb') as _file:
self.wfile.write(_file.read())
class TestServer(socketserver.TCPServer):
allow_reuse_address = True
def server(port=8001):
httpd = TestServer(("", port), TestHandler)
httpd_thread = threading.Thread(target=httpd.serve_forever)
    httpd_thread.daemon = True
httpd_thread.start()
return httpd
|
experiment.py
|
'''
Experiment class containing all information about the current setup of experiment.
Lists of liquids (Experiment.liq) and solids (Experiment.sol).
Allowed ranges of quantities for every component in a dict (Experiment.rng)
'''
from bayes_opt import DiscreteBayesianOptimization, UtilityFunction
from kuka_parser import Parser
import os
from time import time, sleep
import datetime
import multiprocessing
from shutil import copyfile
import pickle
import numpy as np
import traceback
import uuid
import math
class Experiment:
MINI_BATCH = 16
BATCH = 16
BATCH_FILES = 1 # number of files that we want to see in the queue, should be BATCH/BATCH_FILES = MINI_BATCH
SLEEP_DELAY = 5 # delay in seconds before querying the queue folder again
directory_path = './'
def __init__(self, directory_path=None):
if directory_path: self.directory_path = directory_path
# General setup of the experiment
self.compounds = [] # Simply the list of compounds to vary
self.properties = {} # Properties (liquid, solid, etc.) of the compounds, e.g. comp['P10'] = {'phys': 'solid', 'proc' : 'cat'}
self.rng = {} # Ranges with resolution, e.g. rng['P10'] = {'lo' : 0, 'hi' : 1, 'res' : 0.1}
        self.dbo_ranges = {}  # Ranges with resolution formatted for dbo (including mapping of complements)
self.constraints = [] # list of the constraints that points should satisfy, e.g.
self.controls = [] # list of the control experiments to include in each minibatch
self.complements = {} # Mapping of all complementary variables to single dimensions in optimizer space {'!Complement!_01' : {}}
# Outcomes of ongoing experimentation
self.points = [] # list of measured targets (different experiments), e.g. [{'P10' : 0.1, 'TiO2' : 0.2}, ... ]
self.targets = [] # measured response at the experiments [1.1, 2.1, ...]
self.name = 'Unknown' # Name of the experiment that will appear in all the files
self.batch_number = 1 # Number of the mini_batch to submit next
self.liquids = [] # list of liquids
        self.constants = {}  # list of compounds to be kept constant during measurements for the current search space
        self.identical_compounds = {}  # dict keyed by compound name; each value maps <other liquid> -> <concentration factor>
self.__read_config()
self.__prep_dirs()
self.parser = Parser(self.compounds, self.directory_path) # Associated parser responsible for IO operations
def __read_config(self):
'''
The function reads the optimizer.config file and
fills in all the general parameters of the experiment
Also, read optimizer.state to get the next batch number
'''
try:
with open(self.directory_path + 'optimizer.config', "r") as f:
# compounds list is expected first
compounds_section = True
constraints_section = False
controls_section = False
complements_section = False
self.name = f.readline().rstrip()
for line in f.readlines():
if line.startswith("#") or line.isspace():
continue
else:
if line.startswith("Constraints"):
constraints_section = True
compounds_section = False
controls_section = False
complements_section = False
continue
elif line.startswith("Controls"):
constraints_section = False
compounds_section = False
controls_section = True
complements_section = False
continue
elif line.startswith("Complements"):
constraints_section = False
compounds_section = False
controls_section = False
complements_section = True
cidx = 1
continue
if compounds_section:
tmp = line.rstrip().split(sep=',')
name = tmp[0]
self.compounds.append(name)
self.properties[name] = {'phys': tmp[1], 'proc': tmp[2]}
self.rng[name] = {'lo': float(tmp[3]), 'hi': float(tmp[4]), 'res': float(tmp[5])}
# list liquids
if self.properties[name]['phys'] == 'liquid':
self.liquids.append(name)
# list constants
if self.rng[name]['lo'] == self.rng[name]['hi']:
self.constants[name] = self.rng[name]['lo']
alt_liq = math.floor(len(tmp) / 2) - 3
if alt_liq > 0:
self.identical_compounds[name] = {}
for x in range(alt_liq):
self.identical_compounds[name][tmp[6 + 2 * x]] = tmp[7 + 2 * x]
if constraints_section:
self.constraints.append(line.rstrip())
if controls_section:
cols = line.rstrip().split(sep=',')
d = {}
for col in cols:
tmp = col.split(sep=':')
d[tmp[0].strip()] = float(tmp[1])
self.controls.append(d)
if complements_section:
''' General format:
{!Complement!_cidx: {A_name: compound, A_range: rng, B_name: compound, B_range: rng}}
'''
cols = [col.strip() for col in line.rstrip().split(sep=':')]
assert len(cols) == 2, "Complements come in pairs! Failure at '{}'".format(line)
assert cols[0] in self.compounds, "This complement is not in the compounds: {}".format(
cols[0])
assert cols[1] in self.compounds, "This complement is not in the compounds: {}".format(
cols[1])
self.complements['!Complement!_{}'.format(cidx)] = {'A_name': cols[0],
'B_name': cols[1]}
try:
self.complements['!Complement!_{}'.format(cidx)]['A_range'] = self.rng[cols[0]]
self.complements['!Complement!_{}'.format(cidx)]['B_range'] = self.rng[cols[1]]
except:
raise SyntaxError(
"Please place complements after compounds and ranges in configuration file.")
cidx += 1
# Update of optimizer ranges and constraints from complements
self.dbo_ranges = {p: (r['lo'], r['hi'], r['res']) for p, r in self.rng.items() if r['lo'] < r['hi']}
for key, dict in self.complements.items():
a = self.dbo_ranges.pop(dict['A_name'])
b = self.dbo_ranges.pop(dict['B_name'])
self.dbo_ranges[key] = (0., 1., min(a[2] / (a[1] - a[0]) / 2,
b[2] / (b[1] - b[0]) / 2))
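                # each complement pair is folded into one 0..1 optimizer variable:
                # values below 0.5 map onto compound A's range, values at or above
                # 0.5 map onto compound B's range (see complement_mapping below)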
new_constraints = []
for s in self.constraints:
s = s.replace(dict['A_name'],
"(({}<0.5) * (((0.5 - {})/0.5) * ({:f}-{:f}) + {:f}) )".format(key, key, a[1], a[0],
a[0]))
s = s.replace(dict['B_name'],
"(({}>=0.5) * ((({} - 0.5)/0.5) * ({:f}-{:f}) + {:f}) )".format(key, key, b[1], b[0],
b[0]))
new_constraints.append(s)
self.constraints = new_constraints
except IOError:
print("There is no configuration file in the experiment folder.")
try:
with open(self.directory_path + 'optimizer.state', "r") as f:
self.batch_number = int(f.readline().rstrip())
except IOError:
print("No state file present. Generating new one.")
with open(self.directory_path + 'optimizer.state', "w") as f:
f.write('1')
def __prep_dirs(self):
'''
Prepare all necessary directories
'''
dirs = ['running', 'runqueue', 'completed', 'models']
for d in dirs:
try:
os.makedirs(os.path.join(self.directory_path, d), exist_ok=True)
except:
raise
def __repr__(self):
return str(self.__class__) + ": " + str(self.__dict__)
def __str__(self):
output = f"Experiment '{self.name}' is in progress.\n"
output += f"The next batch is {self.batch_number}\n"
output += "Compounds to vary with ranges and resolution:\n"
for composition, bounds in self.rng.items():
# print(bounds)
output += (f' {composition}: [' + str(bounds['lo']) \
+ ', ' + str(bounds['hi']) + ', ' + str(bounds['res']) + ']\n')
return output
def complement_mapping(self, point):
'''
Maps any complementary variables in a point to a single variable in optimizer space,
or maps the single variable in optimizer space back to the complementary points
in configuration space.
Arguments
----------
point: dictionary of variable names and points, updated inplace
'''
if len(self.complements) == 0: return
keys = [key for key in point]
if any(key.split('_')[0] == '!Complement!' for key in keys):
for key in keys:
if key.split('_')[0] != '!Complement!': continue
dict = self.complements[key]
val = point.pop(key)
if val < 0.5:
a_val = ((0.5 - val) / 0.5) * (dict['A_range']['hi'] - dict['A_range']['lo']) + dict['A_range'][
'lo']
b_val = 0
else:
a_val = 0
b_val = ((val - 0.5) / 0.5) * (dict['B_range']['hi'] - dict['B_range']['lo']) + dict['B_range'][
'lo']
point[dict['A_name']] = a_val
point[dict['B_name']] = b_val
else:
for complement, dict in self.complements.items():
a_val = point.pop(dict['A_name'])
b_val = point.pop(dict['B_name'])
if a_val > 0 and b_val > 0: raise RuntimeError("Complementary values are both nonzero")
if a_val > 0:
new_val = 0.5 - 0.5 * (
(a_val - dict['A_range']['lo']) / (dict['A_range']['hi'] - dict['A_range']['lo']))
elif b_val > 0:
new_val = 0.5 + 0.5 * (
(b_val - dict['B_range']['lo']) / (dict['B_range']['hi'] - dict['B_range']['lo']))
else: #Zero case
new_val = 0.5
point[complement] = new_val
return
def output_space(self, path):
"""
Outputs complete space as csv file.
Simple function for testing
Parameters
----------
path
Returns
-------
"""
import pandas as pd
df = pd.DataFrame(self.points)
df['Target'] = self.targets
df.to_csv(path)
def clear_previous_model(self):
'''
Moves previous model to past model folder
data = {} # Data dictionary to be saved
'''
fname = os.path.join(self.directory_path, 'optimizer.pickle')
if os.path.isfile(fname):
copyfile(fname,
os.path.join(self.directory_path, 'models', 'state_{}.pickle'.format(self.batch_number - 1)))
os.remove(fname)
def generate_model(self, verbose=0, random_state=None):
'''
Creates, saves, and returns Bayesian optimizer
Saves previous model in folder according to read batch number, or 0 if none is available
Arguments
----------
verbose: 0 (quiet), 1 (printing only maxima as found), 2 (print every registered point)
random_state: integer for random number generator
Returns
----------
dbo: instance of DiscreteBayesianOptimization
'''
self.clean_queue()
data = {} # Data dictionary to be saved
prange = self.dbo_ranges
# Initialize optimizer and utility function
dbo = DiscreteBayesianOptimization(f=None,
prange=prange,
verbose=verbose,
random_state=random_state,
constraints=self.constraints)
if verbose:
dbo._prime_subscriptions()
dbo.dispatch(Events.OPTMIZATION_START)
# Register past data to optimizer
self.update_points_and_targets()
for idx, point in enumerate(self.points):
dbo.register(params=point, target=self.targets[idx])
            if verbose and idx % self.MINI_BATCH == 0: dbo.dispatch(Events.BATCH_END)  # assumes mini-batch-sized progress reporting
# Register running data to partner space in optimizer
running_points = self.get_running_points()
for idx, point in enumerate(running_points):
if idx == 0:
dbo.partner_register(params=point, clear=True)
else:
dbo.partner_register(params=point, clear=False)
# Fit gaussian process
data['random_state'] = np.random.get_state()
if len(dbo.space) > 0:
dbo.output_space('dbo_space.csv')
#self.output_space('exp_space.csv')
start_time = time()
dbo.fit_gp()
print("Model trained in {:8.2f} minutes".format((time()-start_time)/60))
if any(dbo._gp.kernel_.k1.k1.length_scale<5e-3):
print("Warning: Very short length scale detected when fitting Matern kernel. Retraining model...")
start_time = time()
dbo.fit_gp()
print("Model trained in {:8.2f} minutes".format((time() - start_time) / 60))
if any(dbo._gp.kernel_.k1.k1.length_scale>5e2):
print("Warning: Very long length scale detected when fitting Matern kernel.")
print("Model length scales:")
for key, value in dict(zip(dbo.space.keys, dbo._gp.kernel_.k1.k1.length_scale)).items():
print("{}: {:8.4f}".format(key, value))
print("Model noise: {}".format(dbo._gp.kernel_.k2.noise_level))
print("Model constant scale: {}".format(dbo._gp.kernel_.k1.k2.constant_value))
# Refresh queue and copy old model
self.read_batch_number()
fname = os.path.join(self.directory_path, 'optimizer.pickle')
if os.path.isfile(fname):
copyfile(fname,
os.path.join(self.directory_path, 'models', 'state_{}.pickle'.format(self.batch_number - 1)))
# Build dictionary to save and return model
data['processed_files'] = list(self.parser.processed_files.keys())
data['model'] = dbo
data['uuid'] = uuid.uuid4()
with open(fname, 'wb') as handle:
pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
return dbo
def generate_batch(self, batch_size=BATCH, verbose=0, random_state=None, utility_kind="ucb", kappa=2.5, xi=0.0,
sampler='greedy', **kwargs):
'''
Creates optimizer, registers all previous data, and generates a proposed batch.
Arguments
----------
batch_size: integer number of points to suggest per batch
verbose: 0 (quiet), 1 (printing only maxima as found), 2 (print every registered point)
random_state: integer for random number generator
utility_kind: Utility function to use ('ucb', 'ei', 'poi')
kappa: float, necessary for 'ucb' utility function
xi: float, translation of gaussian function
**kwargs: dictionary passed to suggestion function. See bayes_opt.parallel_opt.disc_acq_max() for options
Returns
----------
batch: list of dictionaries containing parameters for each variable in the experiment
'''
batch = []
# Update kwargs
if sampler == 'greedy' or sampler == 'capitalist':
kwargs['complements'] = bool(self.complements)
# Initialize optimizer and utility function
fname = os.path.join(self.directory_path, 'optimizer.pickle')
if os.path.isfile(fname):
with open(fname, 'rb') as handle:
data = pickle.load(handle)
dbo = data['model']
running_points = self.get_running_points()
for point in running_points:
dbo.partner_register(params=point, clear=False)
self.model_uuid = data['uuid']
else:
dbo = self.generate_model(verbose=verbose, random_state=random_state)
self.model_uuid = self.get_saved_model_uuid()
utility = UtilityFunction(kind=utility_kind, kappa=kappa, xi=xi)
# Generate batch of suggestions
dbo.reset_rng()
batch = dbo.suggest(utility, sampler=sampler, n_acqs=batch_size, fit_gp=False, **kwargs)
# Clear and re-register running data to partner space in optimizer (can be adjusted in capitalist)
running_points = self.get_running_points()
for idx, point in enumerate(running_points):
if idx == 0:
dbo.partner_register(params=point, clear=True)
else:
dbo.partner_register(params=point, clear=False)
for point in batch:
self.complement_mapping(point)
return batch
def register_mini_batch(self, mini_batch):
'''
Submit the mini_batch to the workflow.
'''
self.read_batch_number()
if len(mini_batch) != self.MINI_BATCH:
print("Warning! You are not submitting the right amount of measurements per mini-batch.")
batch_name = self.name + '-' + "{:0>4d}".format(self.batch_number)
self.parser.submit_mini_batch(batch_name, mini_batch, self.liquids)
self.batch_number += 1
self.write_batch_number()
def queue_size(self):
'''
Simply checks the number of files available in the queue
Note: (1) I use 'exp_name in first_line' to check whether
the file belongs to our experiment. Thus, it is better
to use some fixed prefix for ml-driven runs
'''
queue_size = 0
folder_path = self.directory_path + 'runqueue/'
for f in os.listdir(folder_path):
if os.path.isfile(os.path.join(folder_path, f)) and os.path.splitext(f)[1] == '.run':
try:
with open(os.path.join(folder_path, f), "r") as file_input:
if self.name in file_input.readline():
queue_size += 1
except IOError:
print("One of the queue files was not processed. Check Experiment.queue_size.")
except UnicodeDecodeError:
print("Unreadable files in queue. Potentially system files polluting space.")
return queue_size
def write_batch_number(self):
'''writes out file for state tracking'''
try:
with open(self.directory_path + 'optimizer.state', "w") as f:
f.write(str(self.batch_number))
except IOError:
print("Failed to save the batch number in the optimizer.state.")
print(f"Current batch number is {self.batch_number}")
def read_batch_number(self):
'''reads state file for updated batch number'''
try:
with open(self.directory_path + 'optimizer.state', "r") as f:
self.batch_number = int(f.readline().rstrip())
except IOError:
print("No state file present. Generating new one.")
self.batch_number = 1
self.write_batch_number()
def clean_queue(self):
'''
This will clear the queue of experimental files with the experiment name.
Used primarily when a fresh model is generated, during active workflow time.
'''
self.read_batch_number()
folder_path = os.path.join(self.directory_path, 'runqueue/')
for f in os.listdir(folder_path):
clean = False
if os.path.isfile(os.path.join(folder_path, f)) and os.path.splitext(f)[1] == '.run':
try:
with open(os.path.join(folder_path, f), "r") as file_input:
if self.name in file_input.readline() \
and self.name + '-0' not in file_input.readline(): # not deleting manually submitted files
clean = True
except IOError:
print("One of the queue files was not processed ({:s}). Check Experiment.clean_queue.".format(f))
except UnicodeDecodeError:
print("Unreadable files in queue. Potentially system files polluting space.")
if clean:
os.remove(os.path.join(folder_path, f))
self.batch_number -= 1
print("The queue has been cleared.\n", end='Time is ')
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
self.write_batch_number()
def get_running_points(self):
'''
Check whether there are experiments in the runque or active running, and return list.
This purposefully ignores '_dispensed' values, since this shouldn't be relevant until completed.
'''
dfs = self.parser.process_running(self.name)
skipped = 0
_points = []
for df in dfs:
for idx, row in df.iterrows():
point = {}
if self.skip_point(row):
skipped = skipped + 1
else:
for comp in self.compounds:
if self.rng[comp]['lo'] < self.rng[comp]['hi']:
if comp in df:
point[comp] = row[comp]
continue
else:
if comp in self.identical_compounds.keys():
found_alternative = False
for alternative in self.identical_compounds[comp].keys():
if alternative in df:
# print(
# 'found_alternative ' + alternative + ' in ' + row['Name'] +
# ' for compound' + comp + ' with final concentration ' + str(
# float(row[alternative]) * float(
# self.identical_compounds[comp][alternative])))
found_alternative = True
point[comp] = float(row[alternative]) * float(
self.identical_compounds[comp][alternative])
continue
if not found_alternative:
point[comp] = 0
else:
point[comp] = 0
point[comp] = 0
self.complement_mapping(point)
_points.append(point)
if skipped != 0:
print('Warning: Ignored ' + str(skipped) + ' points in running folder.')
return _points
def skip_point(self, point):
'''
Exclude any points that contain compounds that are not under consideration:
i.e. filter non-variable compounds that are != 0
Returns false if there is any compounds in the sample that are not under consideration
'''
        for key, value in point.items():
# case 0: sample is leaking
if key == 'oxygen_evolution_micromol' and value > 5:
print('Warning, skipping leaky point ' + point['Name'])
return True
# case 1: standard column that is not representing a compound
if (key in {'SampleIndex', 'SampleNumber', 'Name', 'vial_capped', 'gc_well_number',
'hydrogen_evolution', 'oxygen_evolution', 'hydrogen_evolution_micromol',
'oxygen_evolution_micromol', 'water', 'water_dispensed',
'internal_hydrogen_standard_micromol', 'weighted_hydrogen_micromol', 'sample_location_weight',
'weighted_is_sl_hydrogen_evolution_micromol'}) \
or 'Unnamed' in key: # deal with faulty comma
continue
# case 2: column directly representing variable
if (key in self.compounds) or ((len(key) > 10) and (key[:-10] in self.compounds)):
# case 2.5: column representing control only/ compound not included in experiment.
if key in self.rng and self.rng[key]['hi'] <= 0 and value>0:
#print('Warning, ignoring point with ' + key + ' and value ' + str(value))
return True
else:
continue
# case 3: column representing variable but with different concentration for compound in list
            skip = False
for compound in self.compounds:
if compound in self.identical_compounds and \
((key in self.identical_compounds[compound].keys()) or (
(len(key) > 10) and (key[:-10] in self.identical_compounds[compound].keys()))):
# print('Found compound that is a variable, but with different concentration' + key)
skip = True
continue
if skip:
continue
# case 4: column is unclear, but value is 0
if value == 0:
continue
# case 5: column not representing variable compound in list
# print('Warning, ignoring point with ' + key + ' and value ' + str(value))
return True
# no unexpected columns found
return False
def update_points_and_targets(self):
'''
Check whether there are new available measurements.
If there are, then fill in self.points and self.targets
Note (1) 'Name_dispensed' has preference over simply 'Name'
(2) Silently ignores values of all other compounds!
(3) For now we kick out rows with Nan's in 'hydrogen_evolution'
'''
for filename in self.parser.process_completed_folder(self.name):
# print(filename)
frame = self.parser.processed_files[filename]
frame.dropna(subset=['hydrogen_evolution'],
inplace=True) # Update on a later date for a more appropriate handling
# print(filename, self.parser.processed_files[filename].tail())
print(f"Adding data from {filename} to the list of points: {len(frame)} measurements.")
f_targets = list(self.optimisation_target(frame))
skipped = 0
for idx, row in frame.iterrows():
point = {}
skip_point = self.skip_point(row)
if skip_point:
skipped = skipped + 1
else:
for comp in self.compounds:
if self.rng[comp]['lo'] < self.rng[comp]['hi']:
if comp + '_dispensed' in frame:
point[comp] = row[comp + '_dispensed']
continue
if comp in frame:
point[comp] = row[comp]
continue
if comp in self.identical_compounds.keys():
found_alternative = False
for alternative in self.identical_compounds[comp].keys():
# there seem to be nan values if the batch has any comments => ignore them
if (alternative + '_dispensed') in frame and not math.isnan(row[alternative]):
# print(
# 'found_alternative ' + alternative + ' in ' + row['Name'] +
# ' for compound' + comp + ' with final value ' +
# str(float(row[alternative+'_dispensed'])
# * float(self.identical_compounds[comp][alternative])))
found_alternative = True
point[comp] = float(row[alternative + '_dispensed']) * float(
self.identical_compounds[comp][alternative])
continue
elif alternative in frame and not math.isnan(row[alternative]):
# print(
# 'found_alternative ' + alternative + ' in ' + row['Name'] +
# ' for compound' + comp + ' with final value ' + str(
# float(row[alternative]) * float(self.identical_compounds[comp][alternative])))
found_alternative = True
point[comp] = float(row[alternative]) * float(
self.identical_compounds[comp][alternative])
continue
if not found_alternative:
point[comp] = 0
else:
point[comp] = 0
# print(f"Warning! {comp} was not found in the file {filename}")
self.complement_mapping(point)
# ### TEST BLOCK ###
# print("Using test block in update_points_and_targets. This should not be in deployment")
# if np.random.uniform() < 1:
# self.points.append(point)
# self.targets.append(f_targets[idx])
# ### TEST BLOCK ###
### REAL BLOCK ###
self.points.append(point)
self.targets.append(f_targets[idx])
### REAL BLOCK ###
if skipped != 0:
print('Warning: Ignored ' + str(skipped) + ' points.')
        assert len(self.targets) == len(self.points), "Mismatch in points and targets. " \
            "Error in Experiment.update_points_and_targets"
print('Total number of points in model: ' + str(len(self.points)))
def optimisation_target(self, frame):
return frame['hydrogen_evolution_micromol']
def new_model_available(self):
new_uuid = self.get_saved_model_uuid()
return not (self.model_uuid == new_uuid)
def get_saved_model_uuid(self):
fname = os.path.join(self.directory_path, 'optimizer.pickle')
if os.path.isfile(fname):
with open(fname, 'rb') as handle:
data = pickle.load(handle)
new_uuid = data['uuid']
                return new_uuid
return uuid.uuid4()
def clean_and_generate(exp, batches_to_generate, multiprocessing=1, perform_clean=False, sampler='greedy'):
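    # helper used by watch_queue(): optionally clear the queue, ask the model for
    # enough suggestions to fill the requested number of mini-batches (minus the
    # control experiments), then submit them batch by batch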
if (perform_clean):
exp.clean_queue()
KMBBO_args = {'multiprocessing': multiprocessing,
'n_slice': 500}
greedy_args = {'multiprocessing': multiprocessing,
'n_iter': 500,
'n_warmup': 10000,
'kappa': 1.5}
capitalist_args = {'multiprocessing': multiprocessing,
'exp_mean': 2.5,
'n_splits': 14,
'n_iter': 250,
'n_warmup': 1000
}
start_time = time()
### Choose your own adventure ###
if sampler == 'KMBBO':
batch = exp.generate_batch(batch_size=batches_to_generate * (exp.MINI_BATCH - len(exp.controls)),
sampler='KMBBO', **KMBBO_args)
elif sampler == 'greedy':
batch = exp.generate_batch(batch_size=batches_to_generate * (exp.MINI_BATCH - len(exp.controls)),
sampler='greedy', **greedy_args)
elif sampler == 'capitalist':
batch = exp.generate_batch(batch_size=batches_to_generate * (exp.MINI_BATCH - len(exp.controls)),
sampler='capitalist', **capitalist_args)
else:
raise ValueError("No sampler named {}".format(sampler))
print("Batch was generated in {:.2f} minutes. Submitting.\n".format((time() - start_time) / 60))
# add constants
for i in range(len(batch)):
batch[i].update(exp.constants)
for i in range(batches_to_generate):
exp.register_mini_batch(batch[i * (exp.MINI_BATCH - len(exp.controls)):(i + 1) * (
exp.MINI_BATCH - len(exp.controls))] + exp.controls)
def watch_completed(lag_time=900):
'''
Monitors completed folder, and generates model with a lag time
Arguments
--------
    lag_time: integer, seconds to wait after a newly discovered file before generating the model.
This lag should be greater than the lag between completed MINI_BATCHES in a BATCH.
'''
exp = Experiment()
completed_dir = os.path.join(exp.directory_path, 'completed')
n_files = 0
for f in os.listdir(completed_dir):
if os.path.isfile(os.path.join(completed_dir, f)): n_files += 1
# Automatically generate model at restart
exp.clear_previous_model()
while True:
count = 0
for f in os.listdir(completed_dir):
if os.path.isfile(os.path.join(completed_dir, f)):
count += 1
if count > n_files:
print("New completed files detected. Waiting {} seconds to train new model.".format(lag_time))
n_files = count
sleep(lag_time)
exp.generate_model()
print(
"New model trained. Old model has been saved as ./models/state_{}.pickle".format(exp.batch_number - 1))
sleep(Experiment.SLEEP_DELAY)
def watch_queue(multiprocessing=1, sampler='greedy'):
'''
Monitors runqueue folder, and generates a batch based on existing model
or creates a fresh model if none exists.
'''
exp = Experiment()
exp.model_uuid = exp.get_saved_model_uuid()
while True:
# case 1: not enough batches in queue
if exp.queue_size() < exp.BATCH_FILES:
print("There are less than required files in the queue. Generating a new batches.\n", end='Time is ')
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
missing_files = exp.BATCH_FILES - exp.queue_size()
clean_and_generate(exp, missing_files, multiprocessing, False, sampler)
# case 2: new model
elif exp.new_model_available():
print("A new model has been generated. Generating new batches.\n", end='Time is ')
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
clean_and_generate(exp, exp.BATCH_FILES, multiprocessing, True, sampler)
sleep(Experiment.SLEEP_DELAY)
if __name__ == "__main__":
# try:
# p1 = multiprocessing.Process(target=watch_completed, args=(360,)) #Delay for model building when finding new data
# p1.start()
# sleep(Experiment.SLEEP_DELAY)
# p2 = multiprocessing.Process(target=watch_queue, args=(7,'capitalist',)) #CPUs used for batch generation and sampler choice, Search strategy
# p2.start()
# except:
# tb = traceback.format_exc()
# print(tb)
# ### DEBUGGING LINES ###
# p1 = multiprocessing.Process(target=watch_completed, args=(900,)) #Delay for model building when finding new data
# p1.start()
# sleep(Experiment.SLEEP_DELAY)
# p2 = multiprocessing.Process(target=watch_queue, args=(4,'KMBBO',)) #CPUs used for batch generation
# p2.start()
# ## IN SERIAL ###
try:
os.remove('optimizer.pickle') # Clean start
except OSError:
pass
watch_queue(1, 'capitalist')
## DEBUGGING LINES ###
|
server.py
|
import socket
from threading import Thread
import sys
import signal
MESSAGE_SIZE = 1024
sock = None
separator_token = "<SEP>" # we will use this to separate the client name & message
client_sockets = None
def sigterm_handler(_signum, _frame) -> None:
sys.exit(1)
def clean_up():
global sock
global client_sockets
# close client sockets
print("Clean up")
if not (client_sockets is None):
for cs in client_sockets:
cs.close()
# close server socket
if not (sock is None):
sock.close()
def listen_for_client(cs):
"""
This function keeps listening for messages from the `cs` socket
Whenever a message is received, broadcast it to all other connected clients
"""
global separator_token
global client_sockets
while True:
try:
# keep listening for a message from `cs` socket
msg = cs.recv(MESSAGE_SIZE).decode()
except Exception as e:
# client no longer connected
# remove it from the set
print(f"[!] Error: {e}")
print(f"Remove a socket")
client_sockets.remove(cs)
else:
# if we received a message, replace the <SEP>
# token with ": " for nice printing
msg = msg.replace(separator_token, ": ")
# iterate over all connected sockets
for client_socket in client_sockets:
# and send the message
client_socket.send(msg.encode())
def run_server():
global sock
global client_sockets
# server's IP address
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5002 # port we want to use
# initialize list/set of all connected client's sockets
client_sockets = set()
# create a TCP socket
sock = socket.socket()
# make the port reusable
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the socket to the address we specified
sock.bind((SERVER_HOST, SERVER_PORT))
# listen for incoming connections
sock.listen(5)
print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
while True:
print(f"Wait a connection")
# we keep listening for new connections all the time
client_socket, client_address = sock.accept()
print(f"[+] {client_address} connected.")
# add the new connected client to connected sockets
client_sockets.add(client_socket)
# start a new thread that listens for each client's messages
thr = Thread(target=listen_for_client, args=(client_socket,))
# make the thread daemon so it ends whenever the main thread ends
thr.daemon = True
# start the thread
thr.start()
def main():
# exit the process when a termination signal (SIGTERM) is received
signal.signal(signal.SIGTERM, sigterm_handler)
try:
run_server()
finally:
# ignore termination signals before moving on to the cleanup
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
clean_up()
# restore the default handling of termination signals
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
# when this file is executed directly, call the function below
if __name__ == "__main__":
sys.exit(main())
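# A matching client only needs to connect, send "name<SEP>text" strings and print
# whatever the server broadcasts back; a minimal sketch (the host and the client
# name below are placeholders, not part of this server):
"""
import socket
from threading import Thread

SERVER_HOST = "127.0.0.1"   # placeholder: wherever run_server() is listening
SERVER_PORT = 5002
SEP = "<SEP>"
NAME = "guest"              # placeholder client name

s = socket.socket()
s.connect((SERVER_HOST, SERVER_PORT))

def listen():
    # print every broadcast the server relays to us
    while True:
        print(s.recv(1024).decode())

Thread(target=listen, daemon=True).start()
while True:
    s.send(f"{NAME}{SEP}{input()}".encode())
"""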
|
_script_docker_python_loop_example.py
|
# type: ignore[attr-defined]
"""
This is a simplified example script which demonstrates the concept of how the XSOAR Server executes python integrations/scripts.
The XSOAR Server will run the docker container with a python script which it will use to execute the integration/script.
The communication with the script is done over stdin/stdout/stderr using a json based protocol. You can use this script
to experiment with simple scenarios and see how the execution is performed.
For example, to run a simple script which sends a log entry to the server by calling `demisto.log(...)`, run the following:
echo '{"script": "demisto.log(\"this is an example entry log\")", "integration": false, "native": false}' | \
docker run --rm -i -v `pwd`:/work -w /work demisto/python3:3.8.6.12176 python Utils/_script_docker_python_loop_example.py
"""
import os
import threading
import sys
import json
import traceback
if sys.version_info[0] < 3:
import Queue as queue # pylint: disable=E0401
else:
import queue
__read_thread = None
__input_queue = None
win = sys.platform.startswith('win')
if win:
__input_queue = queue.Queue()
def read_input_loop():
global __input_queue
while True:
line = sys.stdin.readline()
__input_queue.put(line)
if line == '':
break
def __readWhileAvailable():
if win:
# An ugly solution - just open a blocking thread to handle input
global __input_queue
global __read_thread
if not __read_thread:
__read_thread = threading.Thread(target=read_input_loop)
__read_thread.daemon = True
__read_thread.start()
buff = ''
# Now read from the queue. The first read blocks until a line is available.
buff += __input_queue.get()
return buff
else:
# Wait for the first char from stdin
buff = sys.stdin.readline()
# While available, read all the other chars
return buff
"""Demisto instance for scripts only"""
template_code = '''
from __future__ import print_function
import json
import uuid
import sys
class Demisto:
"""Wrapper class to interface with the Demisto server via stdin, stdout"""
def __init__(self, context):
self.callingContext = context
args = self.args()
if 'demisto_machine_learning_magic_key' in args:
import os
os.environ['DEMISTO_MACHINE_LEARNING_MAGIC_KEY'] = args['demisto_machine_learning_magic_key']
def log(self, msg):
json.dump({'type': 'entryLog', 'args': {'message': msg}}, sys.stdout)
sys.stdout.write('\\n')
sys.stdout.flush()
def investigation(self):
return self.callingContext[u'context'][u'Inv']
def incidents(self):
return self.callingContext[u'context'][u'Incidents']
def parentEntry(self):
return self.callingContext[u'context'][u'ParentEntry']
def context(self):
return self.callingContext[u'context'][u'ExecutionContext']
def args(self):
return self.callingContext.get(u'args', {})
def uniqueFile(self):
return str(uuid.uuid4())
def getFilePath(self, id):
return self.__do({'type': 'getFileByEntryID', 'command': 'getFilePath', 'args': {'id': id}})
def getLicenseID(self):
return self.__do({'type': 'executeCommand', 'command': 'getLicenseID', 'args': {}})['id']
def get(self, obj, field):
""" Get the field from the given dict using dot notation """
parts = field.split('.')
for part in parts:
if obj and part in obj:
obj = obj[part]
else:
return None
return obj
def gets(self, obj, field):
return str(self.get(obj, field))
def getArg(self, arg):
return self.get(self.callingContext, 'args.' + arg)
def execute(self, module, command, args):
return self.__do({'type': 'execute', 'module': module, 'command': command.strip(), 'args': args})
def executeCommand(self, command, args):
return self.__do({'type': 'executeCommand', 'command': command.strip(), 'args': args})
def demistoUrls(self):
return self.__do({'type': 'demistoUrls'})
def info(self, *args):
argsObj = {}
argsObj["args"] = list(args)
self.__do({'type': 'log', 'command': 'info', 'args': argsObj})
def error(self, *args):
argsObj = {}
argsObj["args"] = list(args)
self.__do({'type': 'log', 'command': 'error', 'args': argsObj})
def exception(self, ex):
return self.__do({'type': 'exception', 'command': 'exception', 'args': ex})
def debug(self, *args):
argsObj = {}
argsObj["args"] = list(args)
self.__do({'type': 'log', 'command': 'debug', 'args': argsObj})
def getAllSupportedCommands(self):
return self.__do({'type': 'getAllModulesSupportedCmds'})
def getModules(self):
return self.__do({'type': 'getAllModules'})
def setContext(self, name, value):
return self.__do({'type': 'setContext', 'name': name, 'value': value})
def dt(self, data, q):
return self.__do({'type': 'dt', 'name': q, 'value': data})['result']
def __do(self, cmd):
# Watch out: there is another definition like this
# prepare command to send to server
json.dump(cmd, sys.stdout)
sys.stdout.write('\\n')
# send command to Demisto server
sys.stdout.flush()
# wait to receive response from Demisto server
data = globals()['__readWhileAvailable']()
if data.find('$$##') > -1:
raise ValueError(data[4:])
return json.loads(data)
def convert(self, results):
""" Convert whatever result into entry """
if type(results) is dict:
if 'Contents' in results and 'ContentsFormat' in results:
return results
else:
return {'Type': 1, 'Contents': json.dumps(results), 'ContentsFormat': 'json'}
if type(results) is list:
res = []
for r in results:
res.append(self.convert(r))
return res
if sys.version_info.major >= 3 and type(results) is bytes:
return {'Type': 1, 'Contents': results.decode('utf-8'), 'ContentsFormat': 'text'}
return {'Type': 1, 'Contents': str(results), 'ContentsFormat': 'text'}
def results(self, results):
res = []
converted = self.convert(results)
if type(converted) is list:
res = converted
else:
res.append(converted)
json.dump({'type': 'result', 'results': res}, sys.stdout)
sys.stdout.write('\\n')
sys.stdout.flush()
demisto = Demisto(context)
try:
import __builtin__
from StringIO import StringIO
except ImportError:
# Python 3
import builtins as __builtin__
from io import StringIO
def demisto_print(*args):
global demisto
output = StringIO()
__builtin__.print(*args, file=output)
result = output.getvalue().strip()
demisto.log(result)
print = demisto_print
###CODE_HERE###
'''
"""Demisto instance for integrations only"""
integ_template_code = '''
from __future__ import print_function
import json
import uuid
import sys
class Demisto:
"""Wrapper class to interface with the Demisto server via stdin, stdout"""
def __init__(self, context):
self.callingContext = context
args = self.args()
if 'demisto_machine_learning_magic_key' in args:
import os
os.environ['DEMISTO_MACHINE_LEARNING_MAGIC_KEY'] = args['demisto_machine_learning_magic_key']
def log(self, msg):
json.dump({'type': 'entryLog', 'args': {'message': 'Integration log: ' + msg}}, sys.stdout)
sys.stdout.write('\\n')
sys.stdout.flush()
def investigation(self):
return self.callingContext[u'context'][u'Inv']
def incidents(self):
return self.callingContext[u'context'][u'Incidents']
def parentEntry(self):
return self.callingContext[u'context'][u'ParentEntry']
def context(self):
return self.callingContext[u'context'][u'ExecutionContext']
def integrationInstance(self):
return self.callingContext[u'context'][u'IntegrationInstance']
def args(self):
return self.callingContext.get(u'args', {})
def uniqueFile(self):
return str(uuid.uuid4())
def getFilePath(self, id):
return self.__do({'type': 'getFileByEntryID', 'command': 'getFilePath', 'args': {'id': id}})
def getLastRun(self):
return self.__do({'type': 'executeCommand', 'command': 'getLastRun', 'args': {}})
def setLastRun(self, value):
return self.__do({'type': 'executeCommand', 'command': 'setLastRun', 'args': {'value': value}})
def getIntegrationContext(self):
return self.__do({'type': 'executeCommand', 'command': 'getIntegrationContext', 'args': {}})
def setIntegrationContext(self, value):
return self.__do({'type': 'executeCommand', 'command': 'setIntegrationContext', 'args': {'value': value}})
def getLicenseID(self):
return self.__do({'type': 'executeCommand', 'command': 'getLicenseID', 'args': {}})['id']
def params(self):
return self.callingContext.get(u'params', {})
def command(self):
return self.callingContext.get(u'command', '')
def get(self, obj, field):
""" Get the field from the given dict using dot notation """
parts = field.split('.')
for part in parts:
if obj and part in obj:
obj = obj[part]
else:
return None
return obj
def demistoUrls(self):
return self.__do({'type': 'demistoUrls'})
def info(self, *args):
argsObj = {}
argsObj["args"] = list(args)
self.__do({'type': 'log', 'command': 'info', 'args': argsObj})
def error(self, *args):
argsObj = {}
argsObj["args"] = list(args)
self.__do({'type': 'log', 'command': 'error', 'args': argsObj})
def debug(self, *args):
argsObj = {}
argsObj["args"] = list(args)
self.__do({'type': 'log', 'command': 'debug', 'args': argsObj})
def gets(self, obj, field):
return str(self.get(obj, field))
def getArg(self, arg):
return self.get(self.callingContext, 'args.' + arg)
def getParam(self, p):
return self.get(self.callingContext, 'params.' + p)
def dt(self, data, q):
return self.__do({'type': 'dt', 'name': q, 'value': data})['result']
def __do(self, cmd):
# Watch out: there is another definition like this
json.dump(cmd, sys.stdout)
sys.stdout.write('\\n')
sys.stdout.flush()
data = globals()['__readWhileAvailable']()
if data.find('$$##') > -1:
raise ValueError(data[4:])
return json.loads(data)
def __convert(self, results):
""" Convert whatever result into entry """
if type(results) is dict:
if 'Contents' in results and 'ContentsFormat' in results:
return results
else:
return {'Type': 1, 'Contents': json.dumps(results), 'ContentsFormat': 'json'}
if type(results) is list:
res = []
for r in results:
res.append(self.__convert(r))
return res
if sys.version_info.major >= 3 and type(results) is bytes:
return {'Type': 1, 'Contents': results.decode('utf-8'), 'ContentsFormat': 'text'}
return {'Type': 1, 'Contents': str(results), 'ContentsFormat': 'text'}
def results(self, results):
res = []
converted = self.__convert(results)
if type(converted) is list:
res = converted
else:
res.append(converted)
json.dump({'type': 'result', 'results': res}, sys.stdout)
sys.stdout.write('\\n')
sys.stdout.flush()
def incidents(self, incidents):
self.results({'Type': 1, 'Contents': json.dumps(incidents), 'ContentsFormat': 'json'})
def credentials(self, credentials):
self.results({'Type': 1, 'Contents': json.dumps(credentials), 'ContentsFormat': 'json'})
demisto = Demisto(context)
try:
import __builtin__
from StringIO import StringIO
except ImportError:
# Python 3
import builtins as __builtin__
from io import StringIO
def demisto_print(*args):
global demisto
output = StringIO()
__builtin__.print(*args, file=output)
result = output.getvalue().strip()
demisto.log(result)
print = demisto_print
###CODE_HERE###
'''
# rollback file system to its previous state
# delete home dir and tmp dir
# notifies demisto server that the current executed script is completed
# and the process is ready to execute the next script
def send_script_completed():
json.dump({'type': 'completed'}, sys.stdout)
sys.stdout.write('\n')
sys.stdout.flush()
def send_script_exception(exc_type, exc_value, exc_traceback):
ex_string = traceback.format_exception(exc_type, exc_value, exc_traceback)
if ex_string == 'None\n':
ex_string = str(exc_value)
json.dump({'type': 'exception', 'args': {'exception': ex_string}}, sys.stdout)
sys.stdout.write('\n')
sys.stdout.flush()
def send_pong():
json.dump({'type': 'pong'}, sys.stdout)
sys.stdout.write('\n')
sys.stdout.flush()
# receives ping and sends back pong until we get something else;
# then the function stops and returns the received string
def do_ping_pong():
while True:
ping = __readWhileAvailable()
if ping == 'ping\n':
send_pong() # return pong to server to indicate that everything is fine
else:
return ping
backup_env_vars = {}
for key in os.environ:
backup_env_vars[key] = os.environ[key]
def rollback_system():
os.environ = {}
for key in backup_env_vars:
os.environ[key] = backup_env_vars[key]
while True:
contextString = do_ping_pong()
if contextString == '':
# finish executing python
break
contextJSON = json.loads(contextString)
code_string = contextJSON['script']
contextJSON.pop('script', None)
is_integ_script = contextJSON['integration']
complete_code = ''
if is_integ_script:
complete_code = integ_template_code.replace('###CODE_HERE###', code_string)
else:
complete_code = template_code.replace('###CODE_HERE###', code_string)
try:
code = compile(complete_code, '<string>', 'exec')
sub_globals = {
'__readWhileAvailable': __readWhileAvailable,
'context': contextJSON,
'win': win
}
exec(code, sub_globals, sub_globals) # guardrails-disable-line # pylint: disable=W0122
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
send_script_exception(exc_type, exc_value, exc_traceback)
except SystemExit:
# print 'Will not stop on sys.exit(0)'
pass
rollback_system()
# ping back to Demisto server that script is completed
send_script_completed()
# if the script is running on native python, terminate the process after the script finishes
is_python_native = contextJSON['native']
if is_python_native:
break
if __read_thread:
__read_thread.join(timeout=1)
|
using_tips_5.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
50 topics
9 chapters
1. Course introduction
2. Topics on data structures
3. Topics on iterators and generators
4. Topics on string processing
5. Topics on file I/O
6. Topics on data encoding and processing
7. Topics on classes and objects
8. Topics on multithreading and multiprocessing
9. Topics on decorators
"""
"""
Chapter 1  Course Introduction
1-1 Course introduction
1-2 Guide to the WebIDE online coding tool
Chapter 2  Advanced Training on Data Structures and Algorithms
2-1 How to filter data in lists, dicts and sets by condition
2-2 How to name the elements of a tuple to improve readability
2-3 How to count the frequency of elements in a sequence
2-4 How to sort the items of a dict by value
2-5 How to quickly find the common keys of multiple dicts
2-6 How to keep a dict ordered
2-7 How to implement a user history feature (at most n entries)
Chapter 3  Training on Iteration and Reverse Iteration
3-1 How to implement iterable and iterator objects (1)
3-2 How to implement iterable and iterator objects (2)
3-3 How to implement an iterable object with a generator function
3-4 How to iterate in reverse and how to implement reverse iteration
3-5 How to slice an iterator
3-6 How to iterate over multiple iterables in a single for statement
Chapter 4  Training on String Processing Techniques
4-1 How to split a string containing multiple delimiters
4-2 How to check whether string a starts or ends with string b
4-3 How to adjust the format of text within a string
4-4 How to join many small strings into one large string
4-5 How to left-, right- and center-align a string
4-6 How to strip unwanted characters from a string
Chapter 5  Training on Efficient File I/O
5-1 How to read and write text files
5-2 How to handle binary files
5-3 How to set file buffering
5-4 How to map a file into memory
5-5 How to access file status
5-6 How to use temporary files
Chapter 6  Efficient Parsing and Building of csv, json, xml and excel
6-1 How to read and write csv data
6-2 How to read and write json data
6-3 How to parse a simple xml document
6-4 How to build an xml document
6-5 How to read and write excel files
Chapter 7  Advanced Techniques for Classes and Objects
7-1 How to derive from built-in immutable types and modify instantiation behaviour
7-2 How to save memory when creating a large number of instances
7-3 How to make objects support context management
7-4 How to create manageable object attributes
7-5 How to make a class support comparison operations
7-6 How to type-check instance attributes with descriptors
7-7 How to manage memory in circular data structures
7-8 How to call a method through the string of its name
Chapter 8  Advanced Training on Multithreaded Programming
8-1 How to use multiple threads
8-2 How to communicate between threads
8-3 How to send event notifications between threads
8-4 How to use thread-local data
8-5 How to use a thread pool
8-6 How to use multiple processes
Chapter 9  Advanced Training on Decorators
9-1 How to use function decorators
9-2 How to preserve metadata of a decorated function
9-3 How to define a decorator with parameters
9-4 How to implement a function decorator with modifiable attributes
9-5 How to define decorators inside a class
"""
"""
8-1 How to use multiple threads
Because of the GIL, Python threads are only suitable for I/O-bound work; they cannot deliver true multithreaded parallelism.
Example:
Through http://table.finance.yahoo.com/table.csv?s=000001.sz we fetch the csv data file of one Chinese stock from Yahoo; now we want to download the csv data of several stocks and convert each file to xml.
How can threads be used to speed up the downloading and processing?
Solution:
Create threads with threading.Thread from the standard library, and download and convert one stock's data in each thread.
"""
'''
import csv
from xml.etree.ElementTree import Element,ElementTree
import requests
from io import StringIO
def download(url):
response = requests.get(url,timeout=3)
if response.ok:
# an in-memory object that supports file operations
return StringIO(response.content)
def xml_pretty(e,level=0):
if len(e) > 0:
e.text = '\n' + '\t' * (level + 1)
for child in e:
xml_pretty(child,level + 1)
child.tail = child.tail[:-1]
e.tail = '\n' + '\t' * level
def csvToXml(scsv,fxml):
reader = csv.reader(scsv)
headers = reader.next()
headers = map(lambda h:h.replace(' ',''),headers)
root = Element('Data')
for row in reader:
eRow = Element('Row')
root.append(eRow)
for tag,text in zip(headers,row):
e = Element(tag)
e.text = text
eRow.append(e)
xml_pretty(root)
et = ElementTree(root)
et.write(fxml)
if __name__ == '__main__':
url = 'http://table.finance.yahoo.com/table.csv?s=000001.sz'
rf = download(url)
if rf:
with open('000001.xml','w') as wf:
csvToXml(rf,wf)
'''
'''
import csv
from xml.etree.ElementTree import Element,ElementTree
import requests
from io import StringIO
def download(url):
response = requests.get(url,timeout=3)
if response.ok:
# an in-memory object that supports file operations
return StringIO(response.content)
def xml_pretty(e,level=0):
if len(e) > 0:
e.text = '\n' + '\t' * (level + 1)
for child in e:
xml_pretty(child,level + 1)
child.tail = child.tail[:-1]
e.tail = '\n' + '\t' * level
def csvToXml(scsv,fxml):
reader = csv.reader(scsv)
headers = reader.next()
headers = map(lambda h:h.replace(' ',''),headers)
root = Element('Data')
for row in reader:
eRow = Element('Row')
root.append(eRow)
for tag,text in zip(headers,row):
e = Element(tag)
e.text = text
eRow.append(e)
xml_pretty(root)
et = ElementTree(root)
et.write(fxml)
if __name__ == '__main__':
for sid in range(1,11):
print('Download...(%d)' % sid)
url = 'http://table.finance.yahoo.com/table.csv?s=%s.sz'
url %= str(sid).rjust(6,'0')
rf = download(url)
if rf is None:continue
print('Convert to XML...(%d)' % sid)
fname = str(sid).rjust(6,'0') + '.xml'
with open(fname,'w')as wf:
csvToXml(rf,wf)
'''
'''
import csv
from xml.etree.ElementTree import Element,ElementTree
import requests
from io import StringIO
def download(url):
response = requests.get(url,timeout=3)
if response.ok:
# an in-memory object that supports file operations
return StringIO(response.content)
def xml_pretty(e,level=0):
if len(e) > 0:
e.text = '\n' + '\t' * (level + 1)
for child in e:
xml_pretty(child,level + 1)
child.tail = child.tail[:-1]
e.tail = '\n' + '\t' * level
def csvToXml(scsv,fxml):
reader = csv.reader(scsv)
headers = reader.next()
headers = map(lambda h:h.replace(' ',''),headers)
root = Element('Data')
for row in reader:
eRow = Element('Row')
root.append(eRow)
for tag,text in zip(headers,row):
e = Element(tag)
e.text = text
eRow.append(e)
xml_pretty(root)
et = ElementTree(root)
et.write(fxml)
def handle(sid):
print('Download...(%d)' % sid)
url = 'http://table.finance.yahoo.com/table.csv?s=%s.sz'
url %= str(sid).rjust(6, '0')
rf = download(url)
if rf is None: return
print('Convert to XML...(%d)' % sid)
fname = str(sid).rjust(6, '0') + '.xml'
with open(fname, 'w')as wf:
csvToXml(rf, wf)
from threading import Thread
# t = Thread(target=handle,args=(1,))
# t.start()
# print('main thread')
class MyThread(Thread):
def __init__(self,sid):
Thread.__init__(self)
self.sid = sid
def run(self):
handle(self.sid)
threads = []
for i in range(1,11):
t = MyThread(i)
threads.append(t)
# start() will invoke the run() method
t.start()
for t in threads:
# wait for each thread to finish before the main thread continues
t.join()
print('main Thread')
'''
"""
8-2 How to communicate between threads
Example:
Through http://table.finance.yahoo.com/table.csv?s=000001.sz we fetch the csv data file of one Chinese stock from Yahoo; now we want to download the csv data of several stocks and convert each file to xml.
Because of the global interpreter lock, multithreading does not speed up CPU-bound work, so we restructure the program:
1. Use several DownloadThread threads for downloading (I/O-bound)
2. Use one ConvertThread thread for the conversion (CPU-bound work)
3. The download threads hand the downloaded data to the convert thread safely
Solution:
Use Queue.Queue from the standard library (queue.Queue in Python 3); it is a thread-safe queue: the download threads put data into it and the convert thread gets data from it.
"""
"""
import csv
from xml.etree.ElementTree import Element, ElementTree
import requests
from io import StringIO
from threading import Thread
# producer/consumer model, using the standard library's thread-safe queue
from queue import Queue
def xml_pretty(e, level=0):
if len(e) > 0:
e.text = '\n' + '\t' * (level + 1)
for child in e:
xml_pretty(child, level + 1)
child.tail = child.tail[:-1]
e.tail = '\n' + '\t' * level
class DownloadThread(Thread):
def __init__(self,sid,queue):
Thread.__init__(self)
self.sid = sid
self.url = 'http://table.finance.yahoo.com/table.csv?s=%s.sz'
self.url %= str(sid).rjust(6, '0')
self.queue = queue
def download(self,url):
response = requests.get(url, timeout=3)
if response.ok:
# an in-memory object that supports file operations
return StringIO(response.content)
def run(self):
print('Download',self.sid)
# 1. download
data = self.download(self.url)
# 2. hand sid and data to the convert thread
# the queue does the locking, so this is thread-safe
self.queue.put((self.sid,data))
class ConvertThread(Thread):
def __init__(self,queue):
Thread.__init__(self)
self.queue = queue
def csvToXml(self,scsv, fxml):
reader = csv.reader(scsv)
headers = reader.next()
headers = map(lambda h: h.replace(' ', ''), headers)
root = Element('Data')
for row in reader:
eRow = Element('Row')
root.append(eRow)
for tag, text in zip(headers, row):
e = Element(tag)
e.text = text
eRow.append(e)
xml_pretty(root)
et = ElementTree(root)
et.write(fxml)
def run(self):
while True:
sid,data = self.queue.get()
print('Convert',sid)
if sid == -1:
break
if data:
fname = str(sid).rjust(6,'0') + '.xml'
with open(fname, 'w')as wf:
self.csvToXml(data, wf)
q = Queue()
dThreads = [DownloadThread(i,q) for i in range(1,11)]
cThread = ConvertThread(q)
for t in dThreads:
t.start()
cThread.start()
for t in dThreads:
t.join()
q.put((-1,None))
"""
"""
8-3 How to send event notifications between threads
Example:
Through http://table.finance.yahoo.com/table.csv?s=000001.sz we fetch the csv data file of one Chinese stock from Yahoo; now we want to download the csv data of several stocks and convert each file to xml.
Additional requirement:
Add a thread that packs the converted xml files: every time the convert thread has produced a certain number of xml files (say 100), it notifies the packing thread to pack them into one xxx.tgz archive and delete the xml files; when packing is finished, the packing thread notifies the convert thread in turn, and conversion continues.
Solution:
For event notification between threads use threading.Event from the standard library:
1. The waiting side calls wait to wait for the event
2. The notifying side calls set to signal the event
"""
"""
import csv
from xml.etree.ElementTree import Element, ElementTree
import requests
from io import StringIO
from threading import Thread,Event
from queue import Queue
def xml_pretty(e, level=0):
if len(e) > 0:
e.text = '\n' + '\t' * (level + 1)
for child in e:
xml_pretty(child, level + 1)
child.tail = child.tail[:-1]
e.tail = '\n' + '\t' * level
class DownloadThread(Thread):
def __init__(self, sid, queue):
Thread.__init__(self)
self.sid = sid
self.url = 'http://table.finance.yahoo.com/table.csv?s=%s.sz'
self.url %= str(sid).rjust(6, '0')
self.queue = queue
def download(self, url):
response = requests.get(url, timeout=3)
if response.ok:
# an in-memory object that supports file operations
return StringIO(response.content)
def run(self):
print('Download', self.sid)
# 1. download
data = self.download(self.url)
# 2. hand sid and data to the convert thread
# the queue does the locking, so this is thread-safe
self.queue.put((self.sid, data))
class ConvertThread(Thread):
def __init__(self, queue,cEvent,tEvent):
Thread.__init__(self)
self.queue = queue
self.cEvent = cEvent
self.tEvent = tEvent
def csvToXml(self, scsv, fxml):
reader = csv.reader(scsv)
headers = reader.next()
headers = map(lambda h: h.replace(' ', ''), headers)
root = Element('Data')
for row in reader:
eRow = Element('Row')
root.append(eRow)
for tag, text in zip(headers, row):
e = Element(tag)
e.text = text
eRow.append(e)
xml_pretty(root)
et = ElementTree(root)
et.write(fxml)
def run(self):
count = 0
while True:
sid, data = self.queue.get()
print('Convert', sid)
if sid == -1:
self.cEvent.set()
self.tEvent.wait()
break
if data:
fname = str(sid).rjust(6, '0') + '.xml'
with open(fname, 'w')as wf:
self.csvToXml(data, wf)
count += 1
if count == 5:
self.cEvent.set()
self.tEvent.wait()
self.tEvent.clear()
count = 0
import tarfile
import os
class TarThread(Thread):
def __init__(self,cEvent,tEvent):
Thread.__init__(self)
self.count = 0
self.cEvent = cEvent
self.tEvent = tEvent
self.setDaemon(True)
def tarXML(self):
self.count += 1
tfname = '%d.tgz' % self.count
tf = tarfile.open(tfname, 'w:gz')
for fname in os.listdir('.'):
if fname.endswith('.xml'):
tf.add(fname)
os.remove(fname)
tf.close()
if not tf.members:
os.remove(tfname)
def run(self):
while True:
self.cEvent.wait()
self.tarXML()
self.cEvent.clear()
self.tEvent.set()
if __name__ == '__main__':
q = Queue()
dThreads = [DownloadThread(i, q) for i in range(1, 11)]
cEvent = Event()
tEvent = Event()
cThread = ConvertThread(q,cEvent,tEvent)
tThread = TarThread(cEvent,tEvent)
tThread.start()
for t in dThreads:
t.start()
cThread.start()
for t in dThreads:
t.join()
q.put((-1, None))
print('main thread')
"""
"""
8-4 How to use thread-local data
Example:
We implemented a web video-surveillance server: the server captures camera frames and clients receive them in a browser over http. The server pushes data (multipart/x-mixed-replace) to each client over one long-lived tcp connection, which permanently occupies a thread, so a single-threaded server cannot serve multiple clients.
Rewrite the program so that each client request is handled in its own thread and multiple clients are supported.
Solution:
The threading.local function creates a thread-local data space; attributes stored on it exist independently for each thread.
"""
"""
import os,cv2,time,struct,threading
from socketserver import TCPServer,ThreadingTCPServer
from http.server import HTTPServer,BaseHTTPRequestHandler
from threading import Thread,RLock
from select import select
class JpegStreamer(Thread):
def __init__(self,camera):
Thread.__init__(self)
self.cap = cv2.VideoCapture(camera)
self.lock = RLock()
self.pipes = {}
def register(self):
pr,pw = os.pipe()
self.lock.acquire()
self.pipes[pr] = pw
self.lock.release()
return pr
def unregister(self,pr):
self.lock.acquire()
self.pipes.pop(pr)
self.lock.release()
pr.close()
pw.close()
def capture(self):
cap = self.cap
while cap.isOpened():
ret,frame = cap.read()
if ret:
ret,data = cv2.imencode('.jpg',frame,(cv2.IMWRITE_JPEG_QUALITY,40))
yield data.tobytes()
def send(self,frame):
n = struct.pack('l',len(frame))
self.lock.acquire()
if len(self.pipes):
_,pipes,_ = select([],self.pipes.itervalues(),[],1)
for pipe in pipes:
os.write(pipe,n)
os.write(pipe,frame)
self.lock.release()
def run(self):
for frame in self.capture():
self.send(frame)
class JpegRetriever(object):
def __init__(self,streamer):
self.streamer = streamer
self.local = threading.local()
def retriver(self):
while True:
ns = os.read(self.local.pipe,8)
n = struct.unpack('l',ns)[0]
data = os.read(self.local.pipe,n)
yield data
def __enter__(self):
if hasattr(self.local,'pipe'):
raise RuntimeError()
self.local.pipe = self.streamer.register()
return self.retriver()
def __exit__(self, *args):
self.streamer.unregister(self.local.pipe)
del self.local.pipe
return True
class Handler(BaseHTTPRequestHandler):
retriever = None
@staticmethod
def setJpegRetriever(retriever):
Handler.retriever = retriever
def do_GET(self):
if self.retriever is None:
raise RuntimeError('no retriver')
if self.path != '/':
return
self.send_response(200)
self.send_header("Content-type",'multipart/x-mixed-replace;boundary=abcde')
self.end_headers()
with self.retriever as frames:
for frame in frames:
self.send_frame(frame)
def send_frame(self):
self.write.write('--abcde\r\n')
self.write.write('Content-Type:image/jpeg\r\n')
self.write.write('Content-Length:%d\r\n\r\n' % len(frame))
self.write.write(frame)
if __name__ == '__main__':
streamer = JpegStreamer(0)
streamer.start()
retriever = JpegRetriever(streamer)
Handler.setJpegRetriever(retriever)
print('Start server...')
httpd = ThreadingTCPServer(('',9000),Handler)
httpd.serve_forever()
"""
"""
8-5 How to use a thread pool
Example:
In the multithreaded web video-surveillance server above we need to limit the number of client connections, to prevent a malicious user from opening a huge number of connections and forcing the server to create so many threads that it exhausts its resources and crashes.
Use a thread pool instead of creating a new thread for every request.
Solution:
Python 3 ships a thread-pool implementation.
Use ThreadPoolExecutor from the standard library's concurrent.futures; its submit and map methods start tasks on the pool's threads.
"""
"""
import os, cv2, time, struct, threading
from socketserver import TCPServer, ThreadingTCPServer
from http.server import HTTPServer, BaseHTTPRequestHandler
from concurrent.futures import ThreadPoolExecutor
from threading import Thread, RLock
from select import select
class JpegStreamer(Thread):
def __init__(self, camera):
Thread.__init__(self)
self.cap = cv2.VideoCapture(camera)
self.lock = RLock()
self.pipes = {}
def register(self):
pr, pw = os.pipe()
self.lock.acquire()
self.pipes[pr] = pw
self.lock.release()
return pr
def unregister(self, pr):
self.lock.acquire()
self.pipes.pop(pr)
self.lock.release()
pr.close()
pw.close()
def capture(self):
cap = self.cap
while cap.isOpened():
ret, frame = cap.read()
if ret:
ret, data = cv2.imencode('.jpg', frame, (cv2.IMWRITE_JPEG_QUALITY, 40))
yield data.tobytes()
def send(self, frame):
n = struct.pack('l', len(frame))
self.lock.acquire()
if len(self.pipes):
_, pipes, _ = select([], self.pipes.itervalues(), [], 1)
for pipe in pipes:
os.write(pipe, n)
os.write(pipe, frame)
self.lock.release()
def run(self):
for frame in self.capture():
self.send(frame)
class JpegRetriever(object):
def __init__(self, streamer):
self.streamer = streamer
self.local = threading.local()
def retriver(self):
while True:
ns = os.read(self.local.pipe, 8)
n = struct.unpack('l', ns)[0]
data = os.read(self.local.pipe, n)
yield data
def __enter__(self):
if hasattr(self.local, 'pipe'):
raise RuntimeError()
self.local.pipe = self.streamer.register()
return self.retriver()
def __exit__(self, *args):
self.streamer.unregister(self.local.pipe)
del self.local.pipe
return True
class Handler(BaseHTTPRequestHandler):
retriever = None
@staticmethod
def setJpegRetriever(retriever):
Handler.retriever = retriever
def do_GET(self):
if self.retriever is None:
raise RuntimeError('no retriver')
if self.path != '/':
return
self.send_response(200)
self.send_header("Content-type", 'multipart/x-mixed-replace;boundary=abcde')
self.end_headers()
with self.retriever as frames:
for frame in frames:
self.send_frame(frame)
def send_frame(self,frame):
s = '--abcde\r\n'
s += 'Content-Type:image/jpeg\r\n'
self.write.write(s.encode('ascii'))
self.write.write(frame)
class ThreadingPoolTCPServer(ThreadingTCPServer):
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True,max_thread_num=100):
super().__init__(server_address, RequestHandlerClass, bind_and_activate)
self.executor = ThreadPoolExecutor(max_thread_num)
def process_request(self, request, client_address):
self.executor.submit(self.process_request_thread,request,client_address)
if __name__ == '__main__':
streamer = JpegStreamer(0)
streamer.start()
retriever = JpegRetriever(streamer)
Handler.setJpegRetriever(retriever)
print('Start server...')
httpd = ThreadingPoolTCPServer(('', 9000), Handler,max_thread_num=3)
httpd.serve_forever()
"""
"""
8-6 How to use multiple processes
Example:
Because of Python's global interpreter lock (GIL), only one thread runs in the interpreter at any moment, so Python threads are not suited to CPU-bound tasks.
To handle CPU-bound tasks, use a multiprocess model instead.
Solution:
Use multiprocessing.Process from the standard library: it starts a child process to run a task, and its interface, inter-process communication and synchronization are similar to threading.Thread.
"""
"""
from multiprocessing import Process
from threading import Thread
def isArmstrong(n):
a,t = [],n
while t > 0:
a.append(t % 10)
t //= 10
k = len(a)
return sum(x ** k for x in a) == n
def findArmstrong(a,b):
print(a,b)
res = [k for k in range(a,b) if isArmstrong(k)]
print('%s - %s:%s' % (a,b,res))
def findByThread(*argslist):
workers = []
for args in argslist:
worker = Thread(target=findArmstrong,args=args)
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
def findByProcess(*argslist):
workers = []
for args in argslist:
worker = Process(target=findArmstrong,args=args)
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
if __name__ == '__main__':
import time
start = time.time()
# multiprocessing is noticeably faster here
findByProcess((20000000,25000000),(25000000,30000000))
#findByThread((20000000,25000000),(25000000,30000000))
print(time.time() - start)
"""
"""
9-1 How to use function decorators
Example:
Sometimes we want to add the same feature to several functions, for example timing, logging, or caching of results.
We do not want to add exactly the same code inside every function; what is a good solution?
Solution:
Define a decorator function that produces a new function which adds the feature on top of the original, and use it in place of the original function.
Problem 1: the Fibonacci sequence 1, 1, 2, 3, 5, 8, 13, 21, ... is the sequence in which, from the third term on, every term is the sum of the two preceding terms; compute the n-th term.
def fibonacci(n,cache=None):
if cache is None:
cache = {}
if n in cache:
return cache[n]
if n <= 1:
return 1
cache[n] = fibonacci(n - 1,cache) + fibonacci(n - 2,cache)
return cache[n]
def memo(func):
cache = {}
def wrap(*args):
if args not in cache:
cache[args] = func(*args)
return cache[args]
return wrap
@memo
def fibonacci(n):
if n <= 1:
return 1
return fibonacci(n - 1) + fibonacci(n - 2)
# fibonacci = memo(fibonacci)  (this line is equivalent to using @memo)
print(fibonacci(50))
Problem 2: a staircase has 10 steps; going from the bottom to the top you may climb 1 to 3 steps at a time and may not go back. How many different ways are there to climb it?
@memo
def climb(n,steps):
count = 0
if n == 0:
count = 1
elif n > 0:
for step in steps:
count += climb(n - step, steps)
return count
print(climb(10,(1,2,3)))
"""
"""
9-2 How to preserve metadata of a decorated function
Example:
A function object carries some metadata about the function, for example:
f.__name__ : the function's name
f.__doc__ : the function's docstring
f.__module__ : the name of the module the function belongs to
f.__dict__ : the attribute dict
f.__defaults__ : the tuple of default arguments
After applying a decorator, accessing these attributes shows the metadata of the inner wrapper function, and the original function's metadata is lost. How can this be solved?
Solution:
Decorate the inner wrapper with wraps from the standard library's functools; it copies selected attributes of the original function onto the wrapper.
def f(a):
'''f function'''
return a * 2
print(f.__name__)
g = f
print(g.__name__)
print(f.__doc__)
print(f.__module__)
print(f.__defaults__)
def f(a,b=1,c=[]):
print(a,b,c)
# __defaults__ holds the default argument values
print(f.__defaults__)
f.__defaults__[1].append('abc')
# avoid mutable objects such as [] as default argument values
print(f(100))
print(f.__closure__)
def f():
a = 2
return lambda k: a ** k
g = f()
print(g.__closure__)
c = g.__closure__[0]
print(c.cell_contents)
from functools import update_wrapper,wraps,WRAPPER_ASSIGNMENTS,WRAPPER_UPDATES
def mydecorator(func):
@wraps(func)
def wrapper(*args,**kargs):
'''wrapper function'''
print('In wrapper')
func(*args, **kargs)
#update_wrapper(wrapper,func,('__name__','__doc__'),('__dict__',))
#update_wrapper(wrapper, func)
return wrapper
@mydecorator
def example():
'''example function'''
print('In example')
print(example.__name__)
print(example.__doc__)
#print(WRAPPER_UPDATES)
#print(WRAPPER_ASSIGNMENTS)
"""
"""
9-3 How to define a decorator with parameters
Example:
Implement a decorator that checks the argument types of the decorated function. The decorator's own arguments state the expected types, and a type mismatch detected at call time raises an exception:
@typeassert(str,int,int)
def f(a,b,c):
...
@typeassert(y=list)
def g(x,y):
...
Solution:
Extract the function signature with inspect.signature().
A decorator with parameters is a decorator customised by its arguments; think of it as a factory that produces decorators: each call to typeassert returns a specific decorator, which is then used to decorate other functions.
from inspect import signature
def typeassert(*ty_args,**ty_kargs):
def decorator(func):
# func -> a,b
# d = {'a':int,'b':str}
sig = signature(func)
# bind only part of the parameters
btypes = sig.bind_partial(*ty_args,**ty_kargs).arguments
def wrapper(*args,**kargs):
# arg in d,instance(arg,d[arg])
for name,obj in sig.bind(*args, **kargs).arguments.items():
if name in btypes:
if not isinstance(obj,btypes[name]):
raise TypeError('"%s" must be "%s"' %(name,btypes[name]))
return func(*args,**kargs)
return wrapper
return decorator
@typeassert(int,str,list)
def f(a,b,c):
print(a,b,c)
f(1,'abc',[1,2,3])
f(1,2,[1,2,3])
from inspect import signature
def f(a,b,c=1):
pass
sig = signature(f)
print(sig.parameters)
a = sig.parameters['a']
print(a.name)
print(a.kind)
print(a.default)
c = sig.parameters['c']
print(c.default)
print(sig.bind(str,int,int))
bargs = sig.bind(str,int,int)
print(bargs.arguments)
print(bargs.arguments['a'])
print(bargs.arguments['b'])
#print(sig.bind(str))  # bind() checks all required parameters, so they must all be supplied
print(sig.bind_partial(str))
# bind_partial() validates only the parameters that are given
"""
"""
9-4 How to implement a function decorator with modifiable attributes
Example:
To analyse which functions in a program are expensive, we define a function decorator with a timeout parameter that does the following:
1. measures the run time of each single call of the decorated function
2. writes the call to the log whenever the run time exceeds timeout
3. allows the value of timeout to be modified at run time
Solution:
Attach an extra function to the wrapper that modifies the free variable used inside the closure; in Python 3, use nonlocal to access a variable binding in the enclosing scope.
"""
"""
#python2
from functools import wraps
import time
import logging
def warn(timeout):
timeout = [timeout]
def decorator(func):
def wrapper(*args,**kargs):
start = time.time()
res = func(*args,**kargs)
used = time.time() - start
if used > timeout[0]:
msg = '"%s": %s > %s' %(func.__name__,used,timeout[0])
logging.warn(msg)
return res
def setTimeout(k):
# nonlocal timeout
timeout[0] = k
wrapper.setTimeout = setTimeout
return wrapper
return decorator
from random import randint
@warn(1.5)
def test():
print('In test')
while randint(0,1):
time.sleep(0.5)
for _ in range(30):
test()
test.setTimeout(1)
for _ in range(30):
test()
"""
"""
#python3
from functools import wraps
import time
import logging
def warn(timeout):
def decorator(func):
def wrapper(*args, **kargs):
start = time.time()
res = func(*args, **kargs)
used = time.time() - start
if used > timeout:
msg = '"%s": %s > %s' % (func.__name__, used, timeout)
logging.warn(msg)
return res
def setTimeout(k):
nonlocal timeout
timeout = k
wrapper.setTimeout = setTimeout
return wrapper
return decorator
from random import randint
@warn(1.5)
def test():
print('In test')
while randint(0, 1):
time.sleep(0.5)
for _ in range(30):
test()
test.setTimeout(1)
for _ in range(30):
test()
"""
"""
9-5 How to define decorators inside a class
Example:
Implement a decorator that records function-call information in a log:
1. write each call's time, duration and call count to the log
2. allow decorated functions to be grouped, with each group's call information going to a different log
3. allow parameters, such as the log format, to be changed dynamically
4. allow log output to be switched on and off dynamically
Solution:
To make the decorator more flexible to use, make an instance method of a class the decorator; the wrapper function then holds the instance object, which makes it easy to modify attributes and extend functionality.
"""
"""
import logging
import time
from time import localtime,time,strftime,sleep
class CallingInfo(object):
def __init__(self,name):
log = logging.getLogger(name)
log.setLevel(logging.INFO)
fh = logging.FileHandler(name + '.log')
log.addHandler(fh)
log.info('Start'.center(50,'-'))
self.log = log
self.formatter = '%(func)s -> [%(time)s - %(used)s - %(ncalls)s]'
def info(self,func):
def wrapper(*args,**kargs):
wrapper.ncalls += 1
lt = localtime()
start = time()
res = func(*args,**kargs)
used = time() - start
info = {}
info['func'] = func.__name__
info['time'] = strftime('%x %X',lt)
info['used'] = used
info['ncalls'] = wrapper.ncalls
msg = self.formatter % info
self.log.info(msg)
return res
wrapper.ncalls = 0
return wrapper
def setFormatter(self,formatter):
self.formatter = formatter
def turnOn(self):
self.log.setLevel(logging.INFO)
def turnOff(self):
self.log.setLevel(logging.WARN)
cinfo1 = CallingInfo('mylog1')
cinfo2 = CallingInfo('mylog2')
cinfo1.setFormatter('%(func)s -> [%(time)s - %(ncalls)s]')
cinfo2.turnOff()
@cinfo1.info
def f():
print('in f')
@cinfo1.info
def g():
print('in g')
@cinfo2.info
def h():
print('in h')
from random import choice
for _ in range(50):
choice([f,g,h])()
sleep(choice([0.5,1,2]))
"""
|
module.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import cgi
import collections
import cStringIO
import functools
import httplib
import logging
import math
import os.path
import random
import re
import string
import threading
import time
import urllib
import urlparse
import wsgiref.headers
from concurrent import futures
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import custom_runtime
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import gcs_server
from google.appengine.tools.devappserver2 import go_runtime
from google.appengine.tools.devappserver2 import health_check_service
from google.appengine.tools.devappserver2 import http_proxy
from google.appengine.tools.devappserver2 import http_runtime
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
try:
from google.appengine.tools.devappserver2 import java_runtime
except ImportError:
java_runtime = None
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import php_runtime
from google.appengine.tools.devappserver2 import python_runtime
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import vm_runtime_factory
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
_REQUEST_ID_HASH_LENGTH = 8
_THREAD_POOL = thread_executor.ThreadExecutor()
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
[application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.NOBUILD_FILES_CHANGED,
# The server must be restarted when the handlers change because files
# appearing in static content handlers make them unavailable to the
# runtime.
application_configuration.HANDLERS_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED])
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')
# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')
_SHUTDOWN_TIMEOUT = 30
_MAX_UPLOAD_MEGABYTES = 32
_MAX_UPLOAD_BYTES = _MAX_UPLOAD_MEGABYTES * 1024 * 1024
_MAX_UPLOAD_NO_TRIGGER_BAD_CLIENT_BYTES = 64 * 1024 * 1024
_REDIRECT_HTML = '''\
<HTML><HEAD><meta http-equiv="content-type" content="%(content-type)s">
<TITLE>%(status)d Moved</TITLE></HEAD>
<BODY><H1>%(status)d Moved</H1>
The document has moved
<A HREF="%(correct-url)s">here</A>.
</BODY></HTML>'''
_TIMEOUT_HTML = '<HTML><BODY>503 - This request has timed out.</BODY></HTML>'
# Factor applied to the request timeouts to compensate for the
# long vmengines reloads. TODO eventually remove that once we have
# optimized the vm_engine reload.
_VMENGINE_SLOWDOWN_FACTOR = 2
# polling time on module changes.
_CHANGE_POLLING_MS = 1000
# specific resource prefixes we don't want to see polluting the info level on
# access.
_QUIETER_RESOURCES = ('/_ah/health',)
# TODO: Remove after the Files API is really gone.
_FILESAPI_DEPRECATION_WARNING_PYTHON = (
'The Files API is deprecated and will soon be removed. Please use the'
' Google Cloud Storage Client library instead. Migration documentation is'
' available here: https://cloud.google.com/appengine/docs'
'/python/googlecloudstorageclient/migrate')
_FILESAPI_DEPRECATION_WARNING_JAVA = (
'The Google Cloud Storage Java API is deprecated and will soon be'
' removed. Please use the Google Cloud Storage Client library instead.'
' Migration documentation is available here: https://cloud.google.com'
'/appengine/docs/java/googlecloudstorageclient/migrate')
_FILESAPI_DEPRECATION_WARNING_GO = (
'The Files API is deprecated and will soon be removed. Please use the'
' Google Cloud Storage Client library instead. Documentation is'
' available here: https://cloud.google.com/appengine/docs'
'/go/googlecloudstorageclient')
def _static_files_regex_from_handlers(handlers):
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
class InteractiveCommandError(errors.Error):
pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
"""A URL handler that will cause the request to be dispatched to an instance.
This handler is special in that it does not have a working handle() method
since the Module's dispatch logic is used to select the appropriate Instance.
"""
def __init__(self, url_map):
"""Initializer for _ScriptHandler.
Args:
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in script handler: %s' % (url_map.url, e))
super(_ScriptHandler, self).__init__(url_map, url_pattern)
self.url_map = url_map
def handle(self, match, environ, start_response):
"""This is a dummy method that should never be called."""
raise NotImplementedError()
class Module(object):
"""The abstract base for all instance pool implementations."""
_RUNTIME_INSTANCE_FACTORIES = {
'go': go_runtime.GoRuntimeInstanceFactory,
'php': php_runtime.PHPRuntimeInstanceFactory,
'php55': php_runtime.PHPRuntimeInstanceFactory,
'python': python_runtime.PythonRuntimeInstanceFactory,
'python27': python_runtime.PythonRuntimeInstanceFactory,
'custom': custom_runtime.CustomRuntimeInstanceFactory,
# TODO: uncomment for GA.
# 'vm': vm_runtime_factory.VMRuntimeInstanceFactory,
}
if java_runtime:
_RUNTIME_INSTANCE_FACTORIES.update({
'java': java_runtime.JavaRuntimeInstanceFactory,
'java7': java_runtime.JavaRuntimeInstanceFactory,
})
_MAX_REQUEST_WAIT_TIME = 10
def _get_wait_time(self):
"""Gets the wait time before timing out a request.
Returns:
The timeout value in seconds.
"""
if self.vm_enabled():
return self._MAX_REQUEST_WAIT_TIME * _VMENGINE_SLOWDOWN_FACTOR
return self._MAX_REQUEST_WAIT_TIME
def _create_instance_factory(self,
module_configuration):
"""Create an instance.InstanceFactory.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
Returns:
An instance.InstanceFactory subclass that can be used to create instances
with the provided configuration.
Raises:
RuntimeError: if the configuration specifies an unknown runtime.
"""
# TODO: Remove this when we have sandboxing disabled for all
# runtimes.
if (os.environ.get('GAE_LOCAL_VM_RUNTIME') and
module_configuration.runtime == 'vm'):
runtime = module_configuration.effective_runtime
else:
runtime = module_configuration.runtime
# TODO: a bad runtime should be caught before we get here.
if runtime not in self._RUNTIME_INSTANCE_FACTORIES:
raise RuntimeError(
'Unknown runtime %r; supported runtimes are %s.' %
(runtime,
', '.join(
sorted(repr(k) for k in self._RUNTIME_INSTANCE_FACTORIES))))
instance_factory = self._RUNTIME_INSTANCE_FACTORIES[runtime]
return instance_factory(
request_data=self._request_data,
runtime_config_getter=self._get_runtime_config,
module_configuration=module_configuration)
def _create_url_handlers(self):
"""Constructs URLHandlers based on the module configuration.
Returns:
A list of url_handler.URLHandlers that can handle requests as
described in the given configuration.
"""
handlers = []
# Add special URL handlers (taking precedence over user-defined handlers)
url_pattern = '/%s$' % login.LOGIN_URL_RELATIVE
handlers.append(wsgi_handler.WSGIHandler(login.application,
url_pattern))
url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
# The blobstore upload handler forwards successful requests to the
# dispatcher.
handlers.append(
wsgi_handler.WSGIHandler(blob_upload.Application(self._dispatcher),
url_pattern))
url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(channel.application, url_pattern))
url_pattern = '/%s' % gcs_server.GCS_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(gcs_server.Application(), url_pattern))
url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(
endpoints.EndpointsDispatcher(self._dispatcher), url_pattern))
found_start_handler = False
found_warmup_handler = False
# Add user-defined URL handlers
for url_map in self._module_configuration.handlers:
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
handlers.append(_ScriptHandler(url_map))
if not found_start_handler and re.match('%s$' % url_map.url,
'/_ah/start'):
found_start_handler = True
if not found_warmup_handler and re.match('%s$' % url_map.url,
'/_ah/warmup'):
found_warmup_handler = True
elif handler_type == appinfo.STATIC_FILES:
handlers.append(
static_files_handler.StaticFilesHandler(
self._module_configuration.application_root,
url_map))
elif handler_type == appinfo.STATIC_DIR:
handlers.append(
static_files_handler.StaticDirHandler(
self._module_configuration.application_root,
url_map))
else:
assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
# Add a handler for /_ah/start if no script handler matches.
if not found_start_handler:
handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
# Add a handler for /_ah/warmup if no script handler matches and warmup is
# enabled.
if (not found_warmup_handler and
'warmup' in (self._module_configuration.inbound_services or [])):
handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
return handlers
def _get_runtime_config(self):
"""Returns the configuration for the runtime.
Returns:
A runtime_config_pb2.Config instance representing the configuration to be
passed to an instance. NOTE: This does *not* include the instance_id
field, which must be populated elsewhere.
Raises:
ValueError: The runtime type is "custom" with vm: true and
--custom_entrypoint is not specified.
"""
runtime_config = runtime_config_pb2.Config()
runtime_config.app_id = self._module_configuration.application
runtime_config.version_id = self._module_configuration.version_id
if self._threadsafe_override is None:
runtime_config.threadsafe = self._module_configuration.threadsafe or False
else:
runtime_config.threadsafe = self._threadsafe_override
runtime_config.application_root = (
self._module_configuration.application_root)
if not self._allow_skipped_files:
runtime_config.skip_files = str(self._module_configuration.skip_files)
runtime_config.static_files = _static_files_regex_from_handlers(
self._module_configuration.handlers)
runtime_config.api_host = self._api_host
runtime_config.api_port = self._api_port
runtime_config.server_port = self._balanced_port
runtime_config.stderr_log_level = self._runtime_stderr_loglevel
runtime_config.datacenter = 'us1'
runtime_config.auth_domain = self._auth_domain
if self._max_instances is not None:
runtime_config.max_instances = self._max_instances
for library in self._module_configuration.normalized_libraries:
runtime_config.libraries.add(name=library.name, version=library.version)
for key, value in (self._module_configuration.env_variables or {}).items():
runtime_config.environ.add(key=str(key), value=str(value))
if self._cloud_sql_config:
runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)
if (self._php_config and
self._module_configuration.runtime.startswith('php')):
runtime_config.php_config.CopyFrom(self._php_config)
if (self._python_config and
self._module_configuration.runtime.startswith('python')):
runtime_config.python_config.CopyFrom(self._python_config)
if (self._java_config and
self._module_configuration.runtime.startswith('java')):
runtime_config.java_config.CopyFrom(self._java_config)
if self._vm_config:
runtime_config.vm_config.CopyFrom(self._vm_config)
# If the effective runtime is "custom" and --custom_entrypoint is not set,
# bail out early; otherwise, load custom into runtime_config.
if self._module_configuration.effective_runtime == 'custom':
if not self._custom_config.custom_entrypoint:
raise ValueError('The --custom_entrypoint flag must be set for '
'custom runtimes')
else:
runtime_config.custom_config.CopyFrom(self._custom_config)
runtime_config.vm = self._module_configuration.runtime == 'vm'
return runtime_config
def _maybe_restart_instances(self, config_changed, file_changed):
"""Restarts instances. May avoid some restarts depending on policy.
One of config_changed or file_changed must be True.
Args:
config_changed: True if the configuration for the application has changed.
file_changed: True if any file relevant to the application has changed.
"""
if not config_changed and not file_changed:
return
logging.debug('Restarting instances.')
policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'
with self._condition:
instances_to_quit = set()
for inst in self._instances:
if (config_changed or
(policy == instance.ALWAYS) or
(policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
instances_to_quit.add(inst)
self._instances -= instances_to_quit
for inst in instances_to_quit:
inst.quit(allow_async=True)
def _handle_changes(self, timeout=0):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
file_changes = self._watcher.changes(timeout)
if file_changes:
logging.info(
'[%s] Detected file changes:\n %s', self.name,
'\n '.join(sorted(file_changes)))
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
self._maybe_restart_instances(
config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
file_changed=bool(file_changes))
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
custom_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIModule listens for RPC requests on.
api_port: The port that APIModule listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
        which runtime log messages should be written to stderr. See
        devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
        runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
custom_config: A runtime_config_pb2.CustomConfig instance. If None, or
'custom_entrypoint' is not set, then attempting to instantiate a
custom runtime module will result in an error.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
        file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
self._module_configuration = module_configuration
self._name = module_configuration.module_name
self._version = module_configuration.major_version
self._app_name_external = module_configuration.application_external_name
self._host = host
self._api_host = api_host
self._api_port = api_port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._balanced_port = balanced_port
self._php_config = php_config
self._python_config = python_config
self._java_config = java_config
self._custom_config = custom_config
self._cloud_sql_config = cloud_sql_config
self._vm_config = vm_config
self._request_data = request_data
self._allow_skipped_files = allow_skipped_files
self._threadsafe_override = threadsafe_override
self._dispatcher = dispatcher
self._max_instances = max_instances
self._automatic_restarts = automatic_restarts
self._use_mtime_file_watcher = use_mtime_file_watcher
self._default_version_port = default_version_port
self._port_registry = port_registry
if self.vm_enabled():
self._RUNTIME_INSTANCE_FACTORIES['vm'] = (
vm_runtime_factory.VMRuntimeInstanceFactory)
self._instance_factory = self._create_instance_factory(
self._module_configuration)
if self._automatic_restarts:
self._watcher = file_watcher.get_file_watcher(
[self._module_configuration.application_root] +
self._instance_factory.get_restart_directories(),
self._use_mtime_file_watcher)
else:
self._watcher = None
self._handler_lock = threading.Lock()
self._handlers = self._create_url_handlers()
self._balanced_module = wsgi_server.WsgiServer(
(self._host, self._balanced_port), self)
self._quit_event = threading.Event() # Set when quit() has been called.
# TODO: Remove after the Files API is really gone.
if self._module_configuration.runtime.startswith('python'):
self._filesapi_warning_message = _FILESAPI_DEPRECATION_WARNING_PYTHON
elif self._module_configuration.runtime.startswith('java'):
self._filesapi_warning_message = _FILESAPI_DEPRECATION_WARNING_JAVA
elif self._module_configuration.runtime.startswith('go'):
self._filesapi_warning_message = _FILESAPI_DEPRECATION_WARNING_GO
else:
self._filesapi_warning_message = None
def vm_enabled(self):
# TODO: change when GA
return self._vm_config
@property
def name(self):
"""The name of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._name
@property
def version(self):
"""The version of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._version
@property
def app_name_external(self):
"""The external application name of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._app_name_external
@property
def ready(self):
"""The module is ready to handle HTTP requests."""
return self._balanced_module.ready
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on."""
assert self._balanced_module.ready, 'balanced module not running'
return self._balanced_module.port
@property
def host(self):
"""The host that the HTTP server(s) for this Module is listening on."""
return self._host
@property
def balanced_address(self):
"""The address of the balanced HTTP server e.g. "localhost:8080"."""
if self.balanced_port != 80:
return '%s:%s' % (self.host, self.balanced_port)
else:
return self.host
@property
def max_instance_concurrent_requests(self):
"""The number of concurrent requests that each Instance can handle."""
return self._instance_factory.max_concurrent_requests
@property
def module_configuration(self):
"""The application_configuration.ModuleConfiguration for this module."""
return self._module_configuration
@property
def runtime(self):
"""Runtime property for this module."""
return self._module_configuration.runtime
@property
def effective_runtime(self):
"""Effective_runtime property for this module."""
return self._module_configuration.effective_runtime
@property
def mvm_logs_enabled(self):
"""Returns True iff it's a Managed VM module and logs are enabled."""
return self._vm_config and self._vm_config.enable_logs
@property
def supports_interactive_commands(self):
"""True if the module can evaluate arbitrary code and return the result."""
return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
inst=None):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def _no_handler_for_request(self, environ, start_response, request_id):
"""Handle a HTTP request that does not match any user-defined handlers."""
self._insert_log_message('No handlers matched this URL.', 2, request_id)
start_response('404 Not Found', [('Content-Type', 'text/html')])
return [
'<html><head><title>Not Found</title></head>',
('<body>The url "%s" does not match any handlers.</body></html>' %
cgi.escape(environ['PATH_INFO']))
]
def _error_response(self, environ, start_response, status, body=None):
if body:
start_response(
'%d %s' % (status, httplib.responses[status]),
[('Content-Type', 'text/html'),
('Content-Length', str(len(body)))])
return body
start_response('%d %s' % (status, httplib.responses[status]), [])
return []
def _handle_request(self, environ, start_response, inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen. Setting inst is not meaningful if the
request does not match a "script" handler.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst:
try:
environ['SERVER_PORT'] = str(self.get_instance_port(inst.instance_id))
except request_info.NotSupportedWithAutoScalingError:
environ['SERVER_PORT'] = str(self.balanced_port)
else:
environ['SERVER_PORT'] = str(self.balanced_port)
if 'HTTP_HOST' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'].split(':', 1)[0]
environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
environ['SERVER_NAME'], self._default_version_port)
runtime_config = self._get_runtime_config()
# Python monkey-patches out os.environ because some environment variables
    # are set per-request (REQUEST_ID_HASH and REQUEST_LOG_ID for example).
# This means that although these environment variables could be set once
# at startup, they must be passed in during each request.
if (runtime_config.vm and
self._module_configuration.effective_runtime == 'python27'):
environ.update(http_runtime.get_vm_environment_variables(
self._module_configuration, runtime_config))
with self._request_data.request(
environ,
self._module_configuration) as request_id:
should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
environ['PATH_INFO'])
environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
if should_log_request:
environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
if 'HTTP_HOST' in environ:
hostname = environ['HTTP_HOST']
elif environ['SERVER_PORT'] == '80':
hostname = environ['SERVER_NAME']
else:
hostname = '%s:%s' % (environ['SERVER_NAME'], environ['SERVER_PORT'])
if environ.get('QUERY_STRING'):
resource = '%s?%s' % (urllib.quote(environ['PATH_INFO']),
environ['QUERY_STRING'])
else:
resource = urllib.quote(environ['PATH_INFO'])
email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
method = environ.get('REQUEST_METHOD', 'GET')
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice.start_request(
request_id=request_id,
user_request_id=environ['REQUEST_LOG_ID'],
ip=environ.get('REMOTE_ADDR', ''),
app_id=self._module_configuration.application,
version_id=self._module_configuration.major_version,
nickname=email.split('@', 1)[0],
user_agent=environ.get('HTTP_USER_AGENT', ''),
host=hostname,
method=method,
resource=resource,
http_version=http_version,
module=self._module_configuration.module_name)
def wrapped_start_response(status, response_headers, exc_info=None):
response_headers.append(('Server',
http_runtime_constants.SERVER_SOFTWARE))
if should_log_request:
headers = wsgiref.headers.Headers(response_headers)
status_code = int(status.split(' ', 1)[0])
content_length = int(headers.get('Content-Length', 0))
# TODO: Remove after the Files API is really gone.
if (self._filesapi_warning_message is not None
and self._request_data.was_filesapi_used(request_id)):
logging.warning(self._filesapi_warning_message)
self._insert_log_message(self._filesapi_warning_message,
2, request_id)
logservice.end_request(request_id, status_code, content_length)
if any(resource.startswith(prefix) for prefix in _QUIETER_RESOURCES):
level = logging.DEBUG
else:
level = logging.INFO
logging.log(level, '%(module_name)s: '
'"%(method)s %(resource)s %(http_version)s" '
'%(status)d %(content_length)s',
{'module_name': self.name,
'method': method,
'resource': resource,
'http_version': http_version,
'status': status_code,
'content_length': content_length or '-'})
return start_response(status, response_headers, exc_info)
content_length = int(environ.get('CONTENT_LENGTH', '0'))
if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'DELETE', 'TRACE') and
content_length != 0):
# CONTENT_LENGTH may be empty or absent.
wrapped_start_response('400 Bad Request', [])
return ['"%s" requests may not contain bodies.' %
environ['REQUEST_METHOD']]
# Do not apply request limits to internal _ah handlers (known to break
# blob uploads).
# TODO: research if _ah handlers need limits.
if (not environ.get('REQUEST_URI', '/').startswith('/_ah/') and
content_length > _MAX_UPLOAD_BYTES):
# As allowed by the RFC, cherrypy closes the connection for 413 errors.
# Most clients do not handle this correctly and treat the page as
# unavailable if the connection is closed before the client can send
# all the data. To match the behavior of production, for large files
# < 64M read the data to prevent the client bug from being triggered.
if content_length <= _MAX_UPLOAD_NO_TRIGGER_BAD_CLIENT_BYTES:
environ['wsgi.input'].read(content_length)
status = '%d %s' % (httplib.REQUEST_ENTITY_TOO_LARGE,
httplib.responses[httplib.REQUEST_ENTITY_TOO_LARGE])
wrapped_start_response(status, [])
return ['Upload limited to %d megabytes.' % _MAX_UPLOAD_MEGABYTES]
with self._handler_lock:
handlers = self._handlers
try:
path_info = environ['PATH_INFO']
path_info_normal = self._normpath(path_info)
if path_info_normal != path_info:
# While a 301 Moved Permanently makes more sense for non-normal
# paths, prod issues a 302 so we do the same.
return self._redirect_302_path_info(path_info_normal,
environ,
wrapped_start_response)
if request_type in (instance.BACKGROUND_REQUEST,
instance.INTERACTIVE_REQUEST,
instance.SHUTDOWN_REQUEST):
app = functools.partial(self._handle_script_request,
url_map=_DUMMY_URLMAP,
match=_EMPTY_MATCH,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
for handler in handlers:
match = handler.match(path_info)
if match:
auth_failure = handler.handle_authorization(environ,
wrapped_start_response)
if auth_failure is not None:
return auth_failure
if isinstance(handler, _ScriptHandler):
app = functools.partial(self._handle_script_request,
url_map=handler.url_map,
match=match,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
else:
return handler.handle(match, environ, wrapped_start_response)
return self._no_handler_for_request(environ, wrapped_start_response,
request_id)
except StandardError, e:
if logging.getLogger('').isEnabledFor(logging.DEBUG):
logging.exception('Request to %r failed', path_info)
else:
logging.error('Request to %r failed', path_info)
wrapped_start_response('500 Internal Server Error', [], e)
return []
def _async_shutdown_instance(self, inst, port):
return _THREAD_POOL.submit(self._shutdown_instance, inst, port)
def _shutdown_instance(self, inst, port):
force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
try:
environ = self.build_request_environ(
'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.SHUTDOWN_REQUEST)
logging.debug('Sent shutdown request: %s', inst)
except:
logging.exception('Internal error while handling shutdown request.')
finally:
time_to_wait = force_shutdown_time - time.time()
self._quit_event.wait(time_to_wait)
inst.quit(force=True)
@staticmethod
def _quote_querystring(qs):
"""Quote a query string to protect against XSS."""
parsed_qs = urlparse.parse_qs(qs, keep_blank_values=True)
    # urlparse.parse_qs returns a dictionary with values as lists, while
    # urllib.urlencode does not handle those, so expand the dictionary into a
    # list of (key, value) pairs.
expanded_qs = []
for key, multivalue in parsed_qs.items():
for value in multivalue:
expanded_qs.append((key, value))
return urllib.urlencode(expanded_qs)
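  # Illustrative example for _quote_querystring (assumed, not original code):
  #   _quote_querystring('q=<b>') == 'q=%3Cb%3E'
  # Because urlparse.parse_qs returns an unordered dict, the relative order of
  # distinct keys in the re-encoded string is not guaranteed.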
def _redirect_302_path_info(self, updated_path_info, environ, start_response):
"""Redirect to an updated path.
Respond to the current request with a 302 Found status with an updated path
but preserving the rest of the request.
Notes:
- WSGI does not make the fragment available so we are not able to preserve
it. Luckily prod does not preserve the fragment so it works out.
Args:
updated_path_info: the new HTTP path to redirect to.
environ: WSGI environ object.
start_response: WSGI start response callable.
Returns:
WSGI-compatible iterable object representing the body of the response.
"""
correct_url = urlparse.urlunsplit(
(environ['wsgi.url_scheme'],
environ['HTTP_HOST'],
urllib.quote(updated_path_info),
self._quote_querystring(environ['QUERY_STRING']),
None))
content_type = 'text/html; charset=utf-8'
output = _REDIRECT_HTML % {
'content-type': content_type,
'status': httplib.FOUND,
'correct-url': correct_url
}
start_response('%d %s' % (httplib.FOUND, httplib.responses[httplib.FOUND]),
[('Content-Type', content_type),
('Location', correct_url),
('Content-Length', str(len(output)))])
return output
@staticmethod
def _normpath(path):
"""Normalize the path by handling . and .. directory entries.
Normalizes the path. A directory entry of . is just dropped while a
directory entry of .. removes the previous entry. Note that unlike
os.path.normpath, redundant separators remain in place to match prod.
Args:
path: an HTTP path.
Returns:
A normalized HTTP path.
"""
normalized_path_entries = []
for entry in path.split('/'):
if entry == '..':
if normalized_path_entries:
normalized_path_entries.pop()
elif entry != '.':
normalized_path_entries.append(entry)
return '/'.join(normalized_path_entries)
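  # Illustrative examples for _normpath (assumed, not original code):
  #   _normpath('/a/./b/../c')  -> '/a/c'
  #   _normpath('/a//b')        -> '/a//b'  (redundant separators are kept)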
def _insert_log_message(self, message, level, request_id):
logs_group = log_service_pb.UserAppLogGroup()
log_line = logs_group.add_log_line()
log_line.set_timestamp_usec(int(time.time() * 1e6))
log_line.set_level(level)
log_line.set_message(message)
request = log_service_pb.FlushRequest()
request.set_logs(logs_group.Encode())
response = api_base_pb.VoidProto()
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice._Dynamic_Flush(request, response, request_id)
@staticmethod
def generate_request_log_id():
"""Generate a random REQUEST_LOG_ID.
Returns:
A string suitable for use as a REQUEST_LOG_ID. The returned string is
variable length to emulate the production values, which encapsulate
the application id, version and some log state.
"""
return ''.join(random.choice(_LOWER_HEX_DIGITS)
for _ in range(random.randrange(30, 100)))
@staticmethod
def generate_request_id_hash():
"""Generate a random REQUEST_ID_HASH."""
return ''.join(random.choice(_UPPER_HEX_DIGITS)
for _ in range(_REQUEST_ID_HASH_LENGTH))
def set_num_instances(self, instances):
"""Sets the number of instances for this module to run.
Args:
instances: An int containing the number of instances to run.
Raises:
request_info.NotSupportedWithAutoScalingError: Always.
"""
raise request_info.NotSupportedWithAutoScalingError()
def get_num_instances(self):
"""Returns the number of instances for this module to run."""
raise request_info.NotSupportedWithAutoScalingError()
def suspend(self):
"""Stops the module from serving requests."""
raise request_info.NotSupportedWithAutoScalingError()
def resume(self):
"""Restarts the module."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance_address(self, instance_id):
"""Returns the address of the HTTP server for an instance."""
return '%s:%s' % (self.host, self.get_instance_port(instance_id))
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
raise request_info.NotSupportedWithAutoScalingError()
@property
def supports_individually_addressable_instances(self):
return False
def create_interactive_command_module(self):
"""Returns a InteractiveCommandModule that can be sent user commands."""
if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
return InteractiveCommandModule(self._module_configuration,
self._host,
self._balanced_port,
self._api_host,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_config,
self._python_config,
self._java_config,
self._custom_config,
self._cloud_sql_config,
self._vm_config,
self._default_version_port,
self._port_registry,
self._request_data,
self._dispatcher,
self._use_mtime_file_watcher,
self._allow_skipped_files,
self._threadsafe_override)
else:
raise NotImplementedError('runtime does not support interactive commands')
def build_request_environ(self, method, relative_url, headers, body,
source_ip, port, fake_login=False):
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (self.host, port)
else:
host = self.host
environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': self.host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
if fake_login:
environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ
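  # Minimal usage sketch for build_request_environ (illustrative; mirrors the
  # warmup/shutdown callers below, with `mod` and `inst` standing in for an
  # already-constructed Module and Instance):
  #   environ = mod.build_request_environ(
  #       'GET', '/_ah/warmup', [], '', '0.1.0.3', mod.balanced_port,
  #       fake_login=True)
  #   mod._handle_request(environ, start_response_utils.null_start_response,
  #                       inst=inst, request_type=instance.READY_REQUEST)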
class AutoScalingModule(Module):
"""A pool of instances that is autoscaled based on traffic."""
# The minimum number of seconds to wait, after quitting an idle instance,
# before quitting another idle instance.
_MIN_SECONDS_BETWEEN_QUITS = 60
# The time horizon to use when calculating the number of instances required
# to serve the current level of traffic.
_REQUIRED_INSTANCE_WINDOW_SECONDS = 60
_DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='0.5s',
min_idle_instances=1,
max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
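  # Illustrative examples for _parse_pending_latency (assumed, not original
  # code): '0.1s' -> 0.1 and '750ms' -> 0.75.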
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling):
for attribute in automatic_scaling.ATTRIBUTES:
if getattr(automatic_scaling, attribute) in ('automatic', None):
setattr(automatic_scaling, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
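  # Note (illustrative, not from the original source): attributes left as
  # 'automatic' or None in app.yaml are filled in from
  # _DEFAULT_AUTOMATIC_SCALING above, e.g. an unset min_pending_latency
  # becomes '0.1s' and an unset max_idle_instances becomes 1000.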
def _process_automatic_scaling(self, automatic_scaling):
if automatic_scaling:
self._populate_default_automatic_scaling(automatic_scaling)
else:
automatic_scaling = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling.max_pending_latency)
self._min_idle_instances = int(automatic_scaling.min_idle_instances)
self._max_idle_instances = int(automatic_scaling.max_idle_instances)
def __init__(self, **kwargs):
"""Initializer for AutoScalingModule.
Args:
**kwargs: Arguments to forward to Module.__init__.
"""
kwargs['vm_config'] = None
super(AutoScalingModule, self).__init__(**kwargs)
self._process_automatic_scaling(
self._module_configuration.automatic_scaling)
self._instances = set() # Protected by self._condition.
    # A deque containing (time, num_outstanding_instance_requests) 2-tuples.
# This is used to track the maximum number of outstanding requests in a time
# period. Protected by self._condition.
self._outstanding_request_history = collections.deque()
self._num_outstanding_instance_requests = 0 # Protected by self._condition.
# The time when the last instance was quit in seconds since the epoch.
self._last_instance_quit_time = 0 # Protected by self._condition.
self._condition = threading.Condition() # Protects instance state.
self._instance_adjustment_thread = threading.Thread(
target=self._loop_adjusting_instances,
name='Instance Adjustment')
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._instance_adjustment_thread.start()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._instance_adjustment_thread.join()
# The instance adjustment thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
with self._condition:
instances = self._instances
self._instances = set()
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if request_type != instance.READY_REQUEST:
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
logging.debug('Dispatching request to %s', inst)
return inst.handle(environ, start_response, url_map, match, request_id,
request_type)
finally:
with self._condition:
if request_type != instance.READY_REQUEST:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
start_time = time.time()
timeout_time = start_time + self._min_pending_latency
# Loop until an instance is available to handle the request.
while True:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if not inst:
inst = self._add_instance(permit_warmup=False)
if not inst:
# No instance is available nor can a new one be created, so loop
# waiting for one to be free.
timeout_time = time.time() + 0.2
continue
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ,
start_response,
url_map,
match,
request_id,
request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _add_instance(self, permit_warmup):
"""Creates and adds a new instance.Instance to the Module.
Args:
permit_warmup: If True then the new instance.Instance will be sent a new
warmup request if it is configured to receive them.
Returns:
The newly created instance.Instance. Returns None if no new instance
could be created because the maximum number of instances have already
been created.
"""
if self._max_instances is not None:
with self._condition:
if len(self._instances) >= self._max_instances:
return None
perform_warmup = permit_warmup and (
'warmup' in (self._module_configuration.inbound_services or []))
inst = self._instance_factory.new_instance(
self.generate_instance_id(),
expect_ready_request=perform_warmup)
with self._condition:
if self._quit_event.is_set():
return None
self._instances.add(inst)
if not inst.start():
return None
if perform_warmup:
self._async_warmup(inst)
else:
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
logging.debug('Created instance: %s', inst)
return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
return _THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
return int(math.ceil(peak_concurrent_requests /
self.max_instance_concurrent_requests))
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
required_instances: The set of the instance.Instances, in a state that
can handle requests, required to handle the current
request load.
not_required_instances: The set of the Instances contained in this
                              Module that are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
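  # Worked example for _split_instances (illustrative, not original code):
  # with five instances able to accept requests and
  # _get_num_required_instances() returning 2, the two instances with the most
  # outstanding requests form the "required" set; the remaining instances are
  # candidates for being quit when idle.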
def _choose_instance(self, timeout_time):
"""Returns the best Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
required_instances, not_required_instances = self._split_instances()
if required_instances:
# Pick the instance with the most remaining capacity to handle
# requests.
required_instances = sorted(
required_instances,
key=lambda inst: inst.remaining_request_capacity)
if required_instances[-1].remaining_request_capacity:
return required_instances[-1]
available_instances = [inst for inst in not_required_instances
if inst.remaining_request_capacity > 0 and
inst.can_accept_requests]
if available_instances:
# Pick the instance with the *least* capacity to handle requests
# to avoid using unnecessary idle instances.
available_instances.sort(
key=lambda instance: instance.num_outstanding_requests)
return available_instances[-1]
else:
self._condition.wait(timeout_time - time.time())
return None
def _adjust_instances(self):
"""Creates new Instances or deletes idle Instances based on current load."""
now = time.time()
with self._condition:
_, not_required_instances = self._split_instances()
if len(not_required_instances) < self._min_idle_instances:
self._add_instance(permit_warmup=True)
elif (len(not_required_instances) > self._max_idle_instances and
now >
(self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
for inst in not_required_instances:
if not inst.num_outstanding_requests:
try:
inst.quit()
except instance.CannotQuitServingInstance:
pass
else:
self._last_instance_quit_time = now
logging.debug('Quit instance: %s', inst)
with self._condition:
self._instances.discard(inst)
break
def _loop_adjusting_instances(self):
"""Loops until the Module exits, reloading, adding or removing Instances."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes(_CHANGE_POLLING_MS)
else:
time.sleep(_CHANGE_POLLING_MS/1000.0)
self._adjust_instances()
def __call__(self, environ, start_response):
return self._handle_request(environ, start_response)
class ManualScalingModule(Module):
"""A pool of instances that is manually-scaled."""
_DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling):
for attribute in manual_scaling.ATTRIBUTES:
if getattr(manual_scaling, attribute) in ('manual', None):
setattr(manual_scaling, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
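  # Note (illustrative, not from the original source): attributes left as
  # 'manual' or None in app.yaml are filled in from _DEFAULT_MANUAL_SCALING
  # above, so an unset `instances` value defaults to '1'.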
def _process_manual_scaling(self, manual_scaling):
if manual_scaling:
self._populate_default_manual_scaling(manual_scaling)
else:
manual_scaling = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling.instances)
def __init__(self, **kwargs):
"""Initializer for ManualScalingModule.
Args:
**kwargs: Arguments to forward to Module.__init__.
"""
super(ManualScalingModule, self).__init__(**kwargs)
self._process_manual_scaling(self._module_configuration.manual_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of or number of
# instances.
self._instances_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes, name='Change Watcher')
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instances_change_lock:
if self._max_instances is not None:
initial_num_instances = min(self._max_instances,
self._initial_num_instances)
else:
initial_num_instances = self._initial_num_instances
for _ in xrange(initial_num_instances):
self._add_instance()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
    # The change watcher thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._change_watcher_thread.join()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
      return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503, _TIMEOUT_HTML)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instances_change_lock held.
"""
instance_id = self.get_num_instances()
assert self._max_instances is None or instance_id < self._max_instances
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_servers.append(wsgi_servr)
self._instances.append(inst)
suspended = self._suspended
if not suspended:
future = self._async_start_instance(wsgi_servr, inst)
health_check_config = self.module_configuration.health_check
if (self.module_configuration.runtime == 'vm' and
health_check_config.enable_health_check and
'GAE_LOCAL_VM_RUNTIME' not in os.environ):
# Health checks should only get added after the build is done and the
# container starts.
def _add_health_checks_callback(unused_future):
return self._add_health_checks(inst, wsgi_servr, health_check_config)
future.add_done_callback(_add_health_checks_callback)
def _add_health_checks(self, inst, wsgi_servr, config):
do_health_check = functools.partial(
self._do_health_check, wsgi_servr, inst)
restart_instance = functools.partial(
self._restart_instance, inst)
health_checker = health_check_service.HealthChecker(
inst, config, do_health_check, restart_instance)
health_checker.start()
def _async_start_instance(self, wsgi_servr, inst):
return _THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
try:
if not inst.start():
return
except:
logging.exception('Internal error while starting instance.')
raise
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
logging.info('New instance for module "%s" serving on:\nhttp://%s\n',
self.name, self.balanced_address)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except Exception, e: # pylint: disable=broad-except
logging.exception('Internal error while handling start request: %s', e)
def _do_health_check(self, wsgi_servr, inst, start_response,
is_last_successful):
is_last_successful = 'yes' if is_last_successful else 'no'
url = '/_ah/health?%s' % urllib.urlencode(
[('IsLastSuccessful', is_last_successful)])
environ = self.build_request_environ(
'GET', url, [], '', '', wsgi_servr.port,
fake_login=True)
return self._handle_request(
environ,
start_response,
inst=inst,
request_type=instance.NORMAL_REQUEST)
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
def _handle_changes(self, timeout=0):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
file_changes = self._watcher.changes(timeout)
if file_changes:
logging.info(
'[%s] Detected file changes:\n %s', self.name,
'\n '.join(sorted(file_changes)))
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or file_changes:
with self._instances_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes(_CHANGE_POLLING_MS)
else:
time.sleep(_CHANGE_POLLING_MS/1000.0)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
def set_num_instances(self, instances):
if self._max_instances is not None:
instances = min(instances, self._max_instances)
with self._instances_change_lock:
with self._condition:
running_instances = self.get_num_instances()
if running_instances > instances:
wsgi_servers_to_quit = self._wsgi_servers[instances:]
del self._wsgi_servers[instances:]
instances_to_quit = self._instances[instances:]
del self._instances[instances:]
if running_instances < instances:
for _ in xrange(instances - running_instances):
self._add_instance()
if running_instances > instances:
for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
self._async_quit_instance(inst, wsgi_servr)
def _async_quit_instance(self, inst, wsgi_servr):
return _THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instances_change_lock:
if self._suspended:
raise request_info.VersionAlreadyStoppedError()
self._suspended = True
with self._condition:
instances_to_stop = zip(self._instances, self._wsgi_servers)
for wsgi_servr in self._wsgi_servers:
wsgi_servr.set_error(404)
for inst, wsgi_servr in instances_to_stop:
self._async_suspend_instance(inst, wsgi_servr.port)
def _async_suspend_instance(self, inst, port):
return _THREAD_POOL.submit(self._suspend_instance, inst, port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instances_change_lock:
if not self._suspended:
raise request_info.VersionAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
wsgi_servers = self._wsgi_servers
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[instance_id] = inst
instances_to_start.append((wsgi_servr, inst))
for wsgi_servr, inst in instances_to_start:
self._async_start_instance(wsgi_servr, inst)
def restart(self):
"""Restarts the module, replacing all running instances."""
with self._instances_change_lock:
with self._condition:
if self._quit_event.is_set():
return
instances_to_stop = self._instances[:]
wsgi_servers = self._wsgi_servers[:]
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
instances_to_start.append(inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[:] = instances_to_start
# Just force instances to stop for a faster restart.
for inst in instances_to_stop:
inst.quit(force=True)
start_futures = [
self._async_start_instance(wsgi_servr, inst)
for wsgi_servr, inst in zip(wsgi_servers, instances_to_start)]
logging.info('Waiting for instances to restart')
health_check_config = self.module_configuration.health_check
for (inst, wsgi_servr) in zip(instances_to_start, wsgi_servers):
if (self.module_configuration.runtime == 'vm'
and health_check_config.enable_health_check):
self._add_health_checks(inst, wsgi_servr, health_check_config)
_, not_done = futures.wait(start_futures, timeout=_SHUTDOWN_TIMEOUT)
if not_done:
logging.warning('All instances may not have restarted')
else:
logging.info('Instances restarted')
def _restart_instance(self, inst):
"""Restarts the specified instance."""
with self._instances_change_lock:
# Quit the old instance.
inst.quit(force=True)
# Create the new instance.
new_instance = self._instance_factory.new_instance(inst.instance_id)
wsgi_servr = self._wsgi_servers[inst.instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
# Start the new instance.
self._start_instance(wsgi_servr, new_instance)
health_check_config = self.module_configuration.health_check
if (self.module_configuration.runtime == 'vm'
and health_check_config.enable_health_check):
self._add_health_checks(new_instance, wsgi_servr, health_check_config)
# Replace it in the module registry.
with self._instances_change_lock:
with self._condition:
self._instances[new_instance.instance_id] = new_instance
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class ExternalModule(Module):
"""A module with a single instance that is run externally on a given port."""
# TODO: reduce code duplication between the various Module classes.
def __init__(self, **kwargs):
"""Initializer for ManualScalingModule.
Args:
**kwargs: Arguments to forward to Module.__init__.
"""
super(ExternalModule, self).__init__(**kwargs)
self._instance = None # Protected by self._condition.
self._wsgi_server = None # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of the instance.
self._instance_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes, name='Change Watcher')
# Override this method from the parent class
def _create_instance_factory(self, module_configuration):
return _ExternalInstanceFactory(
request_data=self._request_data,
module_configuration=module_configuration)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instance_change_lock:
self._add_instance()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
    # The change watcher thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._change_watcher_thread.join()
self._balanced_module.quit()
self._wsgi_server.quit()
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
if instance_id != 0:
raise request_info.InvalidInstanceIdError()
    return self._wsgi_server.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
return {self._instance}
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
      return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
environ['BACKEND_ID'] = (
self._module_configuration.module_name
if self._module_configuration.is_backend
else self._module_configuration.version_id.split('.', 1)[0])
return self._handle_instance_request(
environ, start_response, url_map, match, request_id,
inst or self._instance, request_type)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
    This must be called with _instance_change_lock held.
"""
inst = self._instance_factory.new_instance(0)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_server = wsgi_servr
self._instance = inst
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _async_start_instance(self, wsgi_servr, inst):
return _THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
try:
if not inst.start():
return
except:
logging.exception('Internal error while starting instance.')
raise
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
logging.info('New instance for module "%s" serving on:\nhttp://%s\n',
self.name, self.balanced_address)
def _handle_changes(self, timeout=0):
"""Handle file or configuration changes."""
# Always check for config changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
      with self._instance_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes(_CHANGE_POLLING_MS)
else:
time.sleep(_CHANGE_POLLING_MS/1000.0)
def get_num_instances(self):
return 1
def set_num_instances(self, instances):
pass
def _async_quit_instance(self, inst, wsgi_servr):
return _THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instance_change_lock:
if self._suspended:
raise request_info.VersionAlreadyStoppedError()
self._suspended = True
with self._condition:
self._wsgi_server.set_error(404)
return _THREAD_POOL.submit(
self._suspend_instance, self._instance, self._wsgi_server.port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instance_change_lock:
if not self._suspended:
raise request_info.VersionAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
inst = self._instance_factory.new_instance(0, expect_ready_request=True)
self._instance = inst
self._wsgi_server.set_app(
functools.partial(self._handle_request, inst=inst))
self._port_registry.add(self._wsgi_server.port, self, inst)
self._async_start_instance(self._wsgi_server, inst)
def restart(self):
"""Restarts the module, replacing all running instances."""
with self._instance_change_lock:
with self._condition:
if self._quit_event.is_set():
return
inst = self._instance_factory.new_instance(0, expect_ready_request=True)
self._wsgi_server.set_app(
functools.partial(self._handle_request, inst=inst))
self._port_registry.add(self._wsgi_server.port, self, inst)
self._instance = inst
# Just force instance to stop for a faster restart.
inst.quit(force=True)
logging.info('Waiting for instances to restart')
self._start_instance(self._wsgi_server, inst)
logging.info('Instances restarted')
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
if instance_id == 0:
return self._instance
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class _ExternalInstanceFactory(instance.InstanceFactory):
"""Factory for instances that are started externally rather than by us."""
_MAX_CONCURRENT_REQUESTS = 20
# TODO: reconsider this
START_URL_MAP = appinfo.URLMap(
url='/_ah/start',
script='ignored',
login='admin')
WARMUP_URL_MAP = appinfo.URLMap(
url='/_ah/warmup',
script='ignored',
login='admin')
def __init__(self, request_data, module_configuration):
super(_ExternalInstanceFactory, self).__init__(
request_data, self._MAX_CONCURRENT_REQUESTS)
self._module_configuration = module_configuration
def new_instance(self, instance_id, expect_ready_request=False):
assert instance_id == 0
proxy = _ExternalRuntimeProxy(self._module_configuration)
return instance.Instance(self.request_data,
instance_id,
proxy,
self.max_concurrent_requests,
self.max_background_threads,
expect_ready_request)
class _ExternalRuntimeProxy(instance.RuntimeProxy):
def __init__(self, module_configuration):
super(_ExternalRuntimeProxy, self).__init__()
self._module_configuration = module_configuration
def start(self):
self._proxy = http_proxy.HttpProxy(
host='localhost', port=self._module_configuration.external_port,
instance_died_unexpectedly=lambda: False,
instance_logs_getter=lambda: '',
error_handler_file=application_configuration.get_app_error_file(
self._module_configuration),
prior_error=None)
self.handle = self._proxy.handle
class BasicScalingModule(Module):
"""A pool of instances that is basic-scaled."""
_DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
idle_timeout='15m')
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling):
for attribute in basic_scaling.ATTRIBUTES:
if getattr(basic_scaling, attribute) in ('basic', None):
setattr(basic_scaling, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling):
if basic_scaling:
self._populate_default_basic_scaling(basic_scaling)
else:
basic_scaling = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling.max_instances))
else:
self._max_instances = int(basic_scaling.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling.idle_timeout)
def __init__(self, **kwargs):
"""Initializer for BasicScalingModule.
Args:
**kwargs: Arguments to forward to Module.__init__.
"""
super(BasicScalingModule, self).__init__(**kwargs)
self._process_basic_scaling(self._module_configuration.basic_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# A list of booleans signifying whether the corresponding instance in
# self._instances has been or is being started.
self._instance_running = [] # Protected by self._condition.
for instance_id in xrange(self._max_instances):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
self._instances.append(inst)
self._wsgi_servers.append(wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst)))
self._instance_running.append(False)
self._condition = threading.Condition() # Protects instance state.
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes_and_idle_instances,
name='Change Watcher')
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The instance adjustment thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
instance_id = inst.instance_id
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
if inst.has_quit:
return self._error_response(environ, start_response, 503)
with self._condition:
if self._instance_running[instance_id]:
should_start = False
else:
self._instance_running[instance_id] = True
should_start = True
if should_start:
self._start_instance(instance_id)
else:
inst.wait(timeout_time)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503, _TIMEOUT_HTML)
def _start_any_instance(self):
"""Choose an inactive instance and start it asynchronously.
Returns:
An instance.Instance that will be started asynchronously or None if all
instances are already running.
"""
with self._condition:
for instance_id, running in enumerate(self._instance_running):
if not running:
self._instance_running[instance_id] = True
inst = self._instances[instance_id]
break
else:
return None
self._async_start_instance(instance_id)
return inst
def _async_start_instance(self, instance_id):
return _THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
with self._condition:
if self._quit_event.is_set():
return
wsgi_servr = self._wsgi_servers[instance_id]
inst = self._instances[instance_id]
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time and not self._quit_event.is_set():
for inst in self._instances:
if inst.can_accept_requests:
return inst
else:
inst = self._start_any_instance()
if inst:
break
self._condition.wait(timeout_time - time.time())
else:
return None
if inst:
inst.wait(timeout_time)
return inst
def _handle_changes(self, timeout=0):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
file_changes = self._watcher.changes(timeout)
if file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or file_changes:
self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
self._shutdown_idle_instances()
if self._automatic_restarts:
self._handle_changes(_CHANGE_POLLING_MS)
else:
time.sleep(_CHANGE_POLLING_MS/1000.0)
def _shutdown_idle_instances(self):
instances_to_stop = []
with self._condition:
for instance_id, inst in enumerate(self._instances):
if (self._instance_running[instance_id] and
inst.idle_seconds > self._instance_idle_timeout):
instances_to_stop.append((self._instances[instance_id],
self._wsgi_servers[instance_id]))
self._instance_running[instance_id] = False
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for inst, wsgi_servr in instances_to_stop:
logging.debug('Shutting down %r', inst)
self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
inst.quit(expect_shutdown=True)
self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
"""Restarts the module, replacing all running instances."""
instances_to_stop = []
instances_to_start = []
with self._condition:
if self._quit_event.is_set():
return
for instance_id, inst in enumerate(self._instances):
if self._instance_running[instance_id]:
instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
instances_to_start.append(instance_id)
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for instance_id in instances_to_start:
self._async_start_instance(instance_id)
for inst, wsgi_servr in instances_to_stop:
self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class InteractiveCommandModule(Module):
"""A Module that can evaluate user commands.
This module manages a single Instance which is started lazily.
"""
_MAX_REQUEST_WAIT_TIME = 15
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
custom_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
use_mtime_file_watcher,
allow_skipped_files,
threadsafe_override):
"""Initializer for InteractiveCommandModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for this module.
host: A string containing the host that will be used when constructing
HTTP headers sent to the Instance executing the interactive command
e.g. "localhost".
balanced_port: An int specifying the port that will be used when
constructing HTTP headers sent to the Instance executing the
interactive command e.g. 8080.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instances containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
custom_config: A runtime_config_pb2.CustomConfig instance. If None, or
'custom_entrypoint' is not set, then attempting to instantiate a
custom runtime module will result in an error.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(InteractiveCommandModule, self).__init__(
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
custom_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances=1,
use_mtime_file_watcher=use_mtime_file_watcher,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files,
threadsafe_override=threadsafe_override)
# Use a single instance so that state is consistent across requests.
self._inst_lock = threading.Lock()
self._inst = None
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on.
The InteractiveCommandModule does not actually listen on this port but it is
used when constructing the "SERVER_PORT" in the WSGI environment.
"""
return self._balanced_port
def quit(self):
"""Stops the InteractiveCommandModule."""
if self._inst:
self._inst.quit(force=True)
self._inst = None
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.INTERACTIVE_REQUEST):
"""Handles a interactive request by forwarding it to the managed Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants. This must be instance.INTERACTIVE_REQUEST.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
assert inst is None
assert request_type == instance.INTERACTIVE_REQUEST
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
new_instance = False
with self._inst_lock:
if not self._inst:
self._inst = self._instance_factory.new_instance(
AutoScalingModule.generate_instance_id(),
expect_ready_request=False)
new_instance = True
inst = self._inst
if new_instance:
self._inst.start()
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
inst.wait(timeout_time)
except Exception:
# If the instance is restarted while handling a request then the
# exception raised is unpredictable.
if inst != self._inst:
start_response('503 Service Unavailable', [])
return ['Instance was restarted while executing command']
logging.exception('Unexpected exception handling command: %r', environ)
raise
else:
start_response('503 Service Unavailable', [])
return ['The command timed out while waiting for another one to complete']
def restart(self):
"""Restarts the module."""
with self._inst_lock:
if self._inst:
self._inst.quit(force=True)
self._inst = None
def send_interactive_command(self, command):
"""Sends an interactive command to the module.
Args:
command: The command to send e.g. "print 5+5".
Returns:
A string representing the result of the command e.g. "10\n".
Raises:
InteractiveCommandError: if the command failed for any reason.
"""
start_response = start_response_utils.CapturingStartResponse()
# 192.0.2.0 is an example address defined in RFC 5737.
environ = self.build_request_environ(
'POST', '/', [], command, '192.0.2.0', self.balanced_port)
try:
response = self._handle_request(
environ,
start_response,
request_type=instance.INTERACTIVE_REQUEST)
except Exception as e:
raise InteractiveCommandError('Unexpected command failure: ' + str(e))
if start_response.status != '200 OK':
raise InteractiveCommandError(start_response.merged_response(response))
return start_response.merged_response(response)
|
flask.py
|
from __future__ import annotations
import asyncio
import json
import logging
from asyncio import Queue as AsyncQueue
from queue import Queue as ThreadQueue
from threading import Event as ThreadEvent
from threading import Thread
from typing import Any, Callable, Dict, NamedTuple, Optional, Tuple, Union, cast
from urllib.parse import parse_qs as parse_query_string
from flask import Blueprint, Flask, redirect, request, send_from_directory, url_for
from flask_cors import CORS
from flask_sockets import Sockets
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from geventwebsocket.websocket import WebSocket
from typing_extensions import TypedDict
import idom
from idom.config import IDOM_DEBUG_MODE, IDOM_WEB_MODULES_DIR
from idom.core.dispatcher import dispatch_single_view
from idom.core.layout import LayoutEvent, LayoutUpdate
from idom.core.types import ComponentConstructor, ComponentType
from .utils import CLIENT_BUILD_DIR, threaded, wait_on_event
logger = logging.getLogger(__name__)
class Config(TypedDict, total=False):
"""Render server config for :class:`FlaskRenderServer`"""
cors: Union[bool, Dict[str, Any]]
"""Enable or configure Cross Origin Resource Sharing (CORS)
For more information see docs for ``flask_cors.CORS``
"""
import_name: str
"""The module where the application instance was created
For more info see :class:`flask.Flask`.
"""
redirect_root_to_index: bool
"""Whether to redirect the root URL (with prefix) to ``index.html``"""
serve_static_files: bool
"""Whether or not to serve static files (i.e. web modules)"""
url_prefix: str
"""The URL prefix where IDOM resources will be served from"""
def PerClientStateServer(
constructor: ComponentConstructor,
config: Optional[Config] = None,
app: Optional[Flask] = None,
) -> FlaskServer:
"""Return a :class:`FlaskServer` where each client has its own state.
Implements the :class:`~idom.server.proto.ServerFactory` protocol
Parameters:
constructor: A component constructor
config: Options for configuring server behavior
app: An application instance (otherwise a default instance is created)
"""
config, app = _setup_config_and_app(config, app)
blueprint = Blueprint("idom", __name__, url_prefix=config["url_prefix"])
_setup_common_routes(blueprint, config)
_setup_single_view_dispatcher_route(app, config, constructor)
app.register_blueprint(blueprint)
return FlaskServer(app)
class FlaskServer:
"""A thin wrapper for running a Flask application
See :class:`idom.server.proto.Server` for more info
"""
_wsgi_server: pywsgi.WSGIServer
def __init__(self, app: Flask) -> None:
self.app = app
self._did_start = ThreadEvent()
@app.before_first_request
def server_did_start() -> None:
self._did_start.set()
def run(self, host: str, port: int, *args: Any, **kwargs: Any) -> None:
if IDOM_DEBUG_MODE.current:
logging.basicConfig(level=logging.DEBUG) # pragma: no cover
logger.info(f"Running at http://{host}:{port}")
self._wsgi_server = _StartCallbackWSGIServer(
self._did_start.set,
(host, port),
self.app,
*args,
handler_class=WebSocketHandler,
**kwargs,
)
self._wsgi_server.serve_forever()
run_in_thread = threaded(run)
def wait_until_started(self, timeout: Optional[float] = 3.0) -> None:
wait_on_event(f"start {self.app}", self._did_start, timeout)
def stop(self, timeout: Optional[float] = 3.0) -> None:
try:
server = self._wsgi_server
except AttributeError: # pragma: no cover
raise RuntimeError(
f"Application is not running or was not started by {self}"
)
else:
server.stop(timeout)
def _setup_config_and_app(
config: Optional[Config], app: Optional[Flask]
) -> Tuple[Config, Flask]:
return (
{
"url_prefix": "",
"cors": False,
"serve_static_files": True,
"redirect_root_to_index": True,
**(config or {}), # type: ignore
},
app or Flask(__name__),
)
def _setup_common_routes(blueprint: Blueprint, config: Config) -> None:
cors_config = config["cors"]
if cors_config: # pragma: no cover
cors_params = cors_config if isinstance(cors_config, dict) else {}
CORS(blueprint, **cors_params)
if config["serve_static_files"]:
@blueprint.route("/client/<path:path>")
def send_client_dir(path: str) -> Any:
return send_from_directory(str(CLIENT_BUILD_DIR), path)
@blueprint.route("/modules/<path:path>")
def send_modules_dir(path: str) -> Any:
return send_from_directory(str(IDOM_WEB_MODULES_DIR.current), path)
if config["redirect_root_to_index"]:
@blueprint.route("/")
def redirect_to_index() -> Any:
return redirect(
url_for(
"idom.send_client_dir",
path="index.html",
**request.args,
)
)
def _setup_single_view_dispatcher_route(
app: Flask, config: Config, constructor: ComponentConstructor
) -> None:
sockets = Sockets(app)
@sockets.route(_join_url_paths(config["url_prefix"], "/stream")) # type: ignore
def model_stream(ws: WebSocket) -> None:
def send(value: Any) -> None:
ws.send(json.dumps(value))
def recv() -> Optional[LayoutEvent]:
event = ws.receive()
if event is not None:
return LayoutEvent(**json.loads(event))
else:
return None
dispatch_single_view_in_thread(constructor(**_get_query_params(ws)), send, recv)
def _get_query_params(ws: WebSocket) -> Dict[str, Any]:
return {
k: v if len(v) > 1 else v[0]
for k, v in parse_query_string(ws.environ["QUERY_STRING"]).items()
}
def dispatch_single_view_in_thread(
component: ComponentType,
send: Callable[[Any], None],
recv: Callable[[], Optional[LayoutEvent]],
) -> None:
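# Bridge between the synchronous (gevent) WebSocket handlers and the asyncio
# dispatcher: outgoing layout updates are drained from a thread-safe queue by a
# sender thread, while incoming events are pushed onto an asyncio queue on the
# dispatcher's event loop via call_soon_threadsafe.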
dispatch_thread_info_created = ThreadEvent()
dispatch_thread_info_ref: idom.Ref[Optional[_DispatcherThreadInfo]] = idom.Ref(None)
def run_dispatcher() -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
thread_send_queue: "ThreadQueue[LayoutUpdate]" = ThreadQueue()
async_recv_queue: "AsyncQueue[LayoutEvent]" = AsyncQueue()
async def send_coro(value: Any) -> None:
thread_send_queue.put(value)
async def recv_coro() -> Any:
return await async_recv_queue.get()
async def main() -> None:
await dispatch_single_view(idom.Layout(component), send_coro, recv_coro)
main_future = asyncio.ensure_future(main())
dispatch_thread_info_ref.current = _DispatcherThreadInfo(
dispatch_loop=loop,
dispatch_future=main_future,
thread_send_queue=thread_send_queue,
async_recv_queue=async_recv_queue,
)
dispatch_thread_info_created.set()
loop.run_until_complete(main_future)
Thread(target=run_dispatcher, daemon=True).start()
dispatch_thread_info_created.wait()
dispatch_thread_info = cast(_DispatcherThreadInfo, dispatch_thread_info_ref.current)
assert dispatch_thread_info is not None
stop = ThreadEvent()
def run_send() -> None:
while not stop.is_set():
send(dispatch_thread_info.thread_send_queue.get())
Thread(target=run_send, daemon=True).start()
try:
while True:
value = recv()
if value is None:
stop.set()
break
# BUG: https://github.com/nedbat/coveragepy/issues/1012
# Coverage isn't able to support concurrency coverage for both threading and gevent
dispatch_thread_info.dispatch_loop.call_soon_threadsafe( # pragma: no cover
dispatch_thread_info.async_recv_queue.put_nowait, value
)
finally:
dispatch_thread_info.dispatch_loop.call_soon_threadsafe(
dispatch_thread_info.dispatch_future.cancel
)
return None
class _DispatcherThreadInfo(NamedTuple):
dispatch_loop: asyncio.AbstractEventLoop
dispatch_future: "asyncio.Future[Any]"
thread_send_queue: "ThreadQueue[LayoutUpdate]"
async_recv_queue: "AsyncQueue[LayoutEvent]"
class _StartCallbackWSGIServer(pywsgi.WSGIServer): # type: ignore
def __init__(
self, before_first_request: Callable[[], None], *args: Any, **kwargs: Any
) -> None:
self._before_first_request_callback = before_first_request
super().__init__(*args, **kwargs)
def update_environ(self) -> None:
"""
Called before the first request is handled to fill in WSGI environment values.
This includes getting the correct server name and port.
"""
super().update_environ()
# BUG: https://github.com/nedbat/coveragepy/issues/1012
# Coverage isn't able to support concurrency coverage for both threading and gevent
self._before_first_request_callback() # pragma: no cover
def _join_url_paths(*args: str) -> str:
# urllib.parse.urljoin performs more logic than is needed. Thus we need a util func
# to join paths as if they were POSIX paths.
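# e.g. _join_url_paths("a/", "b/") -> "a/b"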
return "/".join(map(lambda x: str(x).rstrip("/"), filter(None, args)))
|
test_docxmlrpc.py
|
from DocXMLRPCServer import DocXMLRPCServer
import httplib
import sys
from test import test_support
threading = test_support.import_module('threading')
import time
import socket
import unittest
PORT = None
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because
# the server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def server(evt, numrequests):
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
n = 1000
while n > 0 and PORT is None:
time.sleep(0.001)
n -= 1
self.client = httplib.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
test_support.threading_cleanup(*self._threads)
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server raises an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
currently is, I suspect, invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn('<dl><dt><a name="-<lambda>"><strong>'
'<lambda></strong></a>(x, y)</dt></dl>',
response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(
('<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
'<tt>Add two instances together. This '
'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
'PEP008</a>, but has nothing<br>\nto do '
'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
'RFC1952</a>. Case should matter: pEp008 '
'and rFC1952. Things<br>\nthat start '
'with http and ftp should be '
'auto-linked, too:<br>\n<a href="http://google.com">'
'http://google.com</a>.</tt></dd></dl>'), response.read())
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_system_methods(self):
"""Test the precense of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(
('<dl><dt><a name="-system.listMethods"><strong>system.listMethods'
'</strong></a>()</dt><dd><tt><a href="#-system.listMethods">system'
'.listMethods</a>() => [\'add\', \'subtract\','
' \'multiple\']<br>\n <br>\nReturns a list'
' of the methods supported by the'
' server.</tt></dd></dl>\n <dl><dt><a name="-system.methodHelp">'
'<strong>system.methodHelp</strong></a>(method_name)</dt><dd><tt>'
'<a href="#-system.methodHelp">system.methodHelp</a>(\'add\') '
'=> "Adds two integers together"<br>\n '
'<br>\nReturns a string containing documentation'
' for the specified method.</tt></dd></dl>\n '
'<dl><dt><a name="-system.methodSignature"><strong>system.'
'methodSignature</strong></a>(method_name)</dt><dd><tt><a href="#-'
'system.methodSignature">system.methodSignature</a>(\'add\') '
'=> [double, int, int]<br>\n <br>\nReturns'
' a list describing the signature of'
' the method. In the<br>\nabove example,'
' the add method takes two integers'
' as arguments<br>\nand returns a double'
' result.<br>\n <br>\nThis server does '
'NOT support system.methodSignature.</tt></dd></dl>'),
response.read())
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn("""Try self.<strong>add</strong>, too.""",
response.read())
def test_main():
test_support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main()
|
server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
import os
import ctypes
import socket
import select
import struct
import logging
import threading
import multiprocessing
import time
import errno
import tvm._ffi
from tvm._ffi.base import py_str
from tvm._ffi.libinfo import find_lib_path
from tvm.runtime.module import load_module as _load_module
from tvm.contrib import utils
from tvm.contrib.popen_pool import PopenWorker
from . import _ffi_api
from . import base
# pylint: disable=unused-import
from . import testing
from .base import TrackerCode
logger = logging.getLogger("RPCServer")
def _server_env(load_library, work_path=None):
"""Server environment function return temp dir"""
if work_path:
temp = work_path
else:
temp = utils.tempdir()
# pylint: disable=unused-variable
@tvm._ffi.register_func("tvm.rpc.server.workpath", override=True)
def get_workpath(path):
return temp.relpath(path)
@tvm._ffi.register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
m = _load_module(path)
logger.info("load_module %s", path)
return m
@tvm._ffi.register_func("tvm.rpc.server.download_linked_module", override=True)
def download_linked_module(file_name):
"""Load module from remote side."""
# c++ compiler/linker
cc = os.environ.get("CXX", "g++")
# pylint: disable=import-outside-toplevel
path = temp.relpath(file_name)
if path.endswith(".o"):
# Extra dependencies during runtime.
from tvm.contrib import cc as _cc
_cc.create_shared(path + ".so", path, cc=cc)
path += ".so"
elif path.endswith(".tar"):
# Extra dependencies during runtime.
from tvm.contrib import cc as _cc, tar as _tar
tar_temp = utils.tempdir(custom_path=path.replace(".tar", ""))
_tar.untar(path, tar_temp.temp_dir)
files = [tar_temp.relpath(x) for x in tar_temp.listdir()]
_cc.create_shared(path + ".so", files, cc=cc)
path += ".so"
elif path.endswith(".dylib") or path.endswith(".so"):
pass
else:
raise RuntimeError("Do not know how to link %s" % file_name)
logger.info("Send linked module %s to client", path)
return bytearray(open(path, "rb").read())
libs = []
load_library = load_library.split(":") if load_library else []
for file_name in load_library:
file_name = find_lib_path(file_name)[0]
libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
logger.info("Load additional library %s", file_name)
temp.libs = libs
return temp
def _serve_loop(sock, addr, load_library, work_path=None):
"""Server loop"""
sockfd = sock.fileno()
temp = _server_env(load_library, work_path)
_ffi_api.ServerLoop(sockfd)
if not work_path:
temp.remove()
logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
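# e.g. ["-timeout=10"] -> {"timeout": 10.0}; unrecognized options are ignored.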
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
"""Listening loop of the server."""
def _accept_conn(listen_sock, tracker_conn, ping_period=2):
"""Accept connection from the other places.
Parameters
----------
listen_sock: Socket
The socket used by listening process.
tracker_conn : connection to tracker
Tracker connection
ping_period : float, optional
ping the tracker every `ping_period` seconds if no connection is accepted.
"""
old_keyset = set()
# Report resource to tracker
if tracker_conn:
matchkey = base.random_key(rpc_key + ":")
base.sendjson(tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
else:
matchkey = rpc_key
unmatch_period_count = 0
unmatch_timeout = 4
# Wait until we get a valid connection
while True:
if tracker_conn:
trigger = select.select([listen_sock], [], [], ping_period)
if listen_sock not in trigger[0]:
base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = base.recvjson(tracker_conn)
old_keyset.add(matchkey)
# if match key not in pending key set
# it means the key is acquired by a client but not used.
if matchkey not in pending_keys:
unmatch_period_count += 1
else:
unmatch_period_count = 0
# regenerate match key if key is acquired but not used for a while
if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
logger.info("no incoming connections, regenerate key ...")
matchkey = base.random_key(rpc_key + ":", old_keyset)
base.sendjson(
tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr]
)
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
unmatch_period_count = 0
continue
conn, addr = listen_sock.accept()
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:" + matchkey
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
conn.close()
logger.warning("mismatch key from %s", addr)
continue
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
return conn, addr, _parse_server_opt(arr[1:])
# Server logic
tracker_conn = None
while True:
try:
# step 1: setup tracker and report to tracker
if tracker_addr and tracker_conn is None:
tracker_conn = base.connect_with_retry(tracker_addr)
tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
# report status of current queue
cinfo = {"key": "server:" + rpc_key, "addr": (custom_addr, port)}
base.sendjson(tracker_conn, [TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
# step 2: wait for in-coming connections
conn, addr, opts = _accept_conn(sock, tracker_conn)
except (socket.error, IOError):
# retry when tracker is dropped
if tracker_conn:
tracker_conn.close()
tracker_conn = None
continue
except RuntimeError as exc:
raise exc
# step 3: serving
work_path = utils.tempdir()
logger.info("connection from %s", addr)
server_proc = multiprocessing.Process(
target=_serve_loop, args=(conn, addr, load_library, work_path)
)
server_proc.start()
# close from our side.
conn.close()
# wait until the server process finishes or times out
server_proc.join(opts.get("timeout", None))
if server_proc.is_alive():
logger.info("Timeout in RPC session, kill..")
# pylint: disable=import-outside-toplevel
import psutil
parent = psutil.Process(server_proc.pid)
# terminate worker children
for child in parent.children(recursive=True):
child.terminate()
# terminate the worker
server_proc.terminate()
work_path.remove()
def _connect_proxy_loop(addr, key, load_library):
print("_connect_proxy_loop " + key)
key = "server:" + key
retry_count = 0
max_retry = 5
retry_period = 5
while True:
try:
sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("<i", base.RPC_MAGIC))
sock.sendall(struct.pack("<i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
if magic == base.RPC_CODE_MISMATCH:
logger.warning("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
remote_key = py_str(base.recvall(sock, keylen))
opts = _parse_server_opt(remote_key.split()[1:])
logger.info("connected to %s", str(addr))
process = multiprocessing.Process(target=_serve_loop, args=(sock, addr, load_library))
process.start()
sock.close()
process.join(opts.get("timeout", None))
if process.is_alive():
logger.info("Timeout in RPC session, kill..")
process.terminate()
retry_count = 0
except (socket.error, IOError) as err:
retry_count += 1
logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
if retry_count > max_retry:
raise RuntimeError("Maximum retry error: last error: %s" % str(err))
time.sleep(retry_period)
class PopenRPCServerState(object):
"""Internal PopenRPCServer State"""
current = None
def __init__(
self,
host,
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
):
# start update
self.host = host
self.port = port
self.libs = []
self.custom_addr = custom_addr
if silent:
logger.setLevel(logging.ERROR)
if not is_proxy:
sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [errno.EADDRINUSE]:
continue
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.thread = threading.Thread(
target=_listen_loop,
args=(self.sock, self.port, key, tracker_addr, load_library, self.custom_addr),
)
self.thread.start()
else:
self.thread = threading.Thread(
target=_connect_proxy_loop, args=((host, port), key, load_library)
)
self.thread.start()
def _popen_start_rpc_server(
host,
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
no_fork=False,
server_init_callback=None,
):
if no_fork:
multiprocessing.set_start_method("spawn")
if server_init_callback:
server_init_callback()
# This is a function that will be sent to the
# Popen worker to run on a separate process.
# Create and start the server in a different thread
state = PopenRPCServerState(
host, port, port_end, is_proxy, tracker_addr, key, load_library, custom_addr, silent
)
PopenRPCServerState.current = state
# returns the port so that the main process can get the port number.
return state.port
class Server(object):
"""Start RPC server on a separate process.
This is a simple python implementation based on multi-processing.
It is also possible to implement a similar C-based server with the
TVM runtime which does not depend on Python.
Parameters
----------
host : str
The host url of the server.
port : int
The port to bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
If this is true, the host and port actually corresponds to the
address of the proxy server.
tracker_addr: Tuple (str, int), optional
The address of the RPC Tracker in tuple(host, port) format.
If not None, the server will register itself to the tracker.
key : str, optional
The key used to identify the device type in tracker.
load_library : str, optional
List of additional libraries to be loaded during execution.
custom_addr: str, optional
Custom IP Address to Report to RPC Tracker
silent: bool, optional
Whether to run this server in silent mode.
no_fork: bool, optional
Whether to forbid fork in multiprocessing.
server_init_callback: Callable, optional
Additional initialization function when starting the server.
Note
----
The RPC server only sees functions in the tvm namespace.
To bring additional custom functions to the server env, you can use server_init_callback.
.. code:: python
def server_init_callback():
import tvm
# must import mypackage here
import mypackage
tvm.register_func("function", mypackage.func)
server = rpc.Server(host, server_init_callback=server_init_callback)
"""
def __init__(
self,
host="0.0.0.0",
port=9091,
port_end=9199,
is_proxy=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False,
no_fork=False,
server_init_callback=None,
):
try:
if _ffi_api.ServerLoop is None:
raise RuntimeError("Please compile with USE_RPC=1")
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
self.proc = PopenWorker()
# send the function
self.proc.send(
_popen_start_rpc_server,
[
host,
port,
port_end,
is_proxy,
tracker_addr,
key,
load_library,
custom_addr,
silent,
no_fork,
server_init_callback,
],
)
# receive the port
self.port = self.proc.recv()
self.host = host
def terminate(self):
"""Terminate the server process"""
if self.proc:
self.proc.kill()
self.proc = None
def __del__(self):
self.terminate()
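# Usage sketch (not part of the original module): start a standalone server and
# connect to it from the same process. Assumes a TVM build with USE_RPC enabled;
# the key "example" is an arbitrary illustration value.
if __name__ == "__main__":
    from tvm import rpc as _rpc

    example_server = Server(host="127.0.0.1", port=9091, key="example")
    remote = _rpc.connect("127.0.0.1", example_server.port, key="example")
    print(remote.cpu(0))  # request a remote device handle to confirm the session works
    example_server.terminate()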
|
main.py
|
""" MSE """
# import multiprocessing
import threading
import numpy as np
import os
import shutil
import matplotlib.pyplot as plt
import tensorflow as tf
import math
import pickle
import sympy as sym
from scipy import signal
import random
np.random.seed(42)
# PARAMETERS
OUTPUT_GRAPH = False # save logs
LOG_DIR = './log_pid' # save location for logs
MAX_EP_STEP = 200 # maximum number of steps per episode
MAX_GLOBAL_EP = 20_000 # total number of episodes
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10 # 10 # sets how often the global net is updated
GAMMA = 0.99 # 0.90 don't use this # discount factor
ENTROPY_BETA = 1e-7 # 0.03 # entropy factor
LR_A = 0.0001 # 0.000001 # learning rate for actor
LR_C = 0.001 # 0.00001 # learning rate for critic
N_S = 1 # env.observation_space.shape[0] # number of states
N_A = 2 # env.action_space.shape[0] # number of actions
# A_BOUND = [np.array([0.02, 0.02]), np.array([4., 4.])] # [env.action_space.low, env.action_space.high] # action bounds
A_BOUND = [np.array([0.02, 0.02]), np.array([2., 2.])] # [env.action_space.low, env.action_space.high] # action bounds
# A_BOUND = [np.array([0.1, 0.1]), np.array([10., 10.])] # [env.action_space.low, env.action_space.high] # action bounds
# [np.array([0.01, 0.01]), np.array([1., 1.])] # action bounds
W1 = 0.025
W2 = 0.025
W3 = 6000.
W4 = 6000. # weights for kp, taui, CV, MV
CONSTRAIN_ALPHA = 5 # determines how much the flow rates can breach in total
CONSTRAIN_LR = 0.000001 # learning rate for the constrain
DISTURBANCE = False
TRAIN_CTRL = True # Train or not?
if TRAIN_CTRL:
ISOFFLINE = True
LOAD_WEIGHTS = False
SAVE_WEIGHTS = True
DETERMINISTIC = False
N_WORKERS = 1 # multiprocessing.cpu_count() # number of workers
else:
LOAD_WEIGHTS = True
DETERMINISTIC = False
SAVE_WEIGHTS = False
N_WORKERS = 1
ISOFFLINE = False
G = 983.991 # cm/s^2
PI = math.pi
prev_best = -180
prev_best_param = [1.2, 15]
# frange is an extension of python's range function that allows for non-integer step sizes
def frange(start, stop, step):
i = start
while i < stop:
yield i
i += step
# signed square root is the same as a normal square root except the sign of the radicand is preserved
# for example signedSqrt(-4) = -2
def signedSqrt(x):
if x == 0:
return 0
else:
sign = x / abs(x)
return sign * abs(x) ** 0.5
class ThreeTankEnv(object):
def __init__(self, sess, setpoint, isoffline):
self.sess = sess
self.setpoint = setpoint # the list of set points for tank 1
self.Lambda = 0
self.C1 = 0 # Kp penalty # bug
self.C2 = 0 # taui penalty # bug
self.C3 = 0 # CV penalty # bug
self.C4 = 0 # MV penalty # bug
self.breach = 0
self.constrain_contribution = 0 # constrain to be multiplied by lambda
self.KP_MAX, self.TAU_MAX, self.MV_MAX, self.CV_MAX = 20, 20, 0.6, self.setpoint * 1.1
self.KP_MIN, self.TAU_MIN, self.MV_MIN, self.CV_MIN = 0, 0, 0, 0
self.height_T1_record = [] # list of Tank1 level
self.flowrate_T1_record = [] # list of Tank1 Flowrate
self.setpoint_T1_record = [] # list of Tank1 setpoints
self.kp_record = [] # list of Tank1 Kp
self.ti_record = [] # list of Tank1 Ti
self.ep_num = 1 # episode number
self.old_error1 = 0
self.new_error1 = 0
# To calculate MSE
self.error_sum = 0
self.no_of_error = 0
self.time_step = 0 # initial time_step
# To calculate Variance
self.flowrate_buffer = []
self.del_pids = []
# initialize kp1 and ti1 values
self.kp1 = 1.2
self.ti1 = 15
timespan = np.linspace(0, 100, 101)
omega = 0.3
# self.sinfunction = 10 * np.sin(omega * timespan) + 2 # SP varying gain
# self.sinfunction2 = 15 * np.sin(omega * timespan) + 6 # SP varying tau
self.sinfunction = 8 * np.sin(omega * timespan) + 2 # SP varying gain
self.sinfunction2 = 11 * np.sin(omega * timespan) + 6 # SP varying tau
self.processgain = self.sinfunction[int(setpoint)]
x = sym.Symbol('x')
self.processtau = self.sinfunction2[int(setpoint)]
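# Build a first-order process model G(s) = K / (tau*s + 1) from the gain and time
# constant chosen above, then discretize it (1 s sample time) into state-space form.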
type2 = sym.Poly((self.processtau * x + 1))
type2_c = list(type2.coeffs())
type2_c = np.array(type2_c, dtype=float)
sys2 = signal.TransferFunction([self.processgain], type2_c)
sys2 = sys2.to_ss()
sys2 = sys2.to_discrete(1)
self.isoffline = isoffline
if self.isoffline:
self.A = sys2.A * 0.9
self.B = sys2.B * 0.9
self.C = sys2.C * 0.9
else:
self.A = sys2.A
self.B = sys2.B
self.C = sys2.C
self.height_T1 = np.asarray([[self.setpoint - 1.]]) # water level of tank 1 in cm
self.xprime = np.asarray([[self.setpoint - 1.]])
self.flowrate_T1 = (self.C - self.A) / self.B
self.state_normalizer = 10. # 10
# resets the environment to initial values
def reinit_the_system(self):
timespan = np.linspace(0, 100, 101)
omega = 0.3
self.sinfunction = 8 * np.sin(omega * timespan) + 2 # 10
self.sinfunction2 = 11 * np.sin(omega * timespan) + 6 # 15 SP varying tau
self.processgain = self.sinfunction[int(self.setpoint)]
x = sym.Symbol('x')
self.processtau = self.sinfunction2[int(self.setpoint)]
# self.processtau = 20
type2 = sym.Poly((self.processtau * x + 1))
type2_c = list(type2.coeffs())
type2_c = np.array(type2_c, dtype=float)
sys2 = signal.TransferFunction([self.processgain], type2_c)
sys2 = sys2.to_ss()
sys2 = sys2.to_discrete(1)
if self.isoffline:
self.A = sys2.A * 0.9
self.B = sys2.B * 0.9
self.C = sys2.C * 0.9
else:
self.A = sys2.A
self.B = sys2.B
self.C = sys2.C
def reset_reward(self):
self.error_sum = 0
self.no_of_error = 0
self.flowrate_buffer = []
def reset(self):
# This method resets the model and defines the initial values of each property
# self.height_T1 = np.asarray([[0.]]) # Values calculated to be stable at 35% flowrate (below first valve)
self.height_T1 = np.asarray([[self.setpoint - 1.]]) / self.C # water level of tank 1 in cm
self.xprime = np.asarray([[self.setpoint - 1.]]) / self.C
self.flowrate_T1 = (self.C - self.A) / self.B
self.Lambda = 0
self.C1 = 0 # Kp penalty
self.C2 = 0 # taui penalty
self.C3 = 0 # CV penalty
self.C4 = 0 # MV penalty
self.breach = 0
self.constrain_contribution = 0 # constrain to be multiplied by lambda
# initialize PID settings
self.kp1 = 1.2 # 1.2
self.ti1 = 15 # 15
self.time_step = 0 # initial time_step
self.old_error1 = 0 # initialize errors as zeros
# normalized error between the water level in tank 1 and the set point
self.error_sum = 0
self.no_of_error = 0
self.flowrate_buffer = []
error_T1 = self.setpoint - self.height_T1
self.no_of_error += 1 # Increment the number of errors stored by 1
self.error_sum += np.square(error_T1) # Sum of error square
self.new_error1 = error_T1
self.height_T1_record = []
self.flowrate_T1_record = []
self.setpoint_T1_record = []
self.kp_record = []
self.ti_record = []
current_state = [self.setpoint / self.state_normalizer] # 100. is the max level
return np.asarray(current_state)
def update_pid(self, pi_parameters):
# This method updates the PID settings based on the action
self.kp1 = pi_parameters[0]
self.ti1 = pi_parameters[1]
def pid_controller(self):
# This method calculates the PID results based on the errors and PID parameters.
# Uses the velocity form of the equation
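# Velocity (incremental) PI form, assuming a unit sample time:
#     delta_u_k = Kp * (e_k - e_{k-1} + e_k / tau_I)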
del_fr_1 = self.kp1 * (self.new_error1 - self.old_error1 + self.new_error1 / self.ti1)
del_flow_rate = [del_fr_1]
# self.flowrate_1_buffer.append(del_fr_1)
return np.asarray(del_flow_rate)
def get_setpoints(self):
return self.setpoint
# changes the set points
def set_setpoints(self, setpoints_T1=None):
if setpoints_T1 is not None:
self.setpoint = setpoints_T1
# the environment reacts to the inputted action
def step(self, delta_flow_rate, disturbance=0):
# if no value for the valves is given, the valves default to this configuration
overflow = 0
pump_bound = 0
self.flowrate_T1 += delta_flow_rate[0] # updating the flow rate of pump 1 given the change in flow rate
if self.flowrate_T1 > 100:
pump_bound += abs(self.flowrate_T1 - 100)
elif self.flowrate_T1 < 0:
pump_bound += abs(self.flowrate_T1)
if disturbance == 5:
valves = [1, 1, 1, 1, 1, 1, 1, 0, 1]
else:
self.height_T1 = self.height_T1
valves = [1, 1, 1, 1, 1, 0, 1, 0, 1]
self.flowrate_T1 = np.clip(self.flowrate_T1, 0, 100) # bounds the flow rate of pump 1 between 0% and 100%
setpoint_T1 = self.setpoint
self.height_T1 = self.xprime
self.xprime = self.height_T1 * self.A + self.flowrate_T1 * self.B
self.height_T1 = self.height_T1 * self.C
self.height_T1 = np.clip(self.height_T1, 0, 43.1)
if disturbance == 1:
self.height_T1 = self.height_T1 + 0.1
elif disturbance == 2:
self.height_T1 = self.height_T1 + 0.3
elif disturbance == 3:
self.height_T1 = self.height_T1 + 0.5
elif disturbance == 4:
self.height_T1 = self.height_T1 + 1
else:
self.height_T1 = self.height_T1
if self.kp1 > self.KP_MAX:
self.C1 = abs(self.kp1 - self.KP_MAX)
elif self.kp1 < self.KP_MIN:
self.C1 = abs(self.kp1 - self.KP_MIN)
if self.ti1 > self.TAU_MAX:
self.C2 = abs(self.ti1 - self.TAU_MAX)
elif self.ti1 < self.TAU_MIN:
self.C2 = abs(self.ti1 - self.TAU_MIN)
if self.height_T1 > self.CV_MAX: # MV_MAX
self.C3 = abs(self.height_T1 - self.CV_MAX)
elif self.height_T1 < self.CV_MIN:
self.C3 = abs(self.height_T1 - self.CV_MIN)
if self.flowrate_T1 > self.MV_MAX: # MV_MAX
self.C4 = abs(self.flowrate_T1 - self.MV_MAX)
elif self.flowrate_T1 < self.MV_MIN:
self.C4 = abs(self.flowrate_T1 - self.MV_MIN)
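# C1..C4 are soft-constraint violations: how far Kp, Ti, the controlled variable
# (tank level) and the manipulated variable (flow rate) stray outside their allowed
# ranges. They are combined into one weighted penalty below and later scaled by the
# Lagrange multiplier in get_reward().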
self.constrain_contribution = float(abs(W1 * self.C1 + W2 * self.C2 + W3 * self.C3 + W4 * self.C4))
self.height_T1_record.append(self.height_T1.item())
self.flowrate_T1_record.append(self.flowrate_T1.item())
self.setpoint_T1_record.append(setpoint_T1)
self.kp_record.append(self.kp1) # store the current kp
self.ti_record.append(self.ti1) # store the current ti1
# calculates the difference between the current water level and its set point in tanks 1 and 3
# store error as old error since it will be updated soon
self.old_error1 = self.new_error1
error_T1 = setpoint_T1 - self.height_T1
self.no_of_error += 1
self.error_sum += np.square(error_T1)
self.new_error1 = error_T1
# normalizes the heights and errors and returns them as the environment's state
next_state = [self.setpoint / self.state_normalizer]
self.time_step += 1 # updates elapsed time
if self.time_step >= 1000: # terminates the process if the time elapsed reaches the maximum
done = True
self.ep_num += 1
else:
done = False
# returns the next state, reward, and if the episode has terminated or not
return np.asarray(next_state), done
def get_reward(self):
# This method calculates all required factors for reward calculation
mse = self.error_sum / self.no_of_error # Sum of error square over the number of errors
# var_action = np.var(self.flowrate_1_buffer) # Variance of change in flowrate
# next_reward_comp = [mse / MSE_MAX, var_action / VAR_MAX, self.breach[0] / EXPLORE_KP,
# self.breach[1] / EXPLORE_TI] # Normalized based on the max values
# reward = -W1 * abs(next_reward_comp[0]) - W2 * abs(next_reward_comp[1]) \
# - W3 * abs(next_reward_comp[2]) - W4 * abs(next_reward_comp[3])
reward = - mse.item() * 100 - self.Lambda * self.constrain_contribution
self.error_sum = 0
self.no_of_error = 0
self.flowrate_buffer = []
return reward
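# The reward above is a Lagrangian-relaxed objective,
#   r = -100 * MSE(level, set point) - lambda * (W1*C1 + W2*C2 + W3*C3 + W4*C4),
# where lambda (self.Lambda) is the multiplier updated by the worker after each
# episode, trading constraint violations off against tracking error.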
# Network for the Actor Critic
class ACNet(object):
def __init__(self, scope, sess, globalAC=None):
self.sess = sess
self.actor_optimizer = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA') # optimizer for the actor
self.critic_optimizer = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC') # optimizer for the critic
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S') # state
self.a_params, self.c_params = self._build_net(scope)[-2:] # parameters of actor and critic net
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S') # state
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A') # action
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget') # v_target value
mu, sigma, self.v, self.a_params, self.c_params = self._build_net(
scope) # get mu and sigma of estimated action from neural net
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
mu, sigma = mu * A_BOUND[1], sigma + 1e-4
normal_dist = tf.contrib.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * td
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
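# Standard A3C losses: the critic minimizes the squared TD error,
#   c_loss = E[(v_target - V(s))^2],
# and the actor maximizes log pi(a|s) * TD plus an entropy bonus,
#   a_loss = -E[log_prob * td + ENTROPY_BETA * entropy],
# where the entropy term keeps the Gaussian policy from collapsing too early.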
with tf.name_scope('choose_a'): # use local params to choose action
if not DETERMINISTIC:
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), A_BOUND[0],
A_BOUND[1]) # sample an action from the distribution
else:
self.A = tf.clip_by_value(mu, A_BOUND[0], A_BOUND[1])
with tf.name_scope('local_grad'):
self.a_grads = tf.gradients(self.a_loss,
self.a_params) # calculate gradients for the network weights
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'): # update local and global network weights
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = self.actor_optimizer.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = self.critic_optimizer.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self, scope): # neural network structure of the actor and critic
w_init = tf.random_normal_initializer(0., .1)
b_init = tf.zeros_initializer()
if LOAD_WEIGHTS:
# filename = 'Actor_Network_sin_1_baseline.pkl'
# filename1 = 'Critic_Network_sin_1_baseline.pkl'
loadnumber = 370701
filename = f'./Actor_Network_sin_{loadnumber:.0f}.pkl'
filename1 = f'./Critic_Network_sin_{loadnumber:.0f}.pkl'
with open(filename, 'rb') as f:
actor_params_init = pickle.load(f)
with open(filename1, 'rb') as f1:
critic_params_init = pickle.load(f1)
w_la = tf.constant_initializer(actor_params_init[0])
b_la = tf.constant_initializer(actor_params_init[1])
w_mu = tf.constant_initializer(actor_params_init[2])
b_mu = tf.constant_initializer(actor_params_init[3])
w_sigma = tf.constant_initializer(actor_params_init[4])
b_sigma = tf.constant_initializer(actor_params_init[5])
w_lc = tf.constant_initializer(critic_params_init[0])
b_lc = tf.constant_initializer(critic_params_init[1])
w_v = tf.constant_initializer(critic_params_init[2])
b_v = tf.constant_initializer(critic_params_init[3])
else:
w_la = w_init
b_la = b_init
w_mu = w_init
b_mu = b_init
w_sigma = w_init
b_sigma = b_init
w_lc = w_init
b_lc = b_init
w_v = w_init
b_v = b_init
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_la, bias_initializer=b_la, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_mu, bias_initializer=b_mu,
name='mu') # estimated action value
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_sigma, bias_initializer=b_sigma,
name='sigma') # estimated variance
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_lc, bias_initializer=b_lc, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_v, bias_initializer=b_v,
name='v') # estimated value for state
a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
return mu, sigma, v, a_params, c_params
def update_global(self, feed_dict): # run by a local
self.sess.run([self.update_a_op, self.update_c_op], feed_dict) # apply local grads to the global net
def pull_global(self): # run by a local
self.sess.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
s = s[np.newaxis, :]
return self.sess.run(self.A, {self.s: s})[0]
# worker class that creates its own environment, trains on it and uploads weights to the global net
class Worker(object):
def __init__(self, name, globalAC, sess):
global ISOFFLINE
self.setpoint = 2
# self.env = ThreeTankEnv(sess, self.setpoint, ISOFFLINE) # make environment for each worker
self.name = name
self.AC = ACNet(name, sess, globalAC) # create ACNet for each worker
self.sess = sess
def work(self):
global global_rewards, global_constraints, global_episodes, prev_best, prev_best_param
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not coord.should_stop() and global_episodes < MAX_GLOBAL_EP:
# self.setpoint = random.choice([1, 2])
# self.setpoint = random.choice([1, 2])
# self.setpoint = random.choice([4, 5])
self.setpoint = random.choice([3, 4])
self.env = ThreeTankEnv(sess, self.setpoint, ISOFFLINE) # make environment for each worker
s = self.env.reset()
self.env.setpoint = self.setpoint - 1
for i in range(100):
_, _ = self.env.step(self.env.pid_controller())
self.env.setpoint = self.setpoint
self.env.reset_reward(), self.env.reinit_the_system()
ep_r = 0
ep_c = 0
for ep_t in range(MAX_EP_STEP):
a = self.AC.choose_action(s) # estimate stochastic action based on policy
# s_, r, done, info = self.env.step(a) # make step in environment
action_multiplier = [5, 5]
self.env.update_pid(action_multiplier * a)
for _ in range(1000):
s_, _ = self.env.step(self.env.pid_controller())
done = True
r = self.env.get_reward()/20
# done = True if ep_t == MAX_EP_STEP - 1 else False
print(f'{global_episodes:.0f}| r:{r:.2f}, c:{self.env.constrain_contribution:.2f}',
"|", action_multiplier * a)
ep_r += r
ep_c += self.env.constrain_contribution
# save actions, states and rewards in buffer
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r + 8) / 8) # normalize reward
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = self.sess.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
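# buffer_v_target now holds bootstrapped n-step returns computed backwards through
# the rollout, R[t] = r[t] + GAMMA * R[t+1], seeded with V(s') (or 0 at a terminal
# state); these serve as the critic targets and, via the TD error, as the actor's
# advantage estimate.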
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(
buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict) # actual training step, update global ACNet
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global() # get global parameters to local ACNet
s = s_
total_step += 1
if done:
if len(global_rewards) < 5: # record running episode reward
global_rewards.append(ep_r)
global_constraints.append(ep_c)
else:
global_rewards.append(ep_r)
global_rewards[-1] = (np.mean(global_rewards[-5:])) # smoothing
global_constraints.append(ep_c)
global_constraints[-1] = (np.mean(global_constraints[-5:])) # smoothing
Loss_c = (ep_c - CONSTRAIN_ALPHA) # bug
# print(f'test{self.env.Lambda + CONSTRAIN_LR * Loss_c}')
self.env.Lambda = max(0., self.env.Lambda + CONSTRAIN_LR * Loss_c) # bug
print(f'lambda={self.env.Lambda:.5f}')
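# Projected dual ascent on the Lagrange multiplier:
#   lambda <- max(0, lambda + CONSTRAIN_LR * (episode_constraint - CONSTRAIN_ALPHA))
# so lambda grows while the episode's constraint cost exceeds the budget
# CONSTRAIN_ALPHA and shrinks back towards 0 once it is met.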
savenumber = 370711
if global_episodes == MAX_GLOBAL_EP - 1:
AC_Saved_File = open(f'Reward_sin_{savenumber:.0f}.pkl', 'wb')
pickle.dump(global_rewards, AC_Saved_File)
AC_Saved_File.close()
AC_Saved_File2 = open(f'Constrain_sin_{savenumber:.0f}.pkl', 'wb')
pickle.dump(global_constraints, AC_Saved_File2)
AC_Saved_File2.close()
if global_rewards[-1] > prev_best:
prev_best_param = [self.env.kp1, self.env.ti1]
prev_best = global_rewards[-1]
print(
self.name,
"Ep:", global_episodes,
"| Ep_r: %i" % global_rewards[-1],
f'| KP: {self.env.kp1:.2f} taui: {self.env.ti1:.2f}'
f"| Best {prev_best:.0f}",
f"| Best_params: {prev_best_param[0]:.1f}, {prev_best_param[1]:.1f}"
)
if SAVE_WEIGHTS:
self.AC.pull_global() # get global parameters to local ACNet
saved_actor = self.sess.run(self.AC.a_params)
saved_critic = self.sess.run(self.AC.c_params)
fileName = f'Actor_Network_sin_{savenumber:.0f}.pkl'
fileName1 = f'Critic_Network_sin_{savenumber:.0f}.pkl'
AC_Saved_File = open(fileName, 'wb')
pickle.dump(saved_actor, AC_Saved_File)
AC_Saved_File.close()
AC_Saved_File = open(fileName1, 'wb')
pickle.dump(saved_critic, AC_Saved_File)
AC_Saved_File.close()
global_episodes += 1
break
if __name__ == "__main__":
global_rewards = []
global_constraints = []
global_episodes = 0
sess = tf.Session()
with tf.device("/cpu:0"):
global_ac = ACNet(GLOBAL_NET_SCOPE, sess) # we only need its params
workers = []
# Create workers
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, global_ac, sess))
coord = tf.train.Coordinator()
sess.run(tf.compat.v1.global_variables_initializer())
if OUTPUT_GRAPH: # write log file
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.compat.v1.summary.FileWriter(LOG_DIR, sess.graph)
worker_threads = []
for worker in workers: # start workers
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
coord.join(worker_threads) # wait for termination of workers
plt.figure()
plt.plot(np.arange(len(global_rewards)), global_rewards) # plot rewards
plt.xlabel('Episodes')
plt.ylabel('-MSE')
# plt.title(f"{GAMMA}")
plt.title("Episodic Returns")
plt.show()
plt.figure()
plt.plot(np.arange(len(global_constraints)), global_constraints) # plot rewards
plt.xlabel('Episodes')
plt.ylabel('-constraints')
# plt.title(f"{GAMMA}")
plt.title("Episodic Constraints")
plt.show()
|
Messenger_Client.py
|
import threading
from messenger_package.Messenger_Socket import *
from messenger_package.Messenger_Functions import *
class MessengerClient():
def __init__(self):
print("client init!")
self.message_header = ""
self.last_message_sent = ""
def start(self, addr, sock, username, user_id):
print()
try:
print("client start!")
user_thread = threading.Thread(
target=self.get_user_input, args=(sock,))
print("user_thread")
recieve_thread = threading.Thread(
target=self.receive_from_server, args=(sock,))
print("recieve_thread")
sock.connect(addr)
print("sock.connect(addr)")
connect_message = sock.recv(1024).decode('ascii')
if connect_message == 'INFO':
print("if connect_message == INFO:")
info = f"{user_id}::::{username}"
sock.send(info.encode('ascii'))
recieve_thread.start()
user_thread.start()
else:
print("Server has not accepted connection")
except:
print("Messenger_Client.py: Could not start messenger client")
sock.close()
quit()
def get_user_input(self, client):
while True:
try:
user_input = input("")
self.message_header = MessengerClient.parse_input(
client, self.message_header, user_input)
message = self.message_header + user_input
client.send(message.encode('ascii'))
self.last_message_sent = message
except:
break
def receive_from_server(self, client):
while True:
try:
recv_message = client.recv(1024).decode('ascii')
print(recv_message)
response = MessengerClient.parse_recieve(
client, recv_message, self.last_message_sent)
if recv_message != response:
self.message_header = response
except:
print("An exception occurred during message recieve")
client.close()
quit()
@staticmethod
def parse_recieve(client, recv_message, last_message_sent):
if recv_message.startswith('DIRECT'):
print(f"starting direct message with {last_message_sent[2:]}")
return f"{recv_message[10:]}::::"
elif recv_message == 'REFUSE' or recv_message == '':
client.close()
quit()
return recv_message
@staticmethod
def parse_input(client, message_header, input_message):
if input_message == "--exit":
client.close()
quit()
elif input_message == '--all':
return ""
return message_header
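# Hypothetical usage sketch (address, port and credentials are illustrative only; the
# real entry point and socket construction live elsewhere in the package):
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   MessengerClient().start(("127.0.0.1", 5500), sock, "alice", 1)
# start() expects the server to answer the connection with the string 'INFO', replies
# with "<user_id>::::<username>", then runs one thread reading stdin and one thread
# printing everything received from the server.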
|
receiver_emu.py
|
#!/usr/bin/env python
###############################################################################
# Imports
###############################################################################
from gnuradio import zeromq
from gnuradio import gr
from gnuradio import blocks
from gnuradio import analog
from gnuradio import eng_notation
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from optparse import OptionParser
from numpy import genfromtxt
import numpy as np
import numpy.matlib
import sys
import os
import threading
import time
import socket
from grc_gnuradio import blks2 as grc_blks2
#from gnuradio import digital
sys.path.append("../python")
import rpc_manager as rpc_manager_local
import pmt
from gpsconfig import *
###############################################################################
# GNU Radio top_block
###############################################################################
class top_block(gr.top_block):
def __init__(self, options):
gr.top_block.__init__(self)
self.c = 299700000
self.options = options
self.cnt = 0
self.run_loop = False
self.samp_rate = 100000000
self.hostname = os.uname()[1]
self.gps = "emu_"
self.id_rx = options.id_rx
self.noise_amp = 1/np.sqrt(np.power(10,options.snr/10))
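# noise amplitude for a unit-power transmit signal (the emulated symbols are +/-1):
# SNR_dB = -20*log10(noise_amp), i.e. noise_amp = 10**(-snr/20) = 1/sqrt(10**(snr/10))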
self.modulation = options.modulation
self.seed = 10
self.delay = options.delay
self.samples_to_receive = 300
self.samples_to_receive_calibration = 1000
self.freq = 550000000
coordinates_string = options.coordinates_m.split(",")
self.coordinates = (float(coordinates_string[0]),float(coordinates_string[1]))
tx_coordinates_string = options.tx_coordinates.split(",")
self.tx_coordinates = np.array([float(tx_coordinates_string[0]),float(tx_coordinates_string[1])])
# socket addresses
rpc_port = 6665 + options.id_rx
rpc_adr = "tcp://*:" + str(rpc_port)
fusion_center_adr = "tcp://" + options.fusion_center + ":6665"
probe_port = 5555 + options.id_rx
probe_adr = "tcp://*:" + str(probe_port)
self.stp_cnt = 0
if options.movement_file != "EMPTY":
self.track_coordinates = genfromtxt(options.movement_file, delimiter=',')
print self.track_coordinates
self.tx_coordinates = self.track_coordinates[0,:]
else:
self.track_coordinates = np.array([])
# blocks
self.zmq_probe = zeromq.pub_sink(gr.sizeof_gr_complex, 1, probe_adr, 10, True)
self.mod_block = ModulatorBlock(self.seed, self.samp_rate, self.noise_amp, self.modulation, self.delay, self.samples_to_receive, self.freq, self.id_rx)
self.seed += 1
# connects
self.connect(self.mod_block, self.zmq_probe)
# ZeroMQ
self.rpc_manager = rpc_manager_local.rpc_manager()
self.rpc_manager.set_reply_socket(rpc_adr)
self.rpc_manager.set_request_socket(fusion_center_adr)
self.rpc_manager.add_interface("start_fg",self.start_fg)
self.rpc_manager.add_interface("set_gain",self.set_gain)
self.rpc_manager.add_interface("set_samp_rate",self.set_samp_rate)
self.rpc_manager.add_interface("set_bw",self.set_bw)
self.rpc_manager.add_interface("set_antenna",self.set_antenna)
self.rpc_manager.add_interface("get_gps_position",self.get_gps_position)
self.rpc_manager.add_interface("set_run_loop",self.set_run_loop)
self.rpc_manager.add_interface("sync_time",self.sync_time)
self.rpc_manager.add_interface("program_gps_position",self.program_gps_position)
self.rpc_manager.add_interface("stop_transmitter", self.stop_transmitter)
self.rpc_manager.add_interface("start_transmitter", self.start_transmitter)
self.rpc_manager.start_watcher()
# Find out ip address
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if not options.ssh_proxy:
s.connect((options.fusion_center,6665))
else:
s.connect(("www.rwth-aachen.de",80))
self.ip_addr = s.getsockname()[0]
def set_run_loop(self, run_loop):
self.run_loop = run_loop
def set_samp_rate(self,samp_rate):
print "samp_rate set to", samp_rate
self.samp_rate = samp_rate
def set_bw(self,bw):
return
def set_gain(self,gain):
return
def set_antenna(self,antenna):
return
def sync_time(self):
print "Reset seed"
self.seed = 10
def stop_transmitter(self):
pass
def start_transmitter(self):
print "!!!!!!!!!!!!!!start transmitter called!!!!!!!!!!!!!"
self.run_loop = False
j = 0
while j < 3:
self.track_coordinates = np.delete(self.track_coordinates,0,0)
j += 1
self.seed += 3
def register_receiver(self):
first = True
while(True):
# register receiver [hostname, usrp_serial, rx_id]
self.rpc_manager.request("register_receiver",[self.ip_addr, self.hostname + "_emu" + str(options.id_rx), self.options.id_rx, self.gps, first, self.coordinates])
first = False
time.sleep(10)
def start_fg(self, samples_to_receive, freq, lo_offset, bw, gain, samples_to_receive_calibration, freq_calibration, lo_offset_calibration, bw_calibration, gain_calibration, time_to_recv, auto_calibrate, acquisitions, acquisition_time):
print "reception loop started"
threading.Thread(target = self.start_reception, args = (samples_to_receive, freq, lo_offset, bw, gain, samples_to_receive_calibration, freq_calibration, lo_offset_calibration, bw_calibration, gain_calibration, time_to_recv, auto_calibrate, acquisitions, acquisition_time)).start()
def start_reception(self, samples_to_receive, freq, lo_offset, bw, gain, samples_to_receive_calibration, freq_calibration, lo_offset_calibration, bw_calibration, gain_calibration, time_to_recv, auto_calibrate, acquisitions, acquisition_time):
print acquisitions
self.freq = freq
self.freq_calibration = freq_calibration
auto_delay = 5 # delay simulation of auto calibration
if acquisitions == 0:
infinity = True
else:
infinity = False
times = 1
if auto_calibrate: times = 2
# change loop structure? (confusing)
while True:
for i in range(0,times):
self.samples_to_receive = samples_to_receive
self.samples_to_receive_calibration = samples_to_receive_calibration
self.stop()
self.wait()
#self.disconnect(self.mod_block, self.zmq_probe)
# blocks
if i == 1:
print "Sending " + str(samples_to_receive_calibration) + " samples for autocalibtration"
delay = auto_delay
self.mod_block.next_vector(self.seed, self.samp_rate, self.noise_amp, self.modulation, delay, self.samples_to_receive, self.freq, self.id_rx)
else:
print "Sending " + str(samples_to_receive) + " samples"
if self.delay == 0:
# calculate delay from transmitter position
if len(self.track_coordinates):
# update target location for next acquisition
self.tx_coordinates = self.track_coordinates[0,:]
self.track_coordinates = np.delete(self.track_coordinates,0,0)
print self.tx_coordinates
delay = self.get_delay_from_location(self.tx_coordinates)
print "Delay is " + str(delay)
else:
delay = self.delay
print delay
self.mod_block.next_vector(self.seed, self.samp_rate, self.noise_amp, self.modulation, delay, self.samples_to_receive, self.freq, self.id_rx)
self.seed += 1
# connects
#self.connect(self.mod_block, self.zmq_probe)
self.start()
time.sleep(acquisition_time-0.05)
acquisitions -= 1
if not self.run_loop or (acquisitions <= 0 and not infinity):
break
time.sleep(acquisition_time)
def get_gps_position(self):
if self.options.coordinates_wgs84 != "":
coordinates_wgs84_string = self.options.coordinates_wgs84.split(",")
latitude = float(coordinates_wgs84_string[0])
longitude = float(coordinates_wgs84_string[1])
else:
longitude = 6.062
latitude = 50.7795
# basemap requires [long,lat]; we want to put in [lat,long] => swap
return [longitude, latitude]
def get_delay_from_location(self,transmitter_coordinates):
print self.samp_rate
delay=int((np.linalg.norm(np.array(list(self.coordinates))-transmitter_coordinates)/self.c)*self.samp_rate)
return delay
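# Propagation delay expressed in samples:
#   delay = floor(||rx - tx|| / c * samp_rate)
# with c approximated as 2.997e8 m/s above; at 100 MS/s one sample corresponds to
# roughly 3 m of path-length difference.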
def program_gps_position(self, latitude, longitude, altitude):
# needed in the ublox settings; for now we assume at least dm accuracy
ground_truth_accuracy = 0.1
set_ublox_coordinates_fixed(latitude, longitude, altitude, ground_truth_accuracy)
#add checker if it worked
class ModulatorBlock(gr.hier_block2):
def __init__(self, seed, samp_rate, noise_amp, modulation, delay, samples_to_receive, freq, rx_id):
gr.hier_block2.__init__(self, "ModulatorBlock",
gr.io_signature(0, 0, 0),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
# Timing tag: This is preserved and updated:
timing_tag = gr.tag_t()
timing_tag.offset = 0
timing_tag.key = pmt.string_to_symbol('rx_time')
timing_tag.value = pmt.to_pmt((float(seed), 0.6))
timing_tag.srcid = pmt.string_to_symbol(str('gr uhd usrp source1'))
# Rx freq tags:
#print "In source emulation (before tag)"
#print freq
rx_freq_tag = gr.tag_t()
rx_freq_tag.offset = 0
rx_freq_tag.key = pmt.string_to_symbol('rx_freq')
rx_freq_tag.value = pmt.from_double(freq)
rx_freq_tag.srcid = pmt.string_to_symbol(str('gr uhd usrp source1'))
# Samp_rate tags:
rx_rate_tag = gr.tag_t()
rx_rate_tag.offset = 0
rx_rate_tag.key = pmt.string_to_symbol('rx_rate')
rx_rate_tag.value = pmt.from_double(samp_rate)
rx_rate_tag.srcid = pmt.string_to_symbol(str('gr uhd usrp source1'))
add = blocks.add_vcc(1, )
tag_debug = blocks.tag_debug(gr.sizeof_gr_complex*1, "", "")
tag_debug.set_display(True)
#if modulation == "bpsk":
# mod = digital.psk.psk_mod(
# constellation_points=2,
# mod_code="none",
# differential=True,
# samples_per_symbol=2,
# excess_bw=0.1,
# verbose=False,
# log=False,
# )
#else:
# mod = grc_blks2.packet_mod_b(digital.ofdm_mod(
# options=grc_blks2.options(
# modulation="qpsk",
# fft_length=4096,
# occupied_tones=200,
# cp_length=0,
# pad_for_usrp=False,
# log=None,
# verbose=None,
# ),
# ),
# payload_length=0,
# )
#print "in source emulation(after_tag)"
#print pmt.to_double(rx_freq_tag.value)
pulse_width = 4
np.random.seed(seed=seed)
tx_vector = np.reshape(np.matlib.repmat(np.random.randint(0,2,(5*samples_to_receive)/pulse_width)*2-1,pulse_width,1).T,[1,5*samples_to_receive])[0].tolist()
# delay the signal vector -> insert zeros at the beginning; nothing arrives until the signal has reached the receiver:
tx_vector_delayed = np.hstack((np.zeros(delay),tx_vector))
#tx_vector_delayed = tx_vector_delayed[:600]
self.vector_source = blocks.vector_source_c(tx_vector_delayed, False, 1, (timing_tag, rx_freq_tag, rx_rate_tag))
#clip first 600 samples
self.head = blocks.head(gr.sizeof_gr_complex*1, samples_to_receive + 300)
# skiphead= blocks.skiphead(gr.sizeof_gr_complex*1,delay)
throttle = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
noise = analog.noise_source_c(analog.GR_GAUSSIAN, noise_amp, -seed)
# connects
#self.connect(vector_source, mod, (add,0))
self.connect(self.vector_source, (add,0))
self.connect(noise, (add,1))
self.connect(add, throttle, self.head, self)
self.connect(add, tag_debug)
'''
f_sink = blocks.file_sink(gr.sizeof_gr_complex,"log_rx_"+str(rx_id)+".txt")
self.connect(add, f_sink)
'''
def next_vector(self, seed, samp_rate, noise_amp, modulation, delay, samples_to_receive, freq, rx_id):
timing_tag = gr.tag_t()
timing_tag.offset = 0
timing_tag.key = pmt.string_to_symbol('rx_time')
timing_tag.value = pmt.to_pmt((float(seed), 0.6))
timing_tag.srcid = pmt.string_to_symbol(str('gr uhd usrp source1'))
# Rx freq tags:
#print "In source emulation (before tag)"
#print freq
rx_freq_tag = gr.tag_t()
rx_freq_tag.offset = 0
rx_freq_tag.key = pmt.string_to_symbol('rx_freq')
rx_freq_tag.value = pmt.from_double(freq)
rx_freq_tag.srcid = pmt.string_to_symbol(str('gr uhd usrp source1'))
# Samp_rate tags:
rx_rate_tag = gr.tag_t()
rx_rate_tag.offset = 0
rx_rate_tag.key = pmt.string_to_symbol('rx_rate')
rx_rate_tag.value = pmt.from_double(samp_rate)
rx_rate_tag.srcid = pmt.string_to_symbol(str('gr uhd usrp source1'))
pulse_width = 4
np.random.seed(seed=seed)
tx_vector = np.reshape(np.matlib.repmat(np.random.randint(0,2,(5*samples_to_receive)/pulse_width)*2-1,pulse_width,1).T,[1,5*samples_to_receive])[0].tolist()
# delay the signal vector -> insert zeros at the beginning; nothing arrives until the signal has reached the receiver:
tx_vector_delayed = np.hstack((np.zeros(delay),tx_vector))
#tx_vector_delayed = tx_vector_delayed[:600]
print len(tx_vector_delayed)
self.vector_source.set_data(tx_vector_delayed,(timing_tag, rx_freq_tag, rx_rate_tag))
self.head.reset()
self.vector_source.rewind()
###############################################################################
# Options Parser
###############################################################################
def parse_options():
""" Options parser. """
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
parser.add_option("-s", "--serial", type="string", default="",
help="USRP serial number")
parser.add_option("", "--fusion-center", type="string", default="localhost",
help="Fusion center address")
parser.add_option("-g", "--gps", type="string", default="lc_xo",
help="GPS type")
parser.add_option("-i", "--id-rx", type="int", default="1",
help="Receiver ID")
parser.add_option("-d", "--delay", type="int", default="0",
help="Delay")
parser.add_option("", "--snr", type="float", default="20",
help="SNR")
parser.add_option("-m", "--modulation", type="string", default="ofdm",
help="Modulation type (BPSK/OFDM)")
parser.add_option("", "--coordinates-m", type="string", default="0.0,0.0",
help="Receiver coordinates in meters")
parser.add_option("", "--coordinates-wgs84", type="string", default="",
help="Receiver coordinates in meters")
parser.add_option("", "--dot-graph", action="store_true", default=False,
help="Generate dot-graph file from flowgraph")
parser.add_option("", "--ssh-proxy", action="store_true", default=False,
help="Activate when using a ssh proxy")
parser.add_option("-t", "--tx_coordinates", type="string", default="0.0,0.0",
help="Transmitter starting position for tracking simulations")
parser.add_option("", "--movement-file", type="string", default="EMPTY",
help="csv file with target coordinates. Generate e.g. with MATLAB")
(options, args) = parser.parse_args()
return options
###############################################################################
# Main
###############################################################################
if __name__ == "__main__":
options = parse_options()
tb = top_block(options)
if options.dot_graph:
# write a dot graph of the flowgraph to file
dot_str = tb.dot_graph()
file_str = os.path.expanduser('flowgraph.dot')
dot_file = open(file_str,'w')
dot_file.write(dot_str)
dot_file.close()
print "flowgraph can be found in: "+ file_str
try:
#tb.start()
tb.timer_register = threading.Thread(target = tb.register_receiver)
tb.timer_register.daemon = True
tb.timer_register.start()
# keep the program running when flowgraph is stopped
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
print "Shutting down flowgraph."
tb.rpc_manager.stop_watcher()
tb.stop()
tb.wait()
tb = None
|
auto_optimization.py
|
"""
This module contains the algorithm for optimizing the costs of energy systems.
"""
from datetime import datetime
from scipy.optimize import fmin_l_bfgs_b
import calendar
import cProfile
import copy
from collections import namedtuple
import numpy as np
from numpy import array
from server.devices.base import BaseEnvironment
from server.functions import get_configuration
import multiprocessing
from multiprocessing.process import Process
import os
from server.settings import BASE_DIR
from csv import writer
import dateutil
DEFAULT_FORECAST_INTERVAL = 1 * 3600.0
"""The interval for how long one auto_optimize will forecast and for how long one specific workload is set.
Note, that this constant also represents a compromise: Shorter intervals can adjust to quick changes,
f.e. electricity demands changes, while longer intervals can incorporate more forecasts, but wont be able
to adjust quickly.
The interval of one hour lead to good results in our tests.
"""
def auto_optimize(forecast):
""" Tries to optimize the cost and sets the ``cu.overwrite_workload``
The method forecasts from ``env.now`` with different cu workloads and finds the one with the
lowest cost. The length of the forecast is :attr:`DEFAULT_FORECAST_INTERVAL`.
:param forecast: the forecast to be optimized
"""
optimized_config = find_optimal_config(forecast.env.now, forecast)  # find_optimal_config expects the start time as its first argument
cu = forecast.getCU()
cu.overwrite_workload = float(optimized_config["cu_overwrite_workload"])
print "optimization round at time: ",datetime.fromtimestamp(forecast.env.now),":", optimized_config
def find_optimal_config(initial_time, forecast):
""" ``Internal Method`` Main method, which optimizes the costs by running a global
approximation for the best configuration and then running a local minimization
method on this approximation"""
prices = {}
prices["gas_costs"] = get_configuration('gas_costs')
prices["electrical_costs"] = get_configuration('electrical_costs')
rewards = {}
rewards["thermal_revenues"] = get_configuration('thermal_revenues')
rewards["warmwater_revenues"] = get_configuration('warmwater_revenues')
rewards["electrical_revenues"] = get_configuration('electrical_revenues')
rewards["feed_in_reward"] = get_configuration('feed_in_reward')
arguments = (initial_time, forecast, prices, rewards)
#find initial approximation for parameters
results = []
for cu_load in range(0,100,10):
config = [cu_load,]
cost = estimate_cost(config, *arguments)
results.append(BilanceResult(cost, config))
boundaries = [(0.0,100.0)]
#take parameters with lowest cost
initial_parameters = min(results,key=lambda result: result.cost).params
parameters = fmin_l_bfgs_b(estimate_cost, x0 = array(initial_parameters),
args = arguments, bounds = boundaries,
approx_grad = True, factr=10**4, iprint=0,
epsilon=1, maxfun =50)
cu_workload, = parameters[0]
return {"cu_overwrite_workload":cu_workload}
def estimate_cost(params, *args):
"""``Internal Method`` copies the devices and environment, forwards it and returns the costs.
:param list params: parameter to be optimized (CU.workload for now)
:param args: (initial_time, forecast, prices, rewards)
"""
(initial_time, forecast, prices, rewards) = args
copied_devices = copy.deepcopy(forecast.devices)
cu = copied_devices.cu
cu.overwrite_workload = params[0]
simplified_forecast(cu.env, initial_time, copied_devices)
return total_costs(copied_devices, prices, rewards)
def simplified_forecast(env, initial_time, devices):
"""runs the forward loop only executing the step function"""
forward = DEFAULT_FORECAST_INTERVAL
while forward > 0:
for device in devices:
device.step()
env.now += env.step_size
forward -= env.step_size
def total_costs(devices, prices, rewards):
"""``Internal Method`` Returns the cost of a forecast run. The function uses the prices which are stored
in the db deviceconfiguration. It is also constrained by boundaries, f.e. the heatstorage should
never go below min temperature.
:param devices: The devices after the forecast
:param dict prices, rewards: Cached prices and rewards
"""
d = devices
cu,plb,ec,pm,tc,hs = d.cu,d.plb,d.ec,d.pm,d.tc,d.hs
#maintenance_costs = cu.power_on_count
gas_costs = (cu.total_gas_consumption + plb.total_gas_consumption) * prices["gas_costs"]
own_el_consumption = ec.total_consumption - pm.fed_in_electricity - pm.total_purchased
electric_rewards = pm.fed_in_electricity * rewards["feed_in_reward"] + own_el_consumption * rewards["electrical_revenues"]
electric_costs = pm.total_purchased * prices["electrical_costs"]
thermal_rewards = tc.total_consumed * rewards["thermal_revenues"]
final_cost = electric_costs-electric_rewards + gas_costs - thermal_rewards
temp = hs.get_temperature()
above_penalty = abs(min(hs.config["critical_temperature"] - temp, 0) * 1000)
below_penalty = abs(max(hs.config["min_temperature"] - temp, 0) * 1000)
small_penalties = (temp > hs.config["target_temperature"]+5) * 15 + (temp < hs.config["target_temperature"]-5) * 5
return final_cost + above_penalty + below_penalty + small_penalties
class BilanceResult(object):
""" wrapper for storing a optimization result"""
def __init__(self, cost, params):
self.params = params
self.cost = cost
####################################
######### multiprocess map #########
####################################
def multiprocess_map(target,params, *args):
mgr = multiprocessing.Manager()
dict_threadsafe = mgr.dict()
jobs = [Process(target=target_wrapper, args=(target,param,index,dict_threadsafe,args)) for index, param in enumerate(params)]
for job in jobs: job.start()
for job in jobs: job.join()
return dict_threadsafe.values()
def target_wrapper(target, params, index, dict_threadsafe, args):
dict_threadsafe[index] = BilanceResult(target(params, *args),params)
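# Hypothetical usage sketch (not exercised by the original module): multiprocess_map
# can evaluate estimate_cost for several candidate workloads in parallel, e.g.
#   results = multiprocess_map(estimate_cost,
#                              [[w] for w in range(0, 100, 10)],
#                              initial_time, forecast, prices, rewards)
#   best = min(results, key=lambda r: r.cost)
# Each entry is a BilanceResult carrying the evaluated cost and its parameters.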
|
test_path.py
|
import unittest
import multiprocessing as mp
from generalfile import *
from generalfile.test.setup_workdir import setup_workdir
def _thread_test(queue, i):
queue.put(int(Path("test.txt").write(i, overwrite=True)))
class PathTest(unittest.TestCase):
def setUp(self):
"""Set working dir and clear folder. Set path delimiter to '/' for testing."""
setup_workdir()
class FileTest(PathTest):
""" Skipped: open_folder, view, scrub"""
def test_path(self):
self.assertRaises(InvalidCharacterError, Path, "hello:there")
self.assertRaises(InvalidCharacterError, Path, "hello<")
self.assertRaises(InvalidCharacterError, Path, "hello>")
self.assertRaises(InvalidCharacterError, Path, "hello.")
def test_addPath(self):
self.assertEqual(Path("foo/bar"), Path("foo") / "bar")
self.assertEqual(Path("foo/bar"), Path("foo") / Path("bar"))
self.assertEqual(Path("foo.txt/folder"), Path("foo.txt") / "folder")
self.assertEqual(Path("folder/foo.txt"), Path("folder") / "foo.txt")
def test_parts(self):
path = Path("folder/folder2/test.txt")
self.assertEqual(["folder", "folder2", "test.txt"], path.parts())
self.assertEqual("foo", Path("foo/bar").parts()[0])
self.assertEqual("bar", Path("foo/bar").parts()[1])
self.assertEqual(not Path.verInfo.pathRootIsDelimiter, bool(Path().absolute().parts()[0]))
def test_name(self):
path = Path("folder/test.txt")
self.assertEqual("test.txt", path.name())
self.assertEqual("folder/foobar.txt", path.with_name("foobar.txt"))
self.assertEqual("folder/hi", path.with_name("hi"))
def test_stem(self):
path = Path("folder/test.txt")
self.assertEqual("test", path.stem())
self.assertEqual("folder/foobar.txt", path.with_stem("foobar"))
path = Path("folder/test.foo.txt.bar")
self.assertEqual("test.foo.txt", path.stem())
self.assertEqual("folder/foo.bar", path.with_stem("foo"))
def test_true_stem(self):
path = Path("folder/test.txt")
self.assertEqual("test", path.true_stem())
self.assertEqual("folder/foobar.txt", path.with_true_stem("foobar"))
path = Path("folder/test.foo.txt.bar")
self.assertEqual("test", path.true_stem())
self.assertEqual("folder/yo.foo.txt.bar", path.with_true_stem("yo"))
def test_suffixes(self):
path = Path("folder/test.hello.txt")
self.assertEqual([".hello", ".txt"], path.suffixes())
self.assertEqual("folder/test.tsv", path.with_suffixes(".tsv"))
def test_suffix(self):
path = Path("folder/test")
self.assertEqual("", path.suffix())
path = Path("folder/test.txt")
self.assertEqual(".txt", path.suffix())
path = path.with_suffix("")
self.assertEqual("folder/test", path)
path = path.with_suffix(None)
self.assertEqual("folder/test", path)
path = path.with_suffix(".tsv")
self.assertEqual("folder/test.tsv", path)
path = path.with_suffix("")
self.assertEqual("folder/test", path)
path = path.with_suffix(".csv")
self.assertEqual("folder/test.csv", path)
path = path.with_suffix(".BACKUP", -2)
self.assertEqual("folder/test.BACKUP.csv", path)
path = path.with_suffix(".test", -2)
self.assertEqual("folder/test.test.csv", path)
path = path.with_suffix(None, 0)
self.assertEqual("folder/test.csv", path)
path = path.with_suffix(".foo", 2)
self.assertEqual("folder/test.csv.foo", path)
path = path.with_suffix(".bar", 3)
self.assertEqual("folder/test.csv.foo.bar", path)
path = path.with_suffix(".clamped", 5)
self.assertEqual("folder/test.csv.foo.bar.clamped", path)
path = path.with_suffix(".clamped", -10)
self.assertEqual("folder/test.clamped.csv.foo.bar.clamped", path)
path = path.with_suffix(None, 10)
self.assertEqual("folder/test.clamped.csv.foo.bar", path)
path = path.with_suffix(None, -10)
self.assertEqual("folder/test.csv.foo.bar", path)
def test_parent(self):
path = Path("folder/foobar/test.txt")
self.assertEqual(Path("folder/foobar"), path.get_parent())
self.assertEqual(Path("folder/foobar"), path.get_parent(0))
self.assertEqual(Path("folder"), path.get_parent(1, 1))
self.assertEqual(Path(), path.get_parent(2, 2))
self.assertEqual(None, path.get_parent(3, 3))
self.assertEqual(None, path.get_parent(99, 99))
self.assertEqual(None, path.get_parent(-99, -99))
self.assertEqual([Path("folder/foobar"), Path("folder"), Path()], path.get_parents(depth=-1))
new_path = Path("folder/foobar/test.txt")
self.assertEqual([Path("folder/foobar"), Path("folder"), Path()], new_path.get_parents(depth=-1))
def test_startswith(self):
self.assertFalse(Path("file.txt").startswith("folder"))
self.assertTrue(Path("file.txt").startswith("file"))
self.assertFalse(Path("folder/file.txt").startswith("file.txt"))
self.assertFalse(Path("folder/file.txt").absolute().startswith("folder"))
self.assertTrue(Path("folder/file.txt").startswith("folder"))
self.assertTrue(Path("file.txt").startswith("file.txt"))
self.assertTrue(Path("file.SUFFIX.txt").startswith("file.SUFFIX.txt"))
self.assertFalse(Path("filE.txt").startswith("file.txt"))
def test_endswith(self):
self.assertFalse(Path("file.txt").endswith("folder"))
self.assertFalse(Path("file.txt").endswith("file"))
self.assertFalse(Path("folder/file.txt").endswith("folder"))
self.assertFalse(Path("folder/file.txt").absolute().endswith("file"))
self.assertTrue(Path("folder/file.txt").endswith("file.txt"))
self.assertTrue(Path("folder/file.txt").endswith("txt"))
self.assertTrue(Path("file.txt").endswith("file.txt"))
self.assertFalse(Path("filE.txt").endswith("file.txt"))
def test_remove_start(self):
self.assertEqual(Path(), Path("test.txt").remove_start("test.txt"))
self.assertEqual(Path("folder/test.txt"), Path("folder/test.txt").remove_start("Folder"))
self.assertEqual(Path("test.txt"), Path("folder/test.txt").remove_start("folder"))
self.assertEqual(Path("folder/test.txt"), Path("folder/test.txt").remove_start("test"))
if Path.verInfo.pathRootIsDelimiter:
self.assertEqual(Path("test.txt"), Path("folder/test.txt").remove_start("folder"))
def test_remove_end(self):
self.assertEqual(Path(), Path("test.txt").remove_end("test.txt"))
self.assertEqual(Path("test"), Path("test.txt").remove_end(".txt"), "test")
self.assertEqual(Path("folder"), Path("folder/test.txt").remove_end("test.txt"))
self.assertEqual(Path("folder/test.txt"), Path("folder/test.txt").remove_end("test"))
def test_absolute(self):
path = Path("test.txt")
self.assertEqual(False, path.is_absolute())
self.assertEqual(True, path.is_relative())
path = path.absolute()
self.assertEqual(True, path.is_absolute())
self.assertEqual(False, path.is_relative())
path = path.relative()
self.assertEqual(False, path.is_absolute())
self.assertEqual(True, path.is_relative())
path = Path("folder/folder2/file.txt")
self.assertEqual(Path("folder2/file.txt"), path.relative("folder"))
self.assertEqual(path.relative("folder"), "folder2/file.txt")
self.assertEqual(path.relative("folder/folder2"), "file.txt")
self.assertEqual(path, path.relative("doesntexist"))
def test_mirror_path(self):
path = Path("foo")
self.assertEqual(path.mirror_path().mirror_path(), path)
self.assertEqual(True, path.mirror_path().is_absolute())
def test_is_file_or_folder(self):
Path("folder.txt/file.txt").write()
self.assertEqual(True, Path("folder.txt").is_folder())
self.assertEqual(False, Path("folder.txt").is_file())
self.assertEqual(True, Path("folder.txt/file.txt").is_file())
self.assertEqual(False, Path("folder.txt/file.txt").is_folder())
def test_exists(self):
path = Path("folder/test.txt")
self.assertEqual(False, path.exists())
self.assertEqual(False, Path("folder").exists())
path.write()
self.assertEqual(True, path.exists())
self.assertEqual(True, Path("folder").exists())
self.assertEqual(False, Path("folder/test").exists())
Path("folder").delete()
self.assertEqual(False, path.exists())
self.assertEqual(False, Path("folder").exists())
def test_working_dir(self):
self.assertEqual(True, Path.get_working_dir().is_absolute())
self.assertEqual(Path().absolute(), Path.get_working_dir())
Path("folder").set_working_dir()
self.assertEqual(True, Path.get_working_dir().endswith("folder"))
self.assertEqual(Path().absolute(), Path.get_working_dir())
def test_same_destination(self):
path = Path("folder")
self.assertEqual(True, path.same_destination(Path() / "folder"))
self.assertEqual(True, path.same_destination(path.absolute()))
self.assertEqual(True, path.same_destination("folder"))
def test_write(self):
self.assertEqual('"foobar"', Path("test.txt").write("foobar"))
self.assertEqual("foobar", Path("test.txt").read())
self.assertEqual('"foobar"', Path("test2").write("foobar"))
self.assertEqual("foobar", Path("test2").read())
self.assertEqual('"foobar"', Path("test2.doesntexist").write("foobar"))
self.assertEqual("foobar", Path("test2.doesntexist").read())
self.assertEqual('"foobar"', Path("folder/test.txt").write("foobar"))
self.assertEqual("foobar", Path("folder/test.txt").read())
def test_rename(self):
Path("folder/test.txt").write()
Path("folder/test.txt").rename(name="hello.txt")
self.assertTrue(Path("folder/hello.txt").exists())
self.assertFalse(Path("folder/test.txt").exists())
Path("folder").rename(name="folder2")
self.assertTrue(Path("folder2").exists())
self.assertFalse(Path("folder").exists())
Path("folder2/hello.txt").rename(name="foo.txt")
self.assertTrue(Path("folder2/foo.txt").exists())
Path("folder2/foo.txt").rename(name="foo.TEST.txt")
self.assertTrue(Path("folder2/foo.TEST.txt").exists())
Path("folder2/foo.TEST.txt").rename(name="foobar")
self.assertTrue(Path("folder2/foobar").is_file())
Path("folder2/foobar").rename(suffix=".test")
self.assertTrue(Path("folder2/foobar.test").exists())
self.assertEqual("folder2/hello.test", Path("folder2/foobar.test").rename(stem="hello"))
self.assertTrue(Path("folder2/hello.test").exists())
def test_copy(self):
Path("folder/test.txt").write()
Path("folder/test.txt").copy("foo.txt")
self.assertEqual(True, Path("folder/foo.txt").exists())
Path("folder").copy("new")
self.assertEqual(True, Path("new/foo.txt").exists())
Path("new/foo.txt").copy("new/bar.txt")
self.assertEqual(True, Path("new/bar.txt").exists())
def test_copy_to_folder(self):
Path("folder/test.txt").write()
Path("folder/test2.txt").write()
Path("folder").copy_to_folder("folder2")
self.assertEqual(True, Path("folder2/test.txt").exists())
self.assertEqual(True, Path("folder2/test2.txt").exists())
self.assertEqual(True, Path("folder/test2.txt").exists())
Path("folder/test.txt").copy_to_folder("")
self.assertEqual(True, Path("test.txt").exists())
self.assertEqual(False, Path("test2.txt").exists())
Path("folder").copy_to_folder(Path(), overwrite=True)
self.assertEqual(True, Path("test2.txt").exists())
def test_move(self):
Path("folder/test.txt").write(5)
Path("folder/test2.txt").write()
Path("folder").move("folder2")
self.assertEqual(False, Path("folder").exists())
self.assertEqual(True, Path("folder2/test.txt").exists())
self.assertEqual(True, Path("folder2/test2.txt").exists())
Path("folder2/test.txt").move("")
self.assertEqual(True, Path("test.txt").exists())
self.assertEqual(False, Path("test2.txt").exists())
self.assertEqual(False, Path("folder2/test.txt").exists())
Path("folder/test.txt").write(2)
with self.assertRaises(FileExistsError):
Path("folder").move(Path())
self.assertEqual(5, Path("test.txt").read())
Path("folder").move(Path(), overwrite=True)
self.assertEqual(2, Path("test.txt").read())
def test_create_folder(self):
path = Path("folder/folder2.txt")
path.create_folder()
self.assertEqual(True, path.is_folder())
def test_trash_and_delete(self):
for method in ("trash", "delete"):
path = Path("file.txt")
self.assertEqual(False, path.exists())
self.assertEqual(False, getattr(path, method)())
path.write()
self.assertEqual(True, path.exists())
self.assertEqual(True, getattr(path, method)())
self.assertEqual(False, getattr(path, method)())
path = Path("folder/file.txt")
self.assertEqual(False, path.exists())
self.assertEqual(False, getattr(path, method)())
path.write()
self.assertEqual(True, path.exists())
self.assertEqual(True, getattr(path.get_parent(), method)())
self.assertEqual(False, getattr(path.get_parent(), method)())
self.assertEqual(False, Path("folder").exists())
def test_trash_and_delete_folder_content(self):
for method in ("trash_folder_content", "delete_folder_content"):
setup_workdir()
mainPath = Path("folder")
path = mainPath / "file.txt"
path2 = mainPath / "folder2/file2.txt"
self.assertEqual(False, mainPath.exists())
self.assertEqual(False, getattr(mainPath, method)())
for targetPath in (mainPath, ):
path.write()
path2.write()
self.assertEqual(True, getattr(targetPath, method)())
self.assertEqual(False, getattr(targetPath, method)())
self.assertEqual(True, mainPath.exists())
self.assertEqual(False, path.exists())
self.assertEqual(False, path2.exists())
def test_get_paths(self):
Path("test.txt").write()
Path("folder/test2.txt").write()
Path("folder/test3.txt").write()
self.assertEqual(2, len(Path().get_children()))
self.assertEqual(3, len(Path().get_children(include_self=True)))
self.assertEqual(0, len(Path("test.txt").get_children()))
self.assertEqual(1, len(Path("test.txt").get_children(include_self=True)))
self.assertEqual(0, len(Path("test.txt").get_children()))
self.assertEqual(4, len(Path().get_children(depth=3)))
self.assertEqual(5, len(Path().get_children(depth=1, include_self=True)))
self.assertEqual(5, len(Path().get_children(depth=-1, include_self=True)))
self.assertEqual(3, len(Path().get_children(depth=0, include_self=True)))
self.assertEqual(0, len(Path("folder/test2.txt").get_children(depth=-1)))
self.assertEqual(["folder/test2.txt", "folder/test3.txt"], Path("folder").get_children())
def test_time_created_and_modified(self):
path = Path("test.txt")
methods = (path.seconds_since_creation, path.seconds_since_modified)
for method in methods:
self.assertRaises(AttributeError, method)
path.write()
for method in methods:
self.assertGreater(method(), 0)
# You probably need to flush the file to make this work, on Windows at least
# self.assertEqual(methods[0](), methods[1]())
# path.write("foobar", overwrite=True)
# self.assertNotEqual(methods[0](), methods[1]())
def test_getitem(self):
self.assertEqual("f", Path("foobar")[0])
self.assertEqual("fo", Path("foobar")[0:2])
self.assertEqual("raboof", Path("foobar")[-1::-1])
def test_iter(self):
self.assertEqual(["f", "o", "o"], list(Path("foo")))
self.assertIn("foo", Path("foobar"))
def test_root(self):
str_path = Path().absolute().get_parent(depth=-1, index=-1).path
if Path.verInfo.pathRootIsDelimiter:
self.assertEqual("/", str_path)
else:
self.assertTrue(len(str_path) == 3 and str_path[1] == ":" and str_path[2] == Path.path_delimiter)
self.assertEqual(True, Path().absolute().get_parent(-1, -1).is_root())
self.assertEqual(False, Path("foo").is_root())
self.assertEqual(False, Path().absolute().is_root())
def test_as_working_dir(self):
working_dir = Path.get_working_dir()
with Path("hello").as_working_dir():
self.assertEqual(working_dir / "hello", Path.get_working_dir())
self.assertEqual(working_dir, Path.get_working_dir())
def test_match(self):
self.assertEqual(True, Path("hello/there").match("The*"))
self.assertEqual(True, Path("hello/there").match("*"))
self.assertEqual(True, Path("hello/there").match("*h*"))
self.assertEqual(True, Path(".git").match(".*"))
self.assertEqual(True, Path(".git").match("."))
self.assertEqual(True, Path("hello/there").match("The"))
self.assertEqual(True, Path("hello/there").match("hello/there"))
self.assertEqual(True, Path("foo/bar/hi").match("/bar/"))
self.assertEqual(True, Path("foo/bar/hi").match("\\bar\\"))
self.assertEqual(False, Path("hello/there").match("x"))
self.assertEqual(False, Path("hello/there").match("*x*"))
self.assertEqual(False, Path("hello/there").match("there/"))
def test_encode(self):
self.assertEqual("foo/bar", Path("foo\\bar").encode())
self.assertEqual("foo/bar", Path("foo/bar").encode())
self.assertEqual("foo%20bar", Path("foo bar").encode())
self.assertEqual("foo/bar/hi%20there", Path("foo/bar\\hi there").encode())
self.assertEqual("_hello/there_now.py", Path("_hello/there_now.py").encode())
self.assertEqual("foo/_bar_now", Path("foo\\_bar_now").encode())
def test_threads(self):
threads = []
queue = mp.Queue()
count = 2
for i in range(count):
threads.append(mp.Process(target=_thread_test, args=(queue, i)))
for thread in threads:
thread.start()
results = []
for i in range(count):
get = queue.get()
self.assertNotIn(get, results)
results.append(get)
self.assertEqual(len(results), count)
def test_CaseSensitivityError(self):
Path("foo.txt").write("hi")
# Path("foo.txt").get_parent()
self.assertRaises(CaseSensitivityError, Path("Foo.txt").exists)
def test_get_alternative_path(self):
path = Path("foo/bar.txt")
self.assertEqual(path, path.to_alternative().from_alternative())
path = path.absolute()
self.assertEqual(path, path.to_alternative().from_alternative())
def test_get_cache_dir(self):
self.assertEqual(True, Path.get_lock_dir().startswith(Path.get_cache_dir()))
def test_lock(self):
path = Path("foo.txt")
with path.lock():
self.assertEqual(True, path.get_lock_path().exists())
def test_open_operation(self):
path = Path("foo.txt")
with path.lock():
path.open_operation("w", lambda stream: stream.write("hi"))
self.assertEqual("hi", path.open_operation("r", lambda stream: stream.read()))
def test_size(self):
path = Path("foo.txt")
path.write("bar")
self.assertEqual(True, path.size() > 1)
def test_without_file(self):
path = Path("foo/bar")
self.assertEqual("foo/bar", path.without_file())
path.write()
self.assertEqual("foo", path.without_file())
def test_get_differing_files(self):
Path("one/bar").write("hey")
Path("one/foo").write("hello")
Path("two/foo").write("hi")
for base, target in (("one", "two"), ("two", "one")):
self.assertEqual({"bar"}, Path(base).get_differing_files(target, exist=True, content=False))
self.assertEqual({"foo"}, Path(base).get_differing_files(target, exist=False, content=True))
self.assertEqual({"foo", "bar"}, Path(base).get_differing_files(target, exist=True, content=True))
self.assertEqual(set(), Path(base).get_differing_files(target, exist=False, content=False))
def test_contains(self):
path = Path("foo")
path.text.write("hello there test")
self.assertEqual(True, path.contains("there"))
self.assertEqual(False, path.contains("hi"))
def test_is_identical(self):
Path("foo").write("hello")
Path("bar").write("hello")
self.assertEqual(True, Path("foo").is_identical("bar"))
Path("bar").write("hi", overwrite=True)
self.assertEqual(False, Path("foo").is_identical("bar"))
Path("foo").write("hi\n", overwrite=True)
self.assertEqual(False, Path("foo").is_identical("bar"))
def test_empty(self):
self.assertEqual(True, Path().empty())
self.assertEqual(True, Path("new").empty())
Path("new").create_folder()
self.assertEqual(True, Path("new").empty())
Path("new/file.txt").write("foo")
self.assertEqual(False, Path("new").empty())
self.assertEqual(False, Path("new/file.txt").empty())
Path("new/file.txt").delete()
self.assertEqual(True, Path("new").empty())
def test_pack(self):
Path("base/test.txt").write("hello")
Path("base").pack("target")
self.assertEqual(True, Path("target.zip").exists())
Path("target").unpack("new")
self.assertEqual("hello", Path("new/test.txt").read())
Path("base").pack("target.tar.gz").unpack("tarnew")
self.assertEqual("hello", Path("tarnew/test.txt").read())
Path("base/folder/hi").write("there")
Path("base").pack("packed/pack.zip").unpack("newbase")
self.assertEqual("hello", Path("newbase/test.txt").read())
self.assertEqual("there", Path("newbase/folder/hi").read())
def test_recycle(self):
self.assertIs(Path("hi/there"), Path("hi/there"))
self.assertIs(Path("hi/there")._children, Path("hi/there")._children)
self.assertIs(Path("hi/there"), Path("hi\\there"))
self.assertIs(Path("hi/there"), Path("hi") / "there")
self.assertIs(Path("hi\\there"), Path("hi") / "there")
self.assertIsNot(Path("hithere"), Path("hi") / "there")
self.assertIsNot(Path("hi"), Path("hi").absolute())
def test_read_empty(self):
with self.assertRaises(FileNotFoundError):
Path("hey").read()
self.assertEqual(None, Path("hi").read(default=None))
|
main.pyw
|
import subprocess
import tkinter
import os
import threading

def getNetworkInformation(pingMeter, canvas):
    global running
    #gatewayAddressProcess = subprocess.Popen(["ipconfig"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, cwd=os.getcwd(), env=os.environ)
    #gatewayAddressProcess.stdin.close()
    #gatewayAddress = gatewayAddressProcess.stdout.read().decode("utf-8").strip()[-32:].strip()
    gatewayAddress = "8.8.8.8"
    networkData = []
    networkDataRawBuffer = []
    while running:
        pingShell = subprocess.Popen(["ping", "-w", "1000", "-n", "20", gatewayAddress], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, cwd=os.getcwd(), env=os.environ)
        pingShell.stdin.close()
        while running:
            # stop reading once ping has exited, whatever its exit code
            if pingShell.poll() is not None:
                break
            pingShellLine = pingShell.stdout.readline().decode("utf-8").strip()
            if ("Reply from " in pingShellLine) or (pingShellLine == "Request timed out."):
                # e.g. "Reply from 8.8.8.8: bytes=32 time=23ms TTL=117" -> 23; timeouts count as 1000 ms
                if "Reply from " in pingShellLine:
                    networkDataRawBuffer.append(int(pingShellLine[pingShellLine.index("time") + 5:pingShellLine.find("ms")]))
                elif pingShellLine == "Request timed out.":
                    networkDataRawBuffer.append(1000)
                if len(networkDataRawBuffer) > 5:
                    networkDataRawBuffer.pop(0)
                if len(networkDataRawBuffer) == 5:
                    networkData.append(round(sum(networkDataRawBuffer) / 5))
                if len(networkData) > 36:
                    networkData.pop(0)
                if len(networkData) > 1 and running:
                    threading.Thread(target=drawNetworkInformation, args=(pingMeter, canvas, networkData)).start()
        pingShell.kill()
def drawNetworkInformation(pingMeter, canvas, networkData):
    global running
    if running:
        pingMeter.config(text="Current Ping: " + str(networkData[-1]) + " ms")
        canvas.create_rectangle(2, 0, 700, 698, fill="#212121", outline="")
        drawPoints = [(720 - (len(networkData) * 20)), 698]
        for i in range(len(networkData)):
            drawPoints.extend([720 - ((len(networkData) - i) * 20), 698 - round((698 * networkData[i]) / 1000)])
        drawPoints.extend([700, 698])
        if running:
            canvas.create_polygon(drawPoints, fill="#175e64")
            canvas.create_line(drawPoints[2:-2], fill="#00eeff", width=2)
            canvas.create_line(1, 0, 1, 700, fill="#00eeff", width=2)
def main():
    global running
    running = True
    root = tkinter.Tk()
    #root.wm_iconbitmap("icon.ico")
    root.config(bg="#212121")
    root.title("Network Visualizer")
    root.geometry("800x800")
    root.resizable(False, False)
    canvas = tkinter.Canvas(root)
    canvas.config(bg="#212121", bd=0, highlightthickness=0, relief="ridge")
    canvas.place(x=100, y=0, width=700, height=700)
    canvas.create_line(1, 0, 1, 700, fill="#00eeff", width=2)
    canvas.create_line(0, 699, 700, 699, fill="#00eeff", width=2)
    yLabel = tkinter.Label(root, text="Ping\n(ms)", font=("Helvetica", 12), bg="#212121", fg="#00eeff")
    yLabel.place(x=30, y=325, width=40, height=50)
    xLabel = tkinter.Label(root, text="Time", font=("Helvetica", 12), bg="#212121", fg="#00eeff")
    xLabel.place(x=430, y=735, width=40, height=30)
    pingMeter = tkinter.Label(root, text="Current Ping: N/A", font=("Helvetica", 12), bg="#212121", fg="#00eeff")
    pingMeter.place(x=20, y=735, width=180, height=30)
    networkThread = threading.Thread(target=getNetworkInformation, args=(pingMeter, canvas))
    networkThread.start()
    root.mainloop()
    # the window has been closed: stop the ping thread before exiting
    running = False

if __name__ == "__main__":
    main()
|
alarm_sound.py
|
import tkinter as tk
from tkinter.font import Font
import pygame.mixer
import wave
from os.path import relpath
from threading import Thread
class AlarmSound():
def __init__(self, parent, file="default", set_window=False):
self.parent = parent
self.file = file # File path or "default" alarm Sound
self.set_window = set_window
self.thread = Thread(target=self.on_active, daemon=True)
if file == "default":
self.file = relpath("alarm-clock/assets/sounds/beep_beep.wav")
wav = wave.open(self.file)
frequency = wav.getframerate()
pygame.mixer.init(frequency=frequency)
self.sound = pygame.mixer.Sound(self.file)
self.thread.start()
def on_active(self):
if self.set_window is True:
self.window = tk.Toplevel(self.parent)
self.window.title("Beep Beep!")
self.label = tk.Label(self.window, text="Beep Beep!",
font=Font(family='Helvetica', size=36,
weight='bold'))
self.stop_button = tk.Button(self.window, text="Stop",
command=self.stop_sound,
font=Font(family='Helvetica',
size=20))
self.label.pack(side=tk.TOP, fill=tk.BOTH, expand=3)
self.stop_button.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.sound.play(loops=-1)
else:
self.sound.play(loops=-1)
def stop_sound(self):
self.sound.stop()
if self.set_window is True:
self.window.destroy()
self.parent.alarm_sound = None
if __name__ == "__main__":
root = tk.Tk()
root.geometry("400x100")
root.minsize(400, 100)
root.title("Alarm Sound Test")
alarm_sound = AlarmSound(root, set_window=True)
root.mainloop()
|
TestFlight.py
|
# Started from Tello Template
# This Python app is in the Public domain
# Some parts from Tello3.py
import threading, socket, sys, time, subprocess
# GLOBAL VARIABLES DECLARED HERE....
host = ''
port = 9000
locaddr = (host,port)
tello_address = ('192.168.10.1', 8889) # Get the Tello drone's address
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(locaddr)
def recv():
count = 0
while True:
try:
data, server = sock.recvfrom(1518)
print(data.decode(encoding="utf-8"))
except Exception:
print ('\n****Keep Eye on Drone****\n')
break
def sendmsg(msg, sleep = 6):
print("Sending: " + msg)
msg = msg.encode(encoding="utf-8")
sock.sendto(msg, tello_address)
time.sleep(sleep)
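# Illustrative note (not from the original script): the Tello is driven by plain-text
# SDK commands sent over UDP, so sendmsg('command', 0) below switches the drone into
# SDK mode, and a movement command such as sendmsg("up 75") climbs roughly 75 cm,
# while the sleep gives the drone time to finish the maneuver before the next command.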
# recvThread create
recvThread = threading.Thread(target=recv)
recvThread.start()
# CREATE FUNCTIONS HERE....
# Square
def square():
sendmsg("up 75")
for i in range(4):
sendmsg("forward 100")
sendmsg("ccw 90")
# Triangle
def triangle():
sendmsg("up 75")
for i in range(3):
sendmsg("forward 100",3)
sendmsg("ccw 120",3)
print("\nWilliam Julian")
print("Project Name: Test Flight ")
import datetime
now = datetime.datetime.now()
print("Date: "+now.strftime("%m-%d-%y %H:%M"))
print("\n****CHECK YOUR TELLO WIFI ADDRESS****")
print("\n****CHECK SURROUNDING AREA BEFORE FLIGHT****")
ready = input('\nAre you ready to take flight: ')
try:
if ready.lower() == 'yep':
print("\nStarting Drone!\n")
sendmsg('command', 0)
sendmsg('takeoff')
triangle()
sendmsg('land')
print('\nFLIGHT SUCCESSFUL!! :)')
else:
print('\n****ERROR****\n')
except KeyboardInterrupt:
sendmsg('emergency')
sock.close()
|
motors.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2020-01-18
# modified: 2021-02-08
#
# To start pigpiod:
#
# % sudo systemctl start pigpiod
#
# To enable/disable pigpiod on boot:
#
# % sudo systemctl [enable|disable] pigpiod
#
# To control the daemon:
#
# % sudo systemctl [start|stop|status] pigpiod
#
import sys, time, traceback
from threading import Thread
from fractions import Fraction
from colorama import init, Fore, Style
init()
try:
import pigpio
except ImportError:
print(Fore.RED + "This script requires the pigpio module.\nInstall with: sudo apt install python3-pigpio" + Style.RESET_ALL)
# sys.exit(1)
from lib.logger import Logger, Level
from lib.event import Event
from lib.enums import Direction, Orientation
from lib.slew import SlewRate
# ..............................................................................
class Motors():
'''
A dual motor controller with encoders.
'''
def __init__(self, config, ticker, tb, level):
super().__init__()
self._log = Logger('motors', level)
self._log.info('initialising motors...')
if config is None:
raise Exception('no config argument provided.')
if ticker is None:
raise Exception('no ticker argument provided.')
self._ticker = ticker
if tb is None:
tb = self._configure_thunderborg_motors(level)
if tb is None:
raise Exception('unable to configure thunderborg.')
self._tb = tb
self._set_max_power_ratio()
# config pigpio's pi and name its callback thread (ex-API)
try:
self._pi = pigpio.pi()
if self._pi is None:
raise Exception('unable to instantiate pigpio.pi().')
elif self._pi._notify is None:
raise Exception('can\'t connect to pigpio daemon; did you start it?')
self._pi._notify.name = 'pi.callback'
self._log.info('pigpio version {}'.format(self._pi.get_pigpio_version()))
from lib.motor import Motor
self._log.info('imported Motor.')
except Exception as e:
self._log.error('error importing and/or configuring Motor: {}'.format(e))
traceback.print_exc(file=sys.stdout)
sys.exit(1)
self._port_motor = Motor(config, self._ticker, self._tb, self._pi, Orientation.PORT, level)
self._port_motor.set_max_power_ratio(self._max_power_ratio)
self._stbd_motor = Motor(config, self._ticker, self._tb, self._pi, Orientation.STBD, level)
self._stbd_motor.set_max_power_ratio(self._max_power_ratio)
self._closed = False
self._enabled = False # used to be enabled by default
# a dictionary of motor # to last set value
self._msgIndex = 0
self._last_set_power = { 0:0, 1:0 }
self._log.info('motors ready.')
# ..........................................................................
def name(self):
return 'Motors'
# ..........................................................................
def _configure_thunderborg_motors(self, level):
'''
Import the ThunderBorg library, then configure the Motors.
'''
self._log.info('configure thunderborg & motors...')
global pi
try:
self._log.info('importing thunderborg...')
import lib.ThunderBorg3 as ThunderBorg
self._log.info('successfully imported thunderborg.')
TB = ThunderBorg.ThunderBorg(level) # create a new ThunderBorg object
TB.Init() # set the board up (checks the board is connected)
self._log.info('successfully instantiated thunderborg.')
if not TB.foundChip:
boards = ThunderBorg.ScanForThunderBorg()
if len(boards) == 0:
self._log.error('no thunderborg found, check you are attached.')
else:
self._log.error('no ThunderBorg at address {:02x}, but we did find boards:'.format(TB.i2cAddress))
for board in boards:
self._log.info('board {:02x} {:d}'.format(board, board))
self._log.error('if you need to change the I²C address change the setup line so it is correct, e.g. TB.i2cAddress = {:0x}'.format(boards[0]))
sys.exit(1)
TB.SetLedShowBattery(True)
return TB
except Exception as e:
self._log.error('unable to import thunderborg: {}'.format(e))
traceback.print_exc(file=sys.stdout)
sys.exit(1)
# ..........................................................................
def set_led_show_battery(self, enable):
self._tb.SetLedShowBattery(enable)
# ..........................................................................
def set_led_color(self, color):
self._tb.SetLed1(color.red/255.0, color.green/255.0, color.blue/255.0)
# ..........................................................................
def _set_max_power_ratio(self):
# initialise ThunderBorg ...........................
self._log.info('getting battery reading...')
# get battery voltage to determine max motor power
# could be: Makita 12V or 18V power tool battery, or 12-20V line supply
voltage_in = self._tb.GetBatteryReading()
if voltage_in is None:
raise OSError('cannot continue: cannot read battery voltage.')
self._log.info('voltage in: {:>5.2f}V'.format(voltage_in))
# voltage_in = 20.5
# maximum motor voltage
voltage_out = 9.0
self._log.info('voltage out: {:>5.2f}V'.format(voltage_out))
if voltage_in < voltage_out:
raise OSError('cannot continue: battery voltage too low ({:>5.2f}V).'.format(voltage_in))
# Setup the power limits
if voltage_out > voltage_in:
self._max_power_ratio = 1.0
else:
self._max_power_ratio = voltage_out / float(voltage_in)
# convert float to ratio format
self._log.info('battery level: {:>5.2f}V; motor voltage: {:>5.2f}V;'.format( voltage_in, voltage_out) + Fore.CYAN + Style.BRIGHT \
+ ' maximum power ratio: {}'.format(str(Fraction(self._max_power_ratio).limit_denominator(max_denominator=20)).replace('/',':')))
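    # Illustrative example (not part of the original source): with a 12V-class
    # battery reading 12.6V and the 9.0V ceiling above, the ratio is
    # 9.0 / 12.6 = 5/7 ~= 0.714, which Fraction(...).limit_denominator(20) logs
    # as '5:7', i.e. the motors are driven at roughly 71% of full power at most.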
# ..........................................................................
def get_motor(self, orientation):
if orientation is Orientation.PORT:
return self._port_motor
else:
return self._stbd_motor
# ..........................................................................
def is_in_motion(self):
'''
Returns true if either motor is moving.
'''
return self._port_motor.is_in_motion() or self._stbd_motor.is_in_motion()
# ..........................................................................
def get_steps(self):
'''
Returns the port and starboard motor step count.
'''
return [ self._port_motor.get_steps() , self._stbd_motor.get_steps() ]
# ..........................................................................
def get_current_power_level(self, orientation):
'''
Returns the last set power of the specified motor.
'''
if orientation is Orientation.PORT:
return self._port_motor.get_current_power_level()
else:
return self._stbd_motor.get_current_power_level()
# ..........................................................................
def interrupt(self):
'''
Interrupt any motor loops by setting the _interrupt flag.
'''
self._port_motor.interrupt()
self._stbd_motor.interrupt()
# ..........................................................................
def halt(self):
'''
Quickly (but not immediately) stops both motors.
'''
self._log.info('halting...')
        if not self.is_stopped():
_tp = Thread(name='halt-port', target=self.processStop, args=(Event.HALT, Orientation.PORT))
            _ts = Thread(name='halt-stbd', target=self.processStop, args=(Event.HALT, Orientation.STBD))
_tp.start()
_ts.start()
else:
self._log.debug('already stopped.')
self._log.info('halted.')
return True
# ..........................................................................
def brake(self):
'''
Slowly coasts both motors to a stop.
'''
self._log.info('braking...')
        if not self.is_stopped():
_tp = Thread(name='brake-port', target=self.processStop, args=(Event.BRAKE, Orientation.PORT))
_ts = Thread(name='brake-stbd', target=self.processStop, args=(Event.BRAKE, Orientation.STBD))
_tp.start()
_ts.start()
else:
self._log.warning('already stopped.')
self._log.info('braked.')
return True
# ..........................................................................
def stop(self):
'''
Stops both motors immediately, with no slewing.
'''
self._log.info('stopping...')
if not self.is_stopped():
self._port_motor.stop()
self._stbd_motor.stop()
self._log.info('stopped.')
else:
self._log.warning('already stopped.')
return True
# ..........................................................................
def is_stopped(self):
return self._port_motor.is_stopped() and self._stbd_motor.is_stopped()
# ..........................................................................
def processStop(self, event, orientation):
'''
Synchronised process control over various kinds of stopping.
'''
if orientation is Orientation.PORT:
if event is Event.HALT:
self._log.info('halting port motor...')
self._port_motor.halt()
elif event is Event.BRAKE:
self._log.info('braking port motor...')
self._port_motor.brake()
else: # is stop
self._log.info('stopping port motor...')
self._port_motor.stop()
else:
if event is Event.HALT:
self._log.info('halting starboard motor...')
self._stbd_motor.halt()
elif event is Event.BRAKE:
self._log.info('braking starboard motor...')
self._stbd_motor.brake()
else: # is stop
self._log.info('stopping starboard motor...')
self._stbd_motor.stop()
self.print_current_power_levels()
# ..........................................................................
def get_current_power_levels(self):
'''
Returns the last set power values.
'''
_port_power = self._port_motor.get_current_power_level()
_stbd_power = self._stbd_motor.get_current_power_level()
return [ _port_power, _stbd_power ]
# ..........................................................................
def print_current_power_levels(self):
'''
Prints the last set power values.
'''
self._msgIndex += 1
self._log.info('{}:\tcurrent power:\t{:6.1f}\t{:6.1f}'.format(self._msgIndex, self._last_set_power[0], self._last_set_power[1]))
# ..........................................................................
def enable(self):
'''
Enables the motors, ticker and velocity calculator. This issues
a warning if already enabled, but no harm is done in calling
it repeatedly.
'''
if self._enabled:
self._log.warning('already enabled.')
if not self._port_motor.enabled:
self._port_motor.enable()
if not self._stbd_motor.enabled:
self._stbd_motor.enable()
self._ticker.enable()
self._enabled = True
self._log.info('enabled.')
# ..........................................................................
def disable(self):
'''
Disable the motors, halting first if in motion.
'''
if self._enabled:
self._log.info('disabling...')
self._enabled = False
if self.is_in_motion(): # if we're moving then halt
self._log.warning('event: motors are in motion (halting).')
self.halt()
self._port_motor.disable()
self._stbd_motor.disable()
self._log.info('disabling pigpio...')
self._pi.stop()
self._log.info('disabled.')
else:
self._log.debug('already disabled.')
# ..........................................................................
def close(self):
'''
Halts, turn everything off and stop doing anything.
'''
if not self._closed:
if self._enabled:
self.disable()
self._log.info('closing...')
self._port_motor.close()
self._stbd_motor.close()
self._closed = True
self._log.info('closed.')
else:
self._log.debug('already closed.')
# ..........................................................................
@staticmethod
def cancel():
        print('cancelling motors...')
        from lib.motor import Motor  # imported locally (as in __init__) so the name is defined here
        Motor.cancel()
#EOF
|
chrome_test_server_spawner.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
# pylint: disable=W0702
import BaseHTTPServer
import json
import logging
import os
import select
import struct
import subprocess
import sys
import threading
import time
import urlparse
from devil.android import ports
from pylib import constants
from pylib.forwarder import Forwarder
# Paths that are needed to import necessary modules when launching a testserver.
os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + (':%s:%s:%s:%s:%s'
% (os.path.join(constants.DIR_SOURCE_ROOT, 'third_party'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'tlslite'),
os.path.join(constants.DIR_SOURCE_ROOT, 'third_party', 'pyftpdlib',
'src'),
os.path.join(constants.DIR_SOURCE_ROOT, 'net', 'tools', 'testserver'),
os.path.join(constants.DIR_SOURCE_ROOT, 'sync', 'tools', 'testserver')))
SERVER_TYPES = {
'http': '',
'ftp': '-f',
'sync': '', # Sync uses its own script, and doesn't take a server type arg.
'tcpecho': '--tcp-echo',
'udpecho': '--udp-echo',
}
# The timeout (in seconds) of starting up the Python test server.
TEST_SERVER_STARTUP_TIMEOUT = 10
def _WaitUntil(predicate, max_attempts=5):
"""Blocks until the provided predicate (function) is true.
Returns:
Whether the provided predicate was satisfied once (before the timeout).
"""
sleep_time_sec = 0.025
for _ in xrange(1, max_attempts):
if predicate():
return True
time.sleep(sleep_time_sec)
sleep_time_sec = min(1, sleep_time_sec * 2) # Don't wait more than 1 sec.
return False
def _CheckPortAvailable(port):
"""Returns True if |port| is available."""
return _WaitUntil(lambda: ports.IsHostPortAvailable(port))
def _CheckPortNotAvailable(port):
"""Returns True if |port| is not available."""
return _WaitUntil(lambda: not ports.IsHostPortAvailable(port))
def _CheckDevicePortStatus(device, port):
"""Returns whether the provided port is used."""
return _WaitUntil(lambda: ports.IsDevicePortUsed(device, port))
def _GetServerTypeCommandLine(server_type):
"""Returns the command-line by the given server type.
Args:
server_type: the server type to be used (e.g. 'http').
Returns:
A string containing the command-line argument.
"""
if server_type not in SERVER_TYPES:
raise NotImplementedError('Unknown server type: %s' % server_type)
if server_type == 'udpecho':
raise Exception('Please do not run UDP echo tests because we do not have '
'a UDP forwarder tool.')
return SERVER_TYPES[server_type]
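# For example, _GetServerTypeCommandLine('ftp') returns '-f', while 'http' and
# 'sync' map to an empty string and therefore add no extra argument.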
class TestServerThread(threading.Thread):
"""A thread to run the test server in a separate process."""
def __init__(self, ready_event, arguments, device, tool):
"""Initialize TestServerThread with the following argument.
Args:
ready_event: event which will be set when the test server is ready.
arguments: dictionary of arguments to run the test server.
device: An instance of DeviceUtils.
tool: instance of runtime error detection tool.
"""
threading.Thread.__init__(self)
self.wait_event = threading.Event()
self.stop_flag = False
self.ready_event = ready_event
self.ready_event.clear()
self.arguments = arguments
self.device = device
self.tool = tool
self.test_server_process = None
self.is_ready = False
self.host_port = self.arguments['port']
assert isinstance(self.host_port, int)
# The forwarder device port now is dynamically allocated.
self.forwarder_device_port = 0
# Anonymous pipe in order to get port info from test server.
self.pipe_in = None
self.pipe_out = None
self.process = None
self.command_line = []
def _WaitToStartAndGetPortFromTestServer(self):
"""Waits for the Python test server to start and gets the port it is using.
The port information is passed by the Python test server with a pipe given
by self.pipe_out. It is written as a result to |self.host_port|.
Returns:
Whether the port used by the test server was successfully fetched.
"""
assert self.host_port == 0 and self.pipe_out and self.pipe_in
(in_fds, _, _) = select.select([self.pipe_in, ], [], [],
TEST_SERVER_STARTUP_TIMEOUT)
if len(in_fds) == 0:
      logging.error('Timed out waiting for the Python test server to start.')
return False
# First read the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the Python test server packs
# size as native byte order and all Chromium platforms so far are
# configured to use little-endian.
# TODO(jnd): Change the Python test server and local_test_server_*.cc to
# use a unified byte order (either big-endian or little-endian).
data_length = os.read(self.pipe_in, struct.calcsize('=L'))
if data_length:
(data_length,) = struct.unpack('=L', data_length)
assert data_length
if not data_length:
logging.error('Failed to get length of server data.')
return False
port_json = os.read(self.pipe_in, data_length)
if not port_json:
logging.error('Failed to get server data.')
return False
logging.info('Got port json data: %s', port_json)
port_json = json.loads(port_json)
if port_json.has_key('port') and isinstance(port_json['port'], int):
self.host_port = port_json['port']
return _CheckPortNotAvailable(self.host_port)
logging.error('Failed to get port information from the server data.')
return False
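  # Illustrative sketch of the producer side this method expects (assumed, not
  # shown in this file): the test server writes a native-endian 4-byte length
  # followed by the JSON payload to the fd passed via --startup-pipe, roughly:
  #   payload = json.dumps({'port': 12345})
  #   os.write(startup_pipe_fd, struct.pack('=L', len(payload)) + payload)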
def _GenerateCommandLineArguments(self):
"""Generates the command line to run the test server.
Note that all options are processed by following the definitions in
testserver.py.
"""
if self.command_line:
return
args_copy = dict(self.arguments)
# Translate the server type.
type_cmd = _GetServerTypeCommandLine(args_copy.pop('server-type'))
if type_cmd:
self.command_line.append(type_cmd)
# Use a pipe to get the port given by the instance of Python test server
# if the test does not specify the port.
assert self.host_port == args_copy['port']
if self.host_port == 0:
(self.pipe_in, self.pipe_out) = os.pipe()
self.command_line.append('--startup-pipe=%d' % self.pipe_out)
# Pass the remaining arguments as-is.
for key, values in args_copy.iteritems():
if not isinstance(values, list):
values = [values]
for value in values:
if value is None:
self.command_line.append('--%s' % key)
else:
self.command_line.append('--%s=%s' % (key, value))
def _CloseUnnecessaryFDsForTestServerProcess(self):
# This is required to avoid subtle deadlocks that could be caused by the
# test server child process inheriting undesirable file descriptors such as
# file lock file descriptors.
for fd in xrange(0, 1024):
if fd != self.pipe_out:
try:
os.close(fd)
except:
pass
def run(self):
logging.info('Start running the thread!')
self.wait_event.clear()
self._GenerateCommandLineArguments()
command = constants.DIR_SOURCE_ROOT
if self.arguments['server-type'] == 'sync':
command = [os.path.join(command, 'sync', 'tools', 'testserver',
'sync_testserver.py')] + self.command_line
else:
command = [os.path.join(command, 'net', 'tools', 'testserver',
'testserver.py')] + self.command_line
logging.info('Running: %s', command)
# Pass DIR_SOURCE_ROOT as the child's working directory so that relative
# paths in the arguments are resolved correctly.
self.process = subprocess.Popen(
command, preexec_fn=self._CloseUnnecessaryFDsForTestServerProcess,
cwd=constants.DIR_SOURCE_ROOT)
if self.process:
if self.pipe_out:
self.is_ready = self._WaitToStartAndGetPortFromTestServer()
else:
self.is_ready = _CheckPortNotAvailable(self.host_port)
if self.is_ready:
Forwarder.Map([(0, self.host_port)], self.device, self.tool)
# Check whether the forwarder is ready on the device.
self.is_ready = False
device_port = Forwarder.DevicePortForHostPort(self.host_port)
if device_port and _CheckDevicePortStatus(self.device, device_port):
self.is_ready = True
self.forwarder_device_port = device_port
# Wake up the request handler thread.
self.ready_event.set()
# Keep thread running until Stop() gets called.
_WaitUntil(lambda: self.stop_flag, max_attempts=sys.maxint)
if self.process.poll() is None:
self.process.kill()
Forwarder.UnmapDevicePort(self.forwarder_device_port, self.device)
self.process = None
self.is_ready = False
if self.pipe_out:
os.close(self.pipe_in)
os.close(self.pipe_out)
self.pipe_in = None
self.pipe_out = None
logging.info('Test-server has died.')
self.wait_event.set()
def Stop(self):
"""Blocks until the loop has finished.
Note that this must be called in another thread.
"""
if not self.process:
return
self.stop_flag = True
self.wait_event.wait()
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET/POST request."""
def _SendResponse(self, response_code, response_reason, additional_headers,
contents):
"""Generates a response sent to the client from the provided parameters.
Args:
response_code: number of the response status.
response_reason: string of reason description of the response.
additional_headers: dict of additional headers. Each key is the name of
the header, each value is the content of the header.
contents: string of the contents we want to send to client.
"""
self.send_response(response_code, response_reason)
self.send_header('Content-Type', 'text/html')
# Specify the content-length as without it the http(s) response will not
# be completed properly (and the browser keeps expecting data).
self.send_header('Content-Length', len(contents))
for header_name in additional_headers:
self.send_header(header_name, additional_headers[header_name])
self.end_headers()
self.wfile.write(contents)
self.wfile.flush()
def _StartTestServer(self):
"""Starts the test server thread."""
logging.info('Handling request to spawn a test server.')
content_type = self.headers.getheader('content-type')
if content_type != 'application/json':
raise Exception('Bad content-type for start request.')
content_length = self.headers.getheader('content-length')
if not content_length:
content_length = 0
try:
content_length = int(content_length)
except:
raise Exception('Bad content-length for start request.')
logging.info(content_length)
test_server_argument_json = self.rfile.read(content_length)
logging.info(test_server_argument_json)
assert not self.server.test_server_instance
ready_event = threading.Event()
self.server.test_server_instance = TestServerThread(
ready_event,
json.loads(test_server_argument_json),
self.server.device,
self.server.tool)
self.server.test_server_instance.setDaemon(True)
self.server.test_server_instance.start()
ready_event.wait()
if self.server.test_server_instance.is_ready:
self._SendResponse(200, 'OK', {}, json.dumps(
{'port': self.server.test_server_instance.forwarder_device_port,
'message': 'started'}))
logging.info('Test server is running on port: %d.',
self.server.test_server_instance.host_port)
else:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while starting the test server.')
def _KillTestServer(self):
"""Stops the test server instance."""
# There should only ever be one test server at a time. This may do the
# wrong thing if we try and start multiple test servers.
if not self.server.test_server_instance:
return
port = self.server.test_server_instance.host_port
logging.info('Handling request to kill a test server on port: %d.', port)
self.server.test_server_instance.Stop()
# Make sure the status of test server is correct before sending response.
if _CheckPortAvailable(port):
self._SendResponse(200, 'OK', {}, 'killed')
logging.info('Test server on port %d is killed', port)
else:
self._SendResponse(500, 'Test Server Error.', {}, '')
      logging.info('Encountered a problem while killing the test server.')
self.server.test_server_instance = None
def do_POST(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
logging.info('Action for POST method is: %s.', action)
if action == '/start':
self._StartTestServer()
else:
self._SendResponse(400, 'Unknown request.', {}, '')
      logging.info('Encountered unknown request: %s.', action)
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
logging.info('Action for GET method is: %s.', action)
for param in params:
logging.info('%s=%s', param, params[param][0])
if action == '/kill':
self._KillTestServer()
elif action == '/ping':
# The ping handler is used to check whether the spawner server is ready
# to serve the requests. We don't need to test the status of the test
# server when handling ping request.
self._SendResponse(200, 'OK', {}, 'ready')
logging.info('Handled ping request and sent response.')
else:
self._SendResponse(400, 'Unknown request', {}, '')
      logging.info('Encountered unknown request: %s.', action)
class SpawningServer(object):
"""The class used to start/stop a http server."""
def __init__(self, test_server_spawner_port, device, tool):
logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.server.device = device
self.server.tool = tool
self.server.test_server_instance = None
self.server.build_type = constants.GetBuildType()
def _Listen(self):
logging.info('Starting test server spawner')
self.server.serve_forever()
def Start(self):
"""Starts the test server spawner."""
listener_thread = threading.Thread(target=self._Listen)
listener_thread.setDaemon(True)
listener_thread.start()
def Stop(self):
"""Stops the test server spawner.
Also cleans the server state.
"""
self.CleanupState()
self.server.shutdown()
def CleanupState(self):
"""Cleans up the spawning server state.
This should be called if the test server spawner is reused,
to avoid sharing the test server instance.
"""
if self.server.test_server_instance:
self.server.test_server_instance.Stop()
self.server.test_server_instance = None
|
sync_cross_cluster.py
|
#!/usr/bin/env python2
# coding:utf-8
import copy
import errno
import getopt
import logging
import os
import sys
import threading
import time
import traceback
import boto3
import yaml
from botocore.client import Config
from pykit import fsutil
from pykit import jobq
from pykit import logutil
from pykit import threadutil
from pykit import utfjson
from pykit import utfyaml
report_state_lock = threading.RLock()
logger = logging.getLogger(__name__)
sync_state = {
'total_n': 0,
'total_bytes': 0,
'exist': 0,
'check_dest_file_error': 0,
'check_dest_file_error_list': [],
'config_override': 0,
'force_override': 0,
'piped': 0,
'piped_bytes': 0,
'pipe_succeed': 0,
'pipe_succeed_bytes': 0,
'pipe_failed': 0,
'pipe_failed_bytes': 0,
'pipe_failed_exception_error': 0,
'pipe_failed_exception_error_list': [],
}
def _thread(func, args):
th = threading.Thread(target=func, args=args)
th.daemon = True
th.start()
return th
def get_conf(conf_path):
with open(conf_path) as f:
conf = utfyaml.load(f.read())
return conf
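# Illustrative sketch of the YAML layout get_conf() expects; the key names are
# taken from how `cnf` is used below, the values are made up:
#   SRC_ENDPOINT: http://src.example.com
#   SRC_ACCESS_KEY: xxx
#   SRC_SECRET_KEY: xxx
#   SRC_BUCKET: src-bucket
#   DEST_ENDPOINT: http://dest.example.com
#   DEST_ACCESS_KEY: xxx
#   DEST_SECRET_KEY: xxx
#   DEST_BUCKET: dest-bucket
#   PREFIX: ''
#   START_MARKER: ''
#   END_MARKER: ~
#   FORCE_OVERRIDE: false
#   CONFIG_OVERRIDE: ['ContentLength', 'ETag']
#   PROGRESS_FILE: /tmp/sync_progress.json
#   STATE_FILE: /tmp/sync_state.json
#   REPORT_INTERVAL: 5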
def get_boto_client(endpoint, access_key, secret_key):
client = boto3.client(
's3',
use_ssl=False,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
config=Config(signature_version='s3v4'),
region_name='us-east-1',
endpoint_url=endpoint,
)
return client
def load_progress():
progress_file = cnf['PROGRESS_FILE']
if os.path.isfile(progress_file):
progress = utfjson.load(fsutil.read_file(progress_file))
else:
progress = {
'marker': '',
'total_n': 0,
'total_size': 0.
}
return progress
def store_progress():
fsutil.write_file(cnf['PROGRESS_FILE'], utfjson.dump(current_progress))
def clear_progress():
fsutil.remove(cnf['PROGRESS_FILE'])
def get_file_info(client, bucket, key):
resp = client.head_object(
Bucket=bucket,
Key=key,
)
return resp
def iter_files(client, bucket):
marker = current_progress['marker']
start_marker = cnf.get('START_MARKER', '')
if start_marker > marker:
marker = start_marker
end_marker = cnf.get('END_MARKER', None)
while True:
resp = client.list_objects(
Bucket=bucket,
Marker=marker,
)
if 'Contents' not in resp:
print 'list file end'
break
for content in resp['Contents']:
if end_marker is not None and content['Key'] >= end_marker:
                print 'list file end marker reached'
return
marker = content['Key']
yield content
current_progress['total_n'] += 1
current_progress['total_size'] += content['Size']
current_progress['marker'] = content['Key']
if current_progress['total_n'] % 10000 == 0:
store_progress()
store_progress()
def pipe_file(result):
result['piped'] = True
file_object = result['file_object']
try:
src_resp = src_client.get_object(
Bucket=cnf['SRC_BUCKET'],
Key=file_object['Key'],
)
dest_body = src_resp['Body'].read()
dest_object = dest_client.put_object(
Bucket=cnf['DEST_BUCKET'],
Key=result['dest_key'],
ContentLength=src_resp['ContentLength'],
ContentType=src_resp['ContentType'],
Metadata=src_resp['Metadata'],
Body=dest_body,
)
if src_resp['ETag'] == dest_object['ETag']:
result['pipe_succeed'] = True
result['src_file_info'] = file_object
return True
else:
raise RuntimeError('ETagNotEqual')
except Exception as e:
result['pipe_failed'] = True
result['pipe_failed_exception_error'] = {
'key': file_object['Key'],
'error': repr(e),
}
logger.error('got exception when pipe file %s: %s' %
(file_object['Key'], traceback.format_exc()))
return False
def convert_key(key):
return key
def check_dest_file(result):
try:
src_file_info = get_file_info(
src_client, cnf['SRC_BUCKET'], result['dest_key'])
dest_file_info = get_file_info(
dest_client, cnf['DEST_BUCKET'], result['dest_key'])
except Exception as e:
if hasattr(e, 'message') and 'Not Found' in e.message:
return True
else:
result['check_dest_file_error'] = {
'key': result['dest_key'],
'error': repr(e),
}
            logger.error('failed to get dest file info of {k}: {t}'.format(
k=repr(result['dest_key']), t=repr(traceback.format_exc())))
return False
result['exist'] = True
if cnf['FORCE_OVERRIDE']:
result['force_override'] = True
logger.info('need to override file:{k} because FORCE_OVERRIDE is True'.format(
k=repr(result['dest_key'])))
return True
else:
for metric in cnf['CONFIG_OVERRIDE']:
if dest_file_info[metric] != src_file_info[metric]:
result['config_override'] = True
                logger.info('need to override file: {k}, because CONFIG_OVERRIDE: {m} is configured'.format(
k=repr(result['dest_key']), m=metric))
return True
return False
def sync_one_file(file_object):
result = {
'file_object': file_object,
'dest_key': convert_key(file_object['Key'])
}
    if not check_dest_file(result):
return result
if not pipe_file(result):
return result
return result
def update_sync_stat(result):
file_object = result['file_object']
sync_state['total_n'] += 1
sync_state['total_bytes'] += file_object['Size']
if 'check_dest_file_error' in result:
sync_state['check_dest_file_error'] += 1
sync_state['check_dest_file_error_list'].append(
result['check_dest_file_error'])
return
if 'exist' in result:
sync_state['exist'] += 1
if 'config_override' in result:
sync_state['config_override'] += 1
elif 'force_override' in result:
sync_state['force_override'] += 1
    if 'piped' not in result:
return
sync_state['piped'] += 1
sync_state['piped_bytes'] += file_object['Size']
if 'pipe_failed' in result:
sync_state['pipe_failed'] += 1
sync_state['pipe_failed_bytes'] += file_object['Size']
if 'pipe_failed_exception_error' in result:
sync_state['pipe_failed_exception_error'] += 1
sync_state['pipe_failed_exception_error_list'].append(
result['pipe_failed_exception_error'])
return
sync_state['pipe_succeed'] += 1
sync_state['pipe_succeed_bytes'] += file_object['Size']
def _report_state():
os.system('clear')
print (('src bucket name: %s, prefix: %s, start marker: %s, ' +
'end marker: %s, dest bucket name: %s') %
(cnf['SRC_BUCKET'], cnf['PREFIX'], cnf['START_MARKER'],
cnf['END_MARKER'], cnf['DEST_BUCKET']))
print ''
print (('previous iter progress: total number: %d, ' +
'total size: %d, marker: %s') %
(previous_progress['total_n'],
previous_progress['total_size'],
previous_progress['marker']))
print (('current iter progress: total number: %d, ' +
'total size: %d, marker: %s') %
(current_progress['total_n'],
current_progress['total_size'],
current_progress['marker']))
print ''
print ('total number: %d, total bytes: %d' %
(sync_state['total_n'], sync_state['total_bytes']))
print ''
print 'get dest file info failed: %d' % sync_state['check_dest_file_error']
print (('exist: %d, config_override: %d, force_override: %d, ') %
(sync_state['exist'],
sync_state['config_override'],
sync_state['force_override'],
))
print ''
print 'piped: %d, piped_bytes: %d' % (sync_state['piped'],
sync_state['piped_bytes'])
print ('pipe succeed: %d, pipe succeed bytes: %d' %
(sync_state['pipe_succeed'],
sync_state['pipe_succeed_bytes']))
print ('pipe failed: %d, pipe failed bytes: %d' %
(sync_state['pipe_failed'],
sync_state['pipe_failed_bytes']))
def report_state():
with report_state_lock:
_report_state()
def report(sess):
while not sess['stop']:
report_state()
time.sleep(cnf['REPORT_INTERVAL'])
def dump_state():
fsutil.write_file(cnf['STATE_FILE'], utfjson.dump(sync_state))
def sync():
try:
report_sess = {'stop': False}
report_th = threadutil.start_thread(
report, args=(report_sess,), daemon=True)
jobq.run(iter_files(src_client, cnf['SRC_BUCKET']), [(sync_one_file, 3),
(update_sync_stat, 1),
])
report_sess['stop'] = True
report_th.join()
except KeyboardInterrupt:
logger.exception('get KeyboardInterrupt')
sys.exit(0)
finally:
report_state()
dump_state()
if __name__ == "__main__":
logutil.make_logger(base_dir='/var/log/opstool', level='INFO')
opts, args = getopt.getopt(sys.argv[1:], '', ['conf=', ])
opts = dict(opts)
if opts.get('--conf') is None:
conf_path = '../conf/sync_cross_cluster.yaml'
else:
conf_path = opts['--conf']
cnf = get_conf(conf_path)
src_client = get_boto_client(
cnf['SRC_ENDPOINT'],
cnf['SRC_ACCESS_KEY'],
cnf['SRC_SECRET_KEY'])
dest_client = get_boto_client(
cnf['DEST_ENDPOINT'],
cnf['DEST_ACCESS_KEY'],
cnf['DEST_SECRET_KEY'])
thread_status = {}
cmd = args[0]
if cmd == 'sync':
current_progress = load_progress()
previous_progress = copy.deepcopy(current_progress)
sync()
elif cmd == 'clear_progress':
clear_progress()
|
test.py
|
import json
import pytest
import random
import re
import string
import threading
import time
from multiprocessing.dummy import Pool
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
stay_alive=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 1} )
node2 = cluster.add_instance('node2',
config_dir='configs',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
stay_alive=True,
tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
macros={"shard": 0, "replica": 2} )
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_system_tables(start_cluster):
expected_disks_data = [
{
"name": "default",
"path": "/var/lib/clickhouse/",
"keep_free_space": '1024',
},
{
"name": "jbod1",
"path": "/jbod1/",
"keep_free_space": '0',
},
{
"name": "jbod2",
"path": "/jbod2/",
"keep_free_space": '10485760',
},
{
"name": "external",
"path": "/external/",
"keep_free_space": '0',
}
]
click_disk_data = json.loads(node1.query("SELECT name, path, keep_free_space FROM system.disks FORMAT JSON"))["data"]
assert sorted(click_disk_data, key=lambda x: x["name"]) == sorted(expected_disks_data, key=lambda x: x["name"])
expected_policies_data = [
{
"policy_name": "small_jbod_with_external",
"volume_name": "main",
"volume_priority": "1",
"disks": ["jbod1"],
"max_data_part_size": "0",
"move_factor": 0.1,
},
{
"policy_name": "small_jbod_with_external",
"volume_name": "external",
"volume_priority": "2",
"disks": ["external"],
"max_data_part_size": "0",
"move_factor": 0.1,
},
{
"policy_name": "one_more_small_jbod_with_external",
"volume_name": "m",
"volume_priority": "1",
"disks": ["jbod1"],
"max_data_part_size": "0",
"move_factor": 0.1,
},
{
"policy_name": "one_more_small_jbod_with_external",
"volume_name": "e",
"volume_priority": "2",
"disks": ["external"],
"max_data_part_size": "0",
"move_factor": 0.1,
},
{
"policy_name": "jbods_with_external",
"volume_name": "main",
"volume_priority": "1",
"disks": ["jbod1", "jbod2"],
"max_data_part_size": "10485760",
"move_factor": 0.1,
},
{
"policy_name": "jbods_with_external",
"volume_name": "external",
"volume_priority": "2",
"disks": ["external"],
"max_data_part_size": "0",
"move_factor": 0.1,
},
{
"policy_name": "moving_jbod_with_external",
"volume_name": "main",
"volume_priority": "1",
"disks": ["jbod1"],
"max_data_part_size": "0",
"move_factor": 0.7,
},
{
"policy_name": "moving_jbod_with_external",
"volume_name": "external",
"volume_priority": "2",
"disks": ["external"],
"max_data_part_size": "0",
"move_factor": 0.7,
},
{
"policy_name": "default_disk_with_external",
"volume_name": "small",
"volume_priority": "1",
"disks": ["default"],
"max_data_part_size": "2097152",
"move_factor": 0.1,
},
{
"policy_name": "default_disk_with_external",
"volume_name": "big",
"volume_priority": "2",
"disks": ["external"],
"max_data_part_size": "20971520",
"move_factor": 0.1,
},
{
"policy_name": "special_warning_policy",
"volume_name": "special_warning_zero_volume",
"volume_priority": "1",
"disks": ["default"],
"max_data_part_size": "0",
"move_factor": 0.1,
},
{
"policy_name": "special_warning_policy",
"volume_name": "special_warning_default_volume",
"volume_priority": "2",
"disks": ["external"],
"max_data_part_size": "0",
"move_factor": 0.1,
},
{
"policy_name": "special_warning_policy",
"volume_name": "special_warning_small_volume",
"volume_priority": "3",
"disks": ["jbod1"],
"max_data_part_size": "1024",
"move_factor": 0.1,
},
{
"policy_name": "special_warning_policy",
"volume_name": "special_warning_big_volume",
"volume_priority": "4",
"disks": ["jbod2"],
"max_data_part_size": "1024000000",
"move_factor": 0.1,
},
]
clickhouse_policies_data = json.loads(node1.query("SELECT * FROM system.storage_policies WHERE policy_name != 'default' FORMAT JSON"))["data"]
def key(x):
return (x["policy_name"], x["volume_name"], x["volume_priority"])
assert sorted(clickhouse_policies_data, key=key) == sorted(expected_policies_data, key=key)
def test_query_parser(start_cluster):
try:
with pytest.raises(QueryRuntimeException):
node1.query("""
CREATE TABLE table_with_absent_policy (
d UInt64
) ENGINE = MergeTree()
ORDER BY d
SETTINGS storage_policy='very_exciting_policy'
""")
with pytest.raises(QueryRuntimeException):
node1.query("""
CREATE TABLE table_with_absent_policy (
d UInt64
) ENGINE = MergeTree()
ORDER BY d
SETTINGS storage_policy='jbod1'
""")
node1.query("""
CREATE TABLE table_with_normal_policy (
d UInt64
) ENGINE = MergeTree()
ORDER BY d
SETTINGS storage_policy='default'
""")
node1.query("INSERT INTO table_with_normal_policy VALUES (5)")
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION tuple() TO VOLUME 'some_volume'")
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION tuple() TO DISK 'some_volume'")
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE table_with_normal_policy MOVE PART 'xxxxx' TO DISK 'jbod1'")
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION 'yyyy' TO DISK 'jbod1'")
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE table_with_normal_policy MODIFY SETTING storage_policy='moving_jbod_with_external'")
finally:
node1.query("DROP TABLE IF EXISTS table_with_normal_policy")
@pytest.mark.parametrize("name,engine", [
("test_alter_policy","MergeTree()"),
("replicated_test_alter_policy","ReplicatedMergeTree('/clickhouse/test_alter_policy', '1')",),
])
def test_alter_policy(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
d UInt64
) ENGINE = {engine}
ORDER BY d
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format(name=name)) == "small_jbod_with_external\n"
with pytest.raises(QueryRuntimeException):
node1.query("""ALTER TABLE {name} MODIFY SETTING storage_policy='one_more_small_jbod_with_external'""".format(name=name))
assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format(name=name)) == "small_jbod_with_external\n"
node1.query("""ALTER TABLE {name} MODIFY SETTING storage_policy='jbods_with_external'""".format(name=name))
assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format(name=name)) == "jbods_with_external\n"
with pytest.raises(QueryRuntimeException):
node1.query("""ALTER TABLE {name} MODIFY SETTING storage_policy='small_jbod_with_external'""".format(name=name))
assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format(name=name)) == "jbods_with_external\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
def get_random_string(length):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))
def get_used_disks_for_table(node, table_name):
return node.query("select disk_name from system.parts where table == '{}' and active=1 order by modification_time".format(table_name)).strip().split('\n')
def test_no_warning_about_zero_max_data_part_size(start_cluster):
def get_log(node):
return node.exec_in_container(["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"])
for node in (node1, node2):
node.query("""
CREATE TABLE default.test_warning_table (
s String
) ENGINE = MergeTree
ORDER BY tuple()
SETTINGS storage_policy='small_jbod_with_external'
""")
node.query("""
DROP TABLE default.test_warning_table
""")
log = get_log(node)
assert not re.search("Warning.*Volume.*special_warning_zero_volume", log)
assert not re.search("Warning.*Volume.*special_warning_default_volume", log)
assert re.search("Warning.*Volume.*special_warning_small_volume", log)
assert not re.search("Warning.*Volume.*special_warning_big_volume", log)
@pytest.mark.parametrize("name,engine", [
("mt_on_jbod","MergeTree()"),
("replicated_mt_on_jbod","ReplicatedMergeTree('/clickhouse/replicated_mt_on_jbod', '1')",),
])
def test_round_robin(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
d UInt64
) ENGINE = {engine}
ORDER BY d
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
# first should go to the jbod1
node1.query("insert into {} select * from numbers(10000)".format(name))
used_disk = get_used_disks_for_table(node1, name)
assert len(used_disk) == 1, 'More than one disk used for single insert'
node1.query("insert into {} select * from numbers(10000, 10000)".format(name))
used_disks = get_used_disks_for_table(node1, name)
assert len(used_disks) == 2, 'Two disks should be used for two parts'
assert used_disks[0] != used_disks[1], "Should write to different disks"
node1.query("insert into {} select * from numbers(20000, 10000)".format(name))
used_disks = get_used_disks_for_table(node1, name)
# jbod1 -> jbod2 -> jbod1 -> jbod2 ... etc
assert len(used_disks) == 3
assert used_disks[0] != used_disks[1]
assert used_disks[2] == used_disks[0]
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
("mt_with_huge_part","MergeTree()"),
("replicated_mt_with_huge_part","ReplicatedMergeTree('/clickhouse/replicated_mt_with_huge_part', '1')",),
])
def test_max_data_part_size(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = {engine}
ORDER BY tuple()
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
data = [] # 10MB in total
for i in range(10):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert len(used_disks) == 1
assert used_disks[0] == 'external'
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("mt_with_overflow","MergeTree()"),
("replicated_mt_with_overflow","ReplicatedMergeTree('/clickhouse/replicated_mt_with_overflow', '1')",),
])
def test_jbod_overflow(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = {engine}
ORDER BY tuple()
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MERGES")
# small jbod size is 40MB, so lets insert 5MB batch 7 times
for i in range(7):
data = [] # 5MB in total
for i in range(5):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert all(disk == 'jbod1' for disk in used_disks)
# should go to the external disk (jbod is overflown)
data = [] # 10MB in total
for i in range(10):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
assert used_disks[-1] == 'external'
node1.query("SYSTEM START MERGES")
time.sleep(1)
node1.query("OPTIMIZE TABLE {} FINAL".format(name))
time.sleep(2)
disks_for_merges = node1.query("SELECT disk_name FROM system.parts WHERE table == '{}' AND level >= 1 and active = 1 ORDER BY modification_time".format(name)).strip().split('\n')
assert all(disk == 'external' for disk in disks_for_merges)
finally:
node1.query("DROP TABLE IF EXISTS {}".format(name))
@pytest.mark.parametrize("name,engine", [
("moving_mt","MergeTree()"),
("moving_replicated_mt","ReplicatedMergeTree('/clickhouse/moving_replicated_mt', '1')",),
])
def test_background_move(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = {engine}
ORDER BY tuple()
SETTINGS storage_policy='moving_jbod_with_external'
""".format(name=name, engine=engine))
for i in range(5):
data = [] # 5MB in total
for i in range(5):
data.append(get_random_string(1024 * 1024)) # 1MB row
# small jbod size is 40MB, so lets insert 5MB batch 5 times
node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
retry = 20
i = 0
while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry:
time.sleep(0.5)
used_disks = get_used_disks_for_table(node1, name)
i += 1
assert sum(1 for x in used_disks if x == 'jbod1') <= 2
# first (oldest) part was moved to external
assert used_disks[0] == 'external'
path = node1.query("SELECT path_on_disk FROM system.part_log WHERE table = '{}' AND event_type='MovePart' ORDER BY event_time LIMIT 1".format(name))
# first (oldest) part was moved to external
assert path.startswith("/external")
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("stopped_moving_mt","MergeTree()"),
("stopped_moving_replicated_mt","ReplicatedMergeTree('/clickhouse/stopped_moving_replicated_mt', '1')",),
])
def test_start_stop_moves(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = {engine}
ORDER BY tuple()
SETTINGS storage_policy='moving_jbod_with_external'
""".format(name=name, engine=engine))
node1.query("INSERT INTO {} VALUES ('HELLO')".format(name))
node1.query("INSERT INTO {} VALUES ('WORLD')".format(name))
used_disks = get_used_disks_for_table(node1, name)
assert all(d == "jbod1" for d in used_disks), "All writes shoud go to jbods"
first_part = node1.query("SELECT name FROM system.parts WHERE table = '{}' and active = 1 ORDER BY modification_time LIMIT 1".format(name)).strip()
node1.query("SYSTEM STOP MOVES")
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part))
used_disks = get_used_disks_for_table(node1, name)
assert all(d == "jbod1" for d in used_disks), "Blocked moves doesn't actually move something"
node1.query("SYSTEM START MOVES")
node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part))
disk = node1.query("SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name, first_part)).strip()
assert disk == "external"
node1.query("TRUNCATE TABLE {}".format(name))
node1.query("SYSTEM STOP MOVES {}".format(name))
node1.query("SYSTEM STOP MERGES {}".format(name))
for i in range(5):
data = [] # 5MB in total
for i in range(5):
data.append(get_random_string(1024 * 1024)) # 1MB row
            # jbod size is 40MB, so let's insert a 5MB batch 5 times
node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
used_disks = get_used_disks_for_table(node1, name)
retry = 5
i = 0
while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry:
time.sleep(0.1)
used_disks = get_used_disks_for_table(node1, name)
i += 1
# first (oldest) part doesn't move anywhere
assert used_disks[0] == 'jbod1'
node1.query("SYSTEM START MOVES {}".format(name))
node1.query("SYSTEM START MERGES {}".format(name))
        # wait some time until the background backoff finishes
retry = 30
i = 0
while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry:
time.sleep(1)
used_disks = get_used_disks_for_table(node1, name)
i += 1
assert sum(1 for x in used_disks if x == 'jbod1') <= 2
# first (oldest) part moved to external
assert used_disks[0] == 'external'
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
def get_path_for_part_from_part_log(node, table, part_name):
node.query("SYSTEM FLUSH LOGS")
path = node.query("SELECT path_on_disk FROM system.part_log WHERE table = '{}' and part_name = '{}' ORDER BY event_time DESC LIMIT 1".format(table, part_name))
return path.strip()
def get_paths_for_partition_from_part_log(node, table, partition_id):
node.query("SYSTEM FLUSH LOGS")
paths = node.query("SELECT path_on_disk FROM system.part_log WHERE table = '{}' and partition_id = '{}' ORDER BY event_time DESC".format(table, partition_id))
return paths.strip().split('\n')
@pytest.mark.parametrize("name,engine", [
("altering_mt","MergeTree()"),
#("altering_replicated_mt","ReplicatedMergeTree('/clickhouse/altering_replicated_mt', '1')",),
# SYSTEM STOP MERGES doesn't disable merges assignments
])
def test_alter_move(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MERGES {}".format(name)) # to avoid conflicts
node1.query("INSERT INTO {} VALUES(toDate('2019-03-15'), 65)".format(name))
node1.query("INSERT INTO {} VALUES(toDate('2019-03-16'), 66)".format(name))
node1.query("INSERT INTO {} VALUES(toDate('2019-04-10'), 42)".format(name))
node1.query("INSERT INTO {} VALUES(toDate('2019-04-11'), 43)".format(name))
used_disks = get_used_disks_for_table(node1, name)
assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods"
first_part = node1.query("SELECT name FROM system.parts WHERE table = '{}' and active = 1 ORDER BY modification_time LIMIT 1".format(name)).strip()
time.sleep(1)
node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part))
disk = node1.query("SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name, first_part)).strip()
assert disk == 'external'
assert get_path_for_part_from_part_log(node1, name, first_part).startswith("/external")
time.sleep(1)
node1.query("ALTER TABLE {} MOVE PART '{}' TO DISK 'jbod1'".format(name, first_part))
disk = node1.query("SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name, first_part)).strip()
assert disk == 'jbod1'
assert get_path_for_part_from_part_log(node1, name, first_part).startswith("/jbod1")
time.sleep(1)
node1.query("ALTER TABLE {} MOVE PARTITION 201904 TO VOLUME 'external'".format(name))
disks = node1.query("SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201904' and active = 1".format(name)).strip().split('\n')
assert len(disks) == 2
assert all(d == "external" for d in disks)
assert all(path.startswith("/external") for path in get_paths_for_partition_from_part_log(node1, name, '201904')[:2])
time.sleep(1)
node1.query("ALTER TABLE {} MOVE PARTITION 201904 TO DISK 'jbod2'".format(name))
disks = node1.query("SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201904' and active = 1".format(name)).strip().split('\n')
assert len(disks) == 2
assert all(d == "jbod2" for d in disks)
assert all(path.startswith("/jbod2") for path in get_paths_for_partition_from_part_log(node1, name, '201904')[:2])
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "4\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.parametrize("volume_or_disk", [
"DISK",
"VOLUME"
])
def test_alter_move_half_of_partition(start_cluster, volume_or_disk):
name = "alter_move_half_of_partition"
engine = "MergeTree()"
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MERGES {}".format(name))
node1.query("INSERT INTO {} VALUES(toDate('2019-03-15'), 65)".format(name))
node1.query("INSERT INTO {} VALUES(toDate('2019-03-16'), 42)".format(name))
used_disks = get_used_disks_for_table(node1, name)
assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods"
time.sleep(1)
parts = node1.query("SELECT name FROM system.parts WHERE table = '{}' and active = 1".format(name)).splitlines()
assert len(parts) == 2
node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, parts[0]))
disks = node1.query("SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name, parts[0])).splitlines()
assert disks == ["external"]
time.sleep(1)
node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name, volume_or_disk=volume_or_disk))
disks = node1.query("SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201903' and active = 1".format(name)).splitlines()
assert disks == ["external"]*2
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "2\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.parametrize("volume_or_disk", [
"DISK",
"VOLUME"
])
def test_alter_double_move_partition(start_cluster, volume_or_disk):
name = "alter_double_move_partition"
engine = "MergeTree()"
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
node1.query("SYSTEM STOP MERGES {}".format(name))
node1.query("INSERT INTO {} VALUES(toDate('2019-03-15'), 65)".format(name))
node1.query("INSERT INTO {} VALUES(toDate('2019-03-16'), 42)".format(name))
used_disks = get_used_disks_for_table(node1, name)
assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods"
time.sleep(1)
node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name, volume_or_disk=volume_or_disk))
disks = node1.query("SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201903' and active = 1".format(name)).splitlines()
assert disks == ["external"]*2
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "2\n"
time.sleep(1)
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name, volume_or_disk=volume_or_disk))
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
def produce_alter_move(node, name):
move_type = random.choice(["PART", "PARTITION"])
if move_type == "PART":
for _ in range(10):
try:
                parts = node.query("SELECT name from system.parts where table = '{}' and active = 1".format(name)).strip().split('\n')
break
except QueryRuntimeException:
pass
else:
raise Exception("Cannot select from system.parts")
move_part = random.choice(["'" + part + "'" for part in parts])
else:
move_part = random.choice([201903, 201904])
move_disk = random.choice(["DISK", "VOLUME"])
if move_disk == "DISK":
move_volume = random.choice(["'external'", "'jbod1'", "'jbod2'"])
else:
move_volume = random.choice(["'main'", "'external'"])
try:
node1.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format(
name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume))
except QueryRuntimeException as ex:
pass
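# For reference, the statement built above looks like the following
# (table, part and partition names are illustrative):
#   ALTER TABLE <table> MOVE PART '201903_1_1_0' TO DISK 'external'
#   ALTER TABLE <table> MOVE PARTITION 201904 TO VOLUME 'main'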
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("concurrently_altering_mt","MergeTree()"),
("concurrently_altering_replicated_mt","ReplicatedMergeTree('/clickhouse/concurrently_altering_replicated_mt', '1')",),
])
def test_concurrent_alter_move(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
values = list({ random.randint(1, 1000000) for _ in range(0, 1000) })
def insert(num):
for i in range(num):
day = random.randint(11, 30)
value = values.pop()
month = '0' + str(random.choice([3, 4]))
node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))
def alter_move(num):
for i in range(num):
produce_alter_move(node1, name)
def alter_update(num):
for i in range(num):
node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name))
def optimize_table(num):
for i in range(num):
node1.query("OPTIMIZE TABLE {} FINAL".format(name))
p = Pool(15)
tasks = []
for i in range(5):
tasks.append(p.apply_async(insert, (100,)))
tasks.append(p.apply_async(alter_move, (100,)))
tasks.append(p.apply_async(alter_update, (100,)))
tasks.append(p.apply_async(optimize_table, (100,)))
for task in tasks:
task.get(timeout=120)
assert node1.query("SELECT 1") == "1\n"
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("concurrently_dropping_mt","MergeTree()"),
("concurrently_dropping_replicated_mt","ReplicatedMergeTree('/clickhouse/concurrently_dropping_replicated_mt', '1')",),
])
def test_concurrent_alter_move_and_drop(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
values = list({ random.randint(1, 1000000) for _ in range(0, 1000) })
def insert(num):
for i in range(num):
day = random.randint(11, 30)
value = values.pop()
month = '0' + str(random.choice([3, 4]))
node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))
def alter_move(num):
for i in range(num):
produce_alter_move(node1, name)
def alter_drop(num):
for i in range(num):
partition = random.choice([201903, 201904])
drach = random.choice(["drop", "detach"])
node1.query("ALTER TABLE {} {} PARTITION {}".format(name, drach, partition))
insert(100)
p = Pool(15)
tasks = []
for i in range(5):
tasks.append(p.apply_async(insert, (100,)))
tasks.append(p.apply_async(alter_move, (100,)))
tasks.append(p.apply_async(alter_drop, (100,)))
for task in tasks:
task.get(timeout=60)
assert node1.query("SELECT 1") == "1\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.parametrize("name,engine", [
("detach_attach_mt","MergeTree()"),
("replicated_detach_attach_mt","ReplicatedMergeTree('/clickhouse/replicated_detach_attach_mt', '1')",),
])
def test_detach_attach(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = {engine}
ORDER BY tuple()
SETTINGS storage_policy='moving_jbod_with_external'
""".format(name=name, engine=engine))
data = [] # 5MB in total
for i in range(5):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
node1.query("ALTER TABLE {} DETACH PARTITION tuple()".format(name))
assert node1.query("SELECT count() FROM {}".format(name)).strip() == "0"
assert node1.query("SELECT disk FROM system.detached_parts WHERE table = '{}'".format(name)).strip() == "jbod1"
node1.query("ALTER TABLE {} ATTACH PARTITION tuple()".format(name))
assert node1.query("SELECT count() FROM {}".format(name)).strip() == "5"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.parametrize("name,engine", [
("mutating_mt","MergeTree()"),
("replicated_mutating_mt","ReplicatedMergeTree('/clickhouse/replicated_mutating_mt', '1')",),
])
def test_mutate_to_another_disk(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
s1 String
) ENGINE = {engine}
ORDER BY tuple()
SETTINGS storage_policy='moving_jbod_with_external'
""".format(name=name, engine=engine))
for i in range(5):
data = [] # 5MB in total
for i in range(5):
data.append(get_random_string(1024 * 1024)) # 1MB row
node1.query("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name))
retry = 20
while node1.query("SELECT * FROM system.mutations WHERE is_done = 0") != "" and retry > 0:
retry -= 1
time.sleep(0.5)
if node1.query("SELECT latest_fail_reason FROM system.mutations WHERE table = '{}'".format(name)) == "":
assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n"
else: # mutation failed, let's try on another disk
print "Mutation failed"
node1.query("OPTIMIZE TABLE {} FINAL".format(name))
node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name))
retry = 20
while node1.query("SELECT * FROM system.mutations WHERE is_done = 0") != "" and retry > 0:
retry -= 1
time.sleep(0.5)
assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
@pytest.mark.skip(reason="Flappy test")
@pytest.mark.parametrize("name,engine", [
("alter_modifying_mt","MergeTree()"),
("replicated_alter_modifying_mt","ReplicatedMergeTree('/clickhouse/replicated_alter_modifying_mt', '1')",),
])
def test_concurrent_alter_modify(start_cluster, name, engine):
try:
node1.query("""
CREATE TABLE {name} (
EventDate Date,
number UInt64
) ENGINE = {engine}
ORDER BY tuple()
PARTITION BY toYYYYMM(EventDate)
SETTINGS storage_policy='jbods_with_external'
""".format(name=name, engine=engine))
values = list({ random.randint(1, 1000000) for _ in range(0, 1000) })
def insert(num):
for i in range(num):
day = random.randint(11, 30)
value = values.pop()
month = '0' + str(random.choice([3, 4]))
node1.query("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))
def alter_move(num):
for i in range(num):
produce_alter_move(node1, name)
def alter_modify(num):
for i in range(num):
column_type = random.choice(["UInt64", "String"])
node1.query("ALTER TABLE {} MODIFY COLUMN number {}".format(name, column_type))
insert(100)
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "100\n"
p = Pool(50)
tasks = []
for i in range(5):
tasks.append(p.apply_async(alter_move, (100,)))
tasks.append(p.apply_async(alter_modify, (100,)))
for task in tasks:
task.get(timeout=120)
assert node1.query("SELECT 1") == "1\n"
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "100\n"
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
def test_simple_replication_and_moves(start_cluster):
try:
for i, node in enumerate([node1, node2]):
node.query("""
CREATE TABLE replicated_table_for_moves (
s1 String
) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_moves', '{}')
ORDER BY tuple()
SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2
""".format(i + 1))
def insert(num):
for i in range(num):
node = random.choice([node1, node2])
data = [] # 1MB in total
for i in range(2):
data.append(get_random_string(512 * 1024)) # 500KB value
node.query("INSERT INTO replicated_table_for_moves VALUES {}".format(','.join(["('" + x + "')" for x in data])))
def optimize(num):
for i in range(num):
node = random.choice([node1, node2])
node.query("OPTIMIZE TABLE replicated_table_for_moves FINAL")
p = Pool(60)
tasks = []
tasks.append(p.apply_async(insert, (20,)))
tasks.append(p.apply_async(optimize, (20,)))
for task in tasks:
task.get(timeout=60)
node1.query("SYSTEM SYNC REPLICA replicated_table_for_moves", timeout=5)
node2.query("SYSTEM SYNC REPLICA replicated_table_for_moves", timeout=5)
node1.query("SELECT COUNT() FROM replicated_table_for_moves") == "40\n"
node2.query("SELECT COUNT() FROM replicated_table_for_moves") == "40\n"
data = [] # 1MB in total
for i in range(2):
data.append(get_random_string(512 * 1024)) # 500KB value
time.sleep(3) # wait until old parts will be deleted
node1.query("SYSTEM STOP MERGES")
node2.query("SYSTEM STOP MERGES")
node1.query("INSERT INTO replicated_table_for_moves VALUES {}".format(','.join(["('" + x + "')" for x in data])))
node2.query("INSERT INTO replicated_table_for_moves VALUES {}".format(','.join(["('" + x + "')" for x in data])))
time.sleep(3) # nothing was moved
disks1 = get_used_disks_for_table(node1, "replicated_table_for_moves")
disks2 = get_used_disks_for_table(node2, "replicated_table_for_moves")
node1.query("SYSTEM START MERGES")
node2.query("SYSTEM START MERGES")
        assert set(disks1) == set(["jbod1", "external"])
        assert set(disks2) == set(["jbod1", "external"])
finally:
for node in [node1, node2]:
node.query("DROP TABLE IF EXISTS replicated_table_for_moves")
def test_download_appropriate_disk(start_cluster):
try:
for i, node in enumerate([node1, node2]):
node.query("""
CREATE TABLE replicated_table_for_download (
s1 String
) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_download', '{}')
ORDER BY tuple()
SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2
""".format(i + 1))
data = []
for i in range(50):
data.append(get_random_string(1024 * 1024)) # 1MB value
node1.query("INSERT INTO replicated_table_for_download VALUES {}".format(','.join(["('" + x + "')" for x in data])))
for _ in range(10):
try:
print "Syncing replica"
node2.query("SYSTEM SYNC REPLICA replicated_table_for_download")
break
except:
time.sleep(0.5)
disks2 = get_used_disks_for_table(node2, "replicated_table_for_download")
assert set(disks2) == set(["external"])
finally:
for node in [node1, node2]:
node.query("DROP TABLE IF EXISTS replicated_table_for_download")
def test_rename(start_cluster):
try:
node1.query("""
CREATE TABLE default.renaming_table (
s String
) ENGINE = MergeTree
ORDER BY tuple()
SETTINGS storage_policy='small_jbod_with_external'
""")
for _ in range(5):
data = []
for i in range(10):
data.append(get_random_string(1024 * 1024)) # 1MB value
node1.query("INSERT INTO renaming_table VALUES {}".format(','.join(["('" + x + "')" for x in data])))
disks = get_used_disks_for_table(node1, "renaming_table")
assert len(disks) > 1
assert node1.query("SELECT COUNT() FROM default.renaming_table") == "50\n"
node1.query("RENAME TABLE default.renaming_table TO default.renaming_table1")
assert node1.query("SELECT COUNT() FROM default.renaming_table1") == "50\n"
with pytest.raises(QueryRuntimeException):
node1.query("SELECT COUNT() FROM default.renaming_table")
node1.query("CREATE DATABASE IF NOT EXISTS test")
node1.query("RENAME TABLE default.renaming_table1 TO test.renaming_table2")
assert node1.query("SELECT COUNT() FROM test.renaming_table2") == "50\n"
with pytest.raises(QueryRuntimeException):
node1.query("SELECT COUNT() FROM default.renaming_table1")
finally:
node1.query("DROP TABLE IF EXISTS default.renaming_table")
node1.query("DROP TABLE IF EXISTS default.renaming_table1")
node1.query("DROP TABLE IF EXISTS test.renaming_table2")
def test_freeze(start_cluster):
try:
node1.query("""
CREATE TABLE default.freezing_table (
d Date,
s String
) ENGINE = MergeTree
ORDER BY tuple()
PARTITION BY toYYYYMM(d)
SETTINGS storage_policy='small_jbod_with_external'
""")
for _ in range(5):
data = []
dates = []
for i in range(10):
data.append(get_random_string(1024 * 1024)) # 1MB value
dates.append("toDate('2019-03-05')")
node1.query("INSERT INTO freezing_table VALUES {}".format(','.join(["(" + d + ", '" + s + "')" for d, s in zip(dates, data)])))
disks = get_used_disks_for_table(node1, "freezing_table")
assert len(disks) > 1
assert node1.query("SELECT COUNT() FROM default.freezing_table") == "50\n"
node1.query("ALTER TABLE freezing_table FREEZE PARTITION 201903")
# check shadow files (backups) exists
node1.exec_in_container(["bash", "-c", "find /jbod1/shadow -name '*.mrk2' | grep '.*'"])
node1.exec_in_container(["bash", "-c", "find /external/shadow -name '*.mrk2' | grep '.*'"])
finally:
node1.query("DROP TABLE IF EXISTS default.freezing_table")
node1.exec_in_container(["rm", "-rf", "/jbod1/shadow", "/external/shadow"])
def test_kill_while_insert(start_cluster):
try:
name = "test_kill_while_insert"
node1.query("""
CREATE TABLE {name} (
s String
) ENGINE = MergeTree
ORDER BY tuple()
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name))
data = []
dates = []
for i in range(10):
data.append(get_random_string(1024 * 1024)) # 1MB value
node1.query("INSERT INTO {name} VALUES {}".format(','.join(["('" + s + "')" for s in data]), name=name))
disks = get_used_disks_for_table(node1, name)
assert set(disks) == {"jbod1"}
start_time = time.time()
long_select = threading.Thread(target=node1.query, args=("SELECT sleep(3) FROM {name}".format(name=name),))
long_select.start()
time.sleep(0.5)
node1.query("ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'external'".format(name=name))
assert time.time() - start_time < 2
node1.restart_clickhouse(kill=True)
try:
long_select.join()
except:
""""""
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["10"]
finally:
try:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
except:
"""ClickHouse may be inactive at this moment and we don't want to mask a meaningful exception."""
def test_move_while_merge(start_cluster):
try:
name = "test_move_while_merge"
node1.query("""
CREATE TABLE {name} (
n Int64
) ENGINE = MergeTree
ORDER BY sleep(2)
SETTINGS storage_policy='small_jbod_with_external'
""".format(name=name))
node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
node1.query("INSERT INTO {name} VALUES (2)".format(name=name))
parts = node1.query("SELECT name FROM system.parts WHERE table = '{name}' AND active = 1".format(name=name)).splitlines()
assert len(parts) == 2
def optimize():
node1.query("OPTIMIZE TABLE {name}".format(name=name))
optimize = threading.Thread(target=optimize)
optimize.start()
time.sleep(0.5)
with pytest.raises(QueryRuntimeException):
node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))
exiting = False
no_exception = {}
def alter():
while not exiting:
try:
node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))
no_exception['missing'] = 'exception'
break
except QueryRuntimeException:
""""""
alter_thread = threading.Thread(target=alter)
alter_thread.start()
optimize.join()
time.sleep(0.5)
exiting = True
alter_thread.join()
assert len(no_exception) == 0
assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["2"]
finally:
node1.query("DROP TABLE IF EXISTS {name}".format(name=name))
|
main.py
|
import multiprocessing
from mlapp.env_loader import EnvironmentLoader
from mlapp.utils.general import read_json_file
import importlib
import uuid
import json
from mlapp.handlers.wrappers.database_wrapper import database_instance
from mlapp.handlers.wrappers.file_storage_wrapper import file_storage_instance
from mlapp.handlers.wrappers.message_queue_wrapper import message_queue_instance
from mlapp.handlers.wrappers.spark_wrapper import spark_instance
from mlapp.managers.flow_manager import FlowManager
import traceback
from ast import literal_eval
from mlapp.config import settings, environment_services
import os
class MLApp(object):
MLAPP_SERVICE_TYPE = '_MLAPP_SERVICE_TYPE'
def __init__(self, inner_settings=None):
"""
Constructor for the MLApp Class.
        This class, when instantiated, is the main endpoint for the ML App Library.
        Pass your custom settings to this constructor to set up your customized configuration of ML App
        and the Environment that you defined with the ML App CLI.
        After instantiation you can use the instance to run:
- Flows (single-process and multi-process)
- Applications/Workers (using the queue listener)
- Send configurations to an outside queue
"""
if inner_settings is None:
inner_settings = {}
for key in inner_settings:
if isinstance(inner_settings[key], dict):
if key not in settings:
settings[key] = {}
settings[key].update(inner_settings[key])
elif isinstance(inner_settings[key], list):
if key not in settings:
settings[key] = []
settings[key] += inner_settings[key]
else:
settings[key] = inner_settings[key]
# init environment services
env = EnvironmentLoader.load(filename=inner_settings.get('env_file_path', ''))
env_services_dict = {
k.replace(self.MLAPP_SERVICE_TYPE, ''): os.environ[k].lower()
for k in os.environ if k.endswith(self.MLAPP_SERVICE_TYPE)
}
settings['services'] = EnvironmentLoader.create_services(env, env_services_dict, environment_services)
for wrapper_instance in [file_storage_instance, database_instance, message_queue_instance, spark_instance]:
wrapper_instance.init()
# ======== TASK RUN ==========
def _on_callback(self, message_body):
"""
This is the function that executes the configuration sent to the application/worker.
:param message_body: the configuration in bytes/string format.
:return: None
"""
job_id = 'None'
try:
message_body = message_body.decode("utf-8")
except AttributeError as attrError:
pass # message body is string and not bytes
print("Hello, The following task is consumed: " + str(message_body))
try:
message_config = json.loads(literal_eval(message_body))
except Exception as first_error:
try:
message_config = json.loads(message_body)
except Exception as second_error:
print("Error response: " + str('Message not in JSON format'))
traceback.print_exc()
self._send_error_response_to_mq(job_id, '-1', str('Message not in JSON format'))
return
print(message_config)
try:
job_id = str(message_config.get('job_id', str(uuid.uuid4())))
results = self._run_flow(job_id, message_config)
self._send_ok_response_to_mq(
job_id, results.get('status_code', -1), 'all went ok', results.get('response', {}))
except Exception as error:
print("Error response: " + str(error))
traceback.print_exc()
self._send_error_response_to_mq(job_id, '-1', str(error))
finally:
pass
# =========== TASK HANDLERS ===========
def _run_flow(self, job_id, config):
"""
This is the function that executes the Flow of your configuration.
:param job_id: the job identifier used for monitoring via the Control Panel.
:param config: the configuration as Dictionary.
:return: Dictionary containing the status and response of the flow run.
"""
# update job
database_instance.update_job_running(job_id)
# call Flow_manager to run the job
status_code, response, _ = FlowManager(job_id, config).run()
return {'status_code': status_code, 'response': response}
# ======== MQ HANDLERS =========
def _send_ok_response_to_mq(self, job_id, status_code, status_msg, result):
"""
This function sends response back to the Control Panel via queue if the job succeeded
:param job_id: the job identifier used for monitoring via the Control Panel.
:param status_code: result status of the flow run.
:param status_msg: result message of the flow run.
:param result: response of the flow run - if json serialized returned in the message queue as well.
:return: None
"""
response_obj = {
"job_id": job_id, "status_code": status_code, "status_msg": status_msg
}
try:
# trying to JSON-ify result object
response_obj['result'] = result
response_json = json.dumps(response_obj)
except Exception as error:
print(error)
response_obj['result'] = {}
response_json = json.dumps(response_obj)
message_queue_instance.send_message(settings['queues']['send_queue_name'], response_json)
def _send_error_response_to_mq(self, job_id, status_code, status_msg):
"""
This function sends response back to the Control Panel via queue if the job failed
:param job_id: the job identifier used for monitoring via the Control Panel.
:param status_code: error status of the flow run.
:param status_msg: error message of the flow run.
:return: None
"""
response_json = json.dumps({"job_id": job_id, "status_code": status_code, "status_msg": status_msg})
message_queue_instance.send_message(settings['queues']['send_queue_name'], response_json)
def _dispatch_jobs_to_mq(self, configurations):
"""
This function sends configurations to the queue to be picked up later by a listening Application/Worker.
:param configurations: list of configurations to be sent
:return: None
"""
for configuration in configurations:
            response_json = json.dumps(configuration)
            # the configuration is already serialized above; send it as-is to avoid double encoding
            message_queue_instance.send_message(settings['queues']['send_queue_name'], response_json)
# ======== LISTENER =========
def run_listener(self):
"""
This function is an endpoint of the ML App Library to be used in an Application/Worker.
        It listens on the configured queues indefinitely, processing each configuration as it is received.
"""
message_queue_instance.listen_to_queues(settings['queues']['listen_queue_names'], self._on_callback)
# ======== RUN CONFIG =========
def run_flow(self, asset_name, config_path, config_name=None, **kwargs):
"""
This function is an endpoint of the ML App Library to be used in a local environment.
        It runs a local configuration file on your local machine.
:param asset_name: name of the asset to be run
:param config_path: path to configuration file
        :param config_name: if the configuration file is a Python file, the name of the variable holding the configuration
"""
job_id = str(uuid.uuid4())
try:
config = read_json_file(config_path)
except Exception as err:
config = self._read_py_file(asset_name, config_path, config_name)
self._insert_latest_id_in_config(config)
_, run_ids, outputs = FlowManager(job_id, config, **kwargs).run()
self._update_latest_model_id(config, run_ids)
@staticmethod
def run_flow_from_config(config):
return FlowManager("deployment", config).run()
# ======== SEND CONFIG TO MQ =========
def run_msg_sender(self, asset_name, config_path, config_name=None):
"""
This function is an endpoint of the ML App Library to be used in a local environment.
        It sends a local configuration file from your local machine to be run by an external Application/Worker via
        the message queue.
:param asset_name: name of the asset to be run
:param config_path: path to configuration file
        :param config_name: if the configuration file is a Python file, the name of the variable holding the configuration
"""
try:
message_to_send = read_json_file(config_path)
except Exception as e:
message_to_send = self._read_py_file(asset_name, config_path, config_name)
job_id = str(uuid.uuid4())
message_to_send['job_id'] = job_id
message_queue_instance.send_message(settings['queues']['listen_queue_names'][0], json.dumps(message_to_send))
print("Message Sent (job_id: " + job_id + "): ", asset_name, config_path)
# ======== RUN CONFIGS MULTIPROCESSING =========
def run_configs_multiprocessing(self, instructions):
"""
This function is an endpoint of the ML App Library.
        It runs multiple configurations concurrently using multiprocessing.
        :param instructions: list of instructions, one to send to each process
"""
jobs = []
for instruction in instructions:
p = multiprocessing.Process(target=self._run_config_multiprocess, args=(instruction,))
jobs.append(p)
p.start()
for p in jobs:
p.join()
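    # Illustrative call (values are assumptions; only the expected keys come from
    # `_run_config_multiprocess` below):
    #   mlapp.run_configs_multiprocessing([
    #       {'asset_name': 'my_asset', 'config_path': 'configs/a.json'},
    #       {'asset_name': 'my_asset', 'config_path': 'configs/b.py', 'config_name': 'b_config'},
    #   ])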
# ======== HELPER PRIVATE FUNCTIONS =========
def _run_config_multiprocess(self, instruction):
"""
        This function executes a single instruction in its own process when used by `run_configs_multiprocessing`.
:param instruction: instruction Dictionary containing asset_name, config_path and config_name.
        - asset_name: name of the asset to be run
        - config_path: path to configuration file
        - config_name: if the configuration file is a Python file, the name of the variable holding the configuration
"""
try:
self.run_flow(instruction['asset_name'], instruction['config_path'], instruction.get('config_name'))
except Exception as err:
print(err)
traceback.print_exc()
@staticmethod
def _read_py_file(asset_name, config_path, config_name):
"""
This function fetches a configuration Dictionary stored in a python file.
:param asset_name: name of the asset to be run
:param config_path: path to configuration file
:param config_name: variable in the python file containing the configuration
:return: Configuration as a Dictionary
"""
spec = importlib.util.spec_from_file_location(asset_name, config_path)
config = importlib.util.module_from_spec(spec)
spec.loader.exec_module(config)
return config.__dict__[config_name]
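    # A hypothetical Python configuration file loadable by `_read_py_file`
    # (file name, variable name and keys are assumptions for illustration):
    #
    #   # train_config.py
    #   train_config = {
    #       'pipelines_configs': [
    #           {'job_settings': {'asset_name': 'my_asset', 'pipeline': 'train'}}
    #       ]
    #   }
    #
    #   # loaded via: _read_py_file('my_asset', 'train_config.py', 'train_config')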
@staticmethod
def _insert_latest_id_in_config(config):
"""
        This is a helper function for the `latest` feature in a local environment.
        Updates the configuration about to be run with the latest id stored in a local file,
        looked up by asset name.
:param config: current flow run configuration as a Dictionary
"""
# prepare latest file
local_path = settings.get('local_storage_path', 'output')
latest_file_name = settings.get('latest_file_name', 'latest_ids.json')
latest_ids_path = os.path.join(local_path, latest_file_name)
try:
with open(latest_ids_path) as f:
latest = json.load(f)
except:
latest = {}
# iterate pipelines
for i, pipeline in enumerate(config.get('pipelines_configs', [])):
# iterate optional ids
for id_type in ['model_id', 'data_id', 'reuse_features_id']:
# check if requested latest
if pipeline.get('job_settings', {}).get(id_type, None) == 'latest':
# get current asset name
asset_name = pipeline['job_settings']['asset_name']
# check if available id
if asset_name in latest:
# TODO: add here asset label level
# TODO: add here data_id/model_id/reuse_features_id
config['pipelines_configs'][i]['job_settings'][id_type] = latest[asset_name]
else:
# raise exception as not found id
raise Exception("Could not find latest `" + id_type + "` for `" + asset_name + "`. \n"
"Please update your config with a valid `" + id_type + "`")
@staticmethod
def _update_latest_model_id(config, run_ids):
"""
        This is a helper function for the `latest` feature in a local environment.
        Updates the local file containing the latest id used for an asset.
:param config: current flow run configuration as a Dictionary
        :param run_ids: list of mlapp identifiers generated in the current flow run.
"""
# prepare latest file
local_path = settings.get('local_storage_path', 'output')
latest_file_name = settings.get('latest_file_name', 'latest_ids.json')
if not os.path.exists(local_path):
os.makedirs(local_path)
latest_ids_path = os.path.join(local_path, latest_file_name)
latest = {}
try:
with open(latest_ids_path) as f:
latest = json.load(f)
except:
pass
# iterate over pipelines
for pipeline, run_id in zip(config['pipelines_configs'], run_ids):
            # check whether the pipeline that ran is one whose id should be stored
# TODO: add here asset label level
# TODO: add here data_id/model_id/reuse_features_id
if pipeline['job_settings']['pipeline'] in ['train', 'feature_engineering']:
latest[pipeline['job_settings']['asset_name']] = run_id
with open(latest_ids_path, 'w') as f:
json.dump(latest, f)
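# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal example of how the endpoints above might be wired together; the
# import path, settings module and file names below are assumptions:
#
#   from mlapp.main import MLApp
#   from config import settings as custom_settings
#
#   mlapp = MLApp(custom_settings.settings)
#   # run a single configuration locally:
#   mlapp.run_flow('my_asset', 'assets/my_asset/configs/train_config.json')
#   # or start a worker that listens on the configured queues:
#   mlapp.run_listener()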
|
window_title_async.py
|
# -*- coding: utf-8 -*-
"""
Display the current window title with async update.
Uses asynchronous update via i3 IPC events.
Provides an instant title update only when it is required.
Configuration parameters:
always_show: do not hide the title when it can be already
visible (e.g. in tabbed layout). (default False)
empty_title: string that will be shown instead of the title when
the title is hidden. (default "")
format: format of the title, (default "{title}")
max_width: maximum width of block (in symbols).
If the title is longer than `max_width`,
the title will be truncated to `max_width - 1`
first symbols with ellipsis appended. (default 120)
Requires:
i3ipc: (https://github.com/acrisci/i3ipc-python)
@author Anon1234 https://github.com/Anon1234
@license BSD
"""
from threading import Thread
import i3ipc
class Py3status:
"""
"""
# available configuration parameters
always_show = False
empty_title = ""
format = "{title}"
max_width = 120
def __init__(self):
self.title = self.empty_title
# we are listening to i3 events in a separate thread
t = Thread(target=self._loop)
t.daemon = True
t.start()
def _loop(self):
def get_title(conn):
tree = conn.get_tree()
w = tree.find_focused()
p = w.parent
            # don't show the window title when the window already has means
# to display it
if not self.always_show and (
w.border == "normal" or w.type == "workspace" or
(p.layout in ("stacked", "tabbed") and len(p.nodes) > 1)):
return self.empty_title
else:
title = w.name
if len(title) > self.max_width:
title = title[:self.max_width - 1] + "…"
return self.py3.safe_format(self.format, {'title': title})
def update_title(conn, e):
# catch only focused window title updates
title_changed = hasattr(e, "container") and e.container.focused
# check if we need to update title due to changes
# in the workspace layout
layout_changed = (
hasattr(e, "binding") and
(e.binding.command.startswith("layout") or
e.binding.command.startswith("move container") or
e.binding.command.startswith("border"))
)
if title_changed or layout_changed:
self.title = get_title(conn)
self.py3.update()
def clear_title(*args):
self.title = self.empty_title
self.py3.update()
conn = i3ipc.Connection()
self.title = get_title(conn) # set title on startup
self.py3.update()
# The order of following callbacks is important!
# clears the title on empty ws
conn.on('workspace::focus', clear_title)
# clears the title when the last window on ws was closed
conn.on("window::close", clear_title)
# listens for events which can trigger the title update
conn.on("window::title", update_title)
conn.on("window::focus", update_title)
conn.on("binding", update_title)
conn.main() # run the event loop
def window_title(self):
resp = {
'cached_until': self.py3.CACHE_FOREVER,
'full_text': self.title,
}
return resp
if __name__ == "__main__":
"""
Run module in test mode.
"""
config = {
'always_show': True,
}
from py3status.module_test import module_test
module_test(Py3status, config=config)
|
test_util.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import re
import sys
import threading
import numpy as np
import six
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
from tensorflow.python.platform import logging
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError(
"Expected op for node %s is different. %s vs %s" % (
node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError(
"Not all expected ops are present. Expected %s, found %s" % (
expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"):
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
ops.reset_default_graph()
def tearDown(self):
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
if not self._tempdir:
self._tempdir = googletest.GetTempDir()
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
    then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form
message: the message to validate
"""
if type(expected_message_maybe_ascii) == type(message):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message)
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self, expected, actual, producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/gpu:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
    `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
            self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/gpu:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
config = config_pb2.ConfigProto().CopyFrom(config)
config.allow_soft_placement = False
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(graph=None,
config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
with sess.graph.device("/gpu:0"):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail(
"Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
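  # Illustrative usage (assumed, not from the original file):
  #
  #   t = self.checkedThread(target=some_callable, args=(1, 2))
  #   t.start()
  #   t.join()  # fails the test if some_callable raised an exception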
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: a float value.
f2: a float value.
err: a float value.
"""
self.assertTrue(math.fabs(f1 - f2) < err)
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
for f1, f2 in zip(farray1, farray2):
self.assertNear(f1, f2, err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays have near values.
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
rtol: relative tolerance
atol: absolute tolerance
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
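      # For example, with the default rtol=1e-6 and atol=1e-6, a=1.000003 and
      # b=1.0 are flagged because |a - b| = 3e-6 > atol + rtol * |b| = 2e-6.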
cond = np.abs(a - b) > atol + rtol * np.abs(b)
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: a numpy ndarray or anything that can be converted to one.
      b: a numpy ndarray or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(
a.shape, b.shape,
"Shape mismatch: expected %s, got %s." % (a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in OpError exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
errors.OpError exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message
op = e.op
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError(e)
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `Device` object.
device2: A string device name or TensorFlow `Device` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
|
threading_test.py
|
import threading
import time
from threading import Thread
"""
我们只需要创建一个Thread对象,并运行start方法,
解释器就会创建一个子进程执行我们的target,我们创建了5个线程,但是使用threading.enumerate查看线程的数量发现有6个线程,
因为当前在执行的还有一个主线程。主线程会默认等待所有的子线程结束后再结束
"""
# def test(x):
# print('this is {}'.format(x))
# time.sleep(2)
# def get_thread(number=5):
# l_thread = (Thread(target=test, args=(i,)) for i in range(number))
# for t in l_thread:
# print(t)
# t.start() # 启动线程开始执行
# print(len(threading.enumerate()))
# if __name__ == '__main__':
# get_thread(5)
"""
A second way to create threads: subclass Thread and override run().
"""
class MyThread(Thread):
def __init__(self, x):
super().__init__()
self.x = x
def run(self):
print('this is {}'.format(self.x))
time.sleep(2)
def get_thread1(number=5):
l_thread = (MyThread(i) for i in range(number))
for t in l_thread:
print(t.name)
t.start()
print(len(threading.enumerate()))
if __name__ == '__main__':
get_thread1(5)
|
checker.py
|
from random import choice
from time import sleep, time
from typing import Iterable, Dict, List, Tuple, Optional
from json.decoder import JSONDecodeError
import multiprocessing as mp
import os
import urllib3
import requests
from termcolor import colored
from interutils import cyan, pr
from proxion.util import (
Proxy,
)
from proxion import Defaults
from proxion.checker import CheckerFilter
urllib3.disable_warnings()
class ProxyChecker:
    '''
    Manages the whole process of checking:
    The checker first receives its parameters (the checklist and options).
    We then aggregate, shuffle and enqueue jobs, splitting every proxy from
    the checklist into as many as 4 separate checks per proxy.
    A check is defined by the target proxy's 'ip:port' and a protocol to check.
    We then spawn child processes that pop jobs from the queue and run the tests concurrently,
    meanwhile writing every check we have completed to a shared-memory variable,
    which allows us to periodically show the status of the checking process.
    '''
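    # Hypothetical usage sketch (argument values are assumptions; only the
    # parameter names come from __init__ below). The whole pipeline runs from
    # the constructor:
    #
    #   ProxyChecker(checklist,                  # an Iterable[Proxy] built elsewhere
    #                max_threads=8,
    #                timeout=10,
    #                checker_filter=some_filter, # a CheckerFilter instance
    #                verbose=True)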
def __init__(self, checklist: Iterable[Proxy],
max_threads: int = Defaults.checker_max_threads,
timeout: int = Defaults.checker_timeout,
checker_filter: CheckerFilter = None,
no_shuffle: bool = False,
verbose: bool = False):
self.timeout = timeout
self.verbose = verbose
if max_threads < 1:
raise ValueError(f'Invalid thread count: {max_threads}')
if not checklist:
raise ValueError('No proxies to check!')
# Build job queue based on filter options
self.queue = mp.Queue()
jobs_count = 0
for job in checker_filter.build_joblist(checklist, no_shuffle):
self.queue.put(job)
jobs_count += 1
max_threads = min(max_threads, jobs_count)
pr('Checking %s proxies (%s jobs) on %s threads' % (
cyan(len(checklist)), cyan(jobs_count), cyan(max_threads)
))
self._terminate_flag = False
with mp.Manager() as manager:
self.up = manager.list()
self.jobs_done = manager.Value('i', 0)
procs = []
for _ in range(max_threads):
procs.append(p := mp.Process(
target=self.worker, daemon=True))
p.start()
try:
self.handle_checker_loop(procs, jobs_count)
except KeyboardInterrupt:
self.handle_checker_interruption(procs, jobs_count)
finally:
pr('All children exited')
self.show_status()
# update_stats(time(), self.collect_results())
def handle_checker_loop(self, procs: Iterable[mp.Process], jobs_count: int):
print_interval = 3
last_print = time()
while self.active_children(procs):
sleep(0.25)
if self.verbose and time() - last_print > print_interval:
last_print = time()
pr('Jobs Progress: [%d/%d] = %d%%' % (
self.jobs_done.value, jobs_count, self.jobs_done.value * 100 / jobs_count
), '*')
self.show_status()
def handle_checker_interruption(self, procs: Iterable[mp.Process], jobs_count: int):
print()
pr('Interrupted, Killing children!', '!')
self._terminate_flag = True
self.queue.close()
for p in procs:
p.kill()
termination_print_interval = 2
last_print = time()
while n_alive := self.active_children(procs):
sleep(0.25)
if time() - last_print > termination_print_interval:
last_print = time()
pr(f'Waiting for {cyan(n_alive)} children to exit', '*')
percent_done = 100 * \
int(self.jobs_done.value) / jobs_count
pr(f'Jobs done: [{self.jobs_done.value}/{jobs_count}] = {percent_done}%', '*')
def active_children(self, procs: Iterable[mp.Process]) -> int:
if not procs:
return 0
return list(map(lambda p: p.is_alive(), procs)).count(True)
def worker(self):
while not self.queue.empty():
if self._terminate_flag:
pr('Terminating child although queue is not empty yet', '!')
break
proxy, proto = self.queue.get()
proxy: Proxy
proto: str
if self.verbose:
pr(f'Thread {cyan(os.getpid())} checking: {cyan(proxy.pip)} for proto: {cyan(proto)}', '*')
res = self.perform_check(proxy.pip, proto)
self.jobs_done.value += 1
if res is not None:
pr(f'Working {cyan(", ".join(res.protos))} proxy @ {colored(proxy.pip, "green")}')
self.up.append(res)
break
    def perform_check(self, pip: str, protocol: str) -> Optional[Proxy]:
try:
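            # requests selects the proxy entry by the scheme of the requested URL;
            # the test URL below is HTTPS, so every non-HTTP proxy protocol is
            # registered under the 'https' key here.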
proxies_dict = {
protocol if protocol == 'http' else 'https': protocol + '://' + pip
}
_t = time()
            # Attempt to get our current IP (through the proxy); expect JSON data!
resp = requests.get('https://ipinfo.io/', proxies=proxies_dict,
timeout=self.timeout, verify=False)
latency = time() - _t
try:
# Attempt to decode the received data
json = resp.json()
try:
return Proxy(pip, (protocol,), time(), latency, json['country'])
except KeyError as err:
                    pr(f'Result parsing "{err}" from: {json}', '*')
except JSONDecodeError as err:
# Any failure will be a sign of the proxy not forwarding us,
# but instead returning some custom data to us!
pr(f'Status Code: {resp.status_code}, Text: \n{resp.text}', '*')
                pr(f'A JSON decode error "{err}" occurred!', '*')
except (requests.ConnectTimeout, requests.ReadTimeout):
pr(f'{cyan(pip)} -> {cyan(protocol)} timed out', '*')
except requests.ConnectionError:
pr(f'{cyan(pip)} -> {cyan(protocol)} connection error', '*')
except requests.exceptions.InvalidSchema:
pr('SOCKS dependencies unmet!', 'X')
except ValueError as err:
if err.args[0] == 'check_hostname requires server_hostname':
pr(f'{cyan(pip)} -> {cyan(protocol)} TLS error, proxy is probably HTTP', '*')
def show_status(self) -> None:
def _sort_protocols(working: List[Proxy]) -> Dict[str, list]:
''' Sort proxies by ProxyType'''
dic = {}
for proto in Defaults.checker_proxy_protocols:
dic.update({proto: []})
for proxion in working:
for proto in proxion.protos:
dic[proto].append(proxion)
return dic
''' Show status (using the collected results) '''
working = self.up
text = 'Working:'
for proto, proxies in _sort_protocols(working).items():
if proxies:
text += f' {cyan(proto.upper())}:{cyan(len(proxies))}'
pr(text)
print()
|
engine.py
|
# -*- coding:utf-8 -*-
import logging
from logging import Logger
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Type, Dict, List, Optional
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG,
EVENT_QUOTE
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
QuoteData,
QuoteRequest,
SubscribeRequest,
HistoryRequest,
OrderData,
BarData,
TickData,
TradeData,
PositionData,
AccountData,
ContractData,
Exchange
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine: EventEngine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways: Dict[str, BaseGateway] = {}
self.engines: Dict[str, BaseEngine] = {}
self.apps: Dict[str, BaseApp] = {}
self.exchanges: List[Exchange] = []
os.chdir(TRADER_DIR) # Change working directory
self.init_engines() # Initialize function engines
def add_engine(self, engine_class: Any) -> "BaseEngine":
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway]) -> BaseGateway:
"""
Add gateway.
"""
gateway = gateway_class(self.event_engine)
self.gateways[gateway.gateway_name] = gateway
# Add gateway supported exchanges into engine
for exchange in gateway.exchanges:
if exchange not in self.exchanges:
self.exchanges.append(exchange)
return gateway
def add_app(self, app_class: Type[BaseApp]) -> "BaseEngine":
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
return engine
def init_engines(self) -> None:
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = "") -> None:
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str) -> BaseGateway:
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
            self.write_log(f"Gateway not found: {gateway_name}")
return gateway
def get_engine(self, engine_name: str) -> "BaseEngine":
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
            self.write_log(f"Engine not found: {engine_name}")
return engine
def get_default_setting(self, gateway_name: str) -> Optional[Dict[str, Any]]:
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self) -> List[str]:
"""
        Get all names of gateways added in main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self) -> List[BaseApp]:
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self) -> List[Exchange]:
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str) -> None:
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str) -> None:
"""
Subscribe tick data update of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str) -> str:
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str) -> None:
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def send_quote(self, req: QuoteRequest, gateway_name: str) -> str:
"""
Send new quote request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_quote(req)
else:
return ""
def cancel_quote(self, req: CancelRequest, gateway_name: str) -> None:
"""
Send cancel quote request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_quote(req)
def query_history(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]:
"""
Query bar history data from a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
return None
def close(self) -> None:
"""
        Make sure every gateway and app is closed properly before
        the program exits.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
#--------------------------------------------------------------------------------------------------
    def save_commission_margin_ratio(self):
        """Save gateway contract commission rates and margin ratio data."""
for gateway in self.gateways.values():
gateway.save_commission()
gateway.save_margin_ratio()
class BaseEngine(ABC):
"""
    Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
    Processes log events and outputs them with the logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level: int = SETTINGS["log.level"]
self.logger: Logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self) -> None:
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self) -> None:
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self) -> None:
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event) -> None:
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks: Dict[str, TickData] = {}
self.orders: Dict[str, OrderData] = {}
self.trades: Dict[str, TradeData] = {}
self.positions: Dict[str, PositionData] = {}
self.accounts: Dict[str, AccountData] = {}
self.contracts: Dict[str, ContractData] = {}
self.quotes: Dict[str, QuoteData] = {}
self.active_orders: Dict[str, OrderData] = {}
self.active_quotes: Dict[str, QuoteData] = {}
self.add_function()
self.register_event()
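    # The OMS query methods below are injected directly onto MainEngine so other
    # components can call e.g. main_engine.get_tick(vt_symbol) without having to
    # look up the OmsEngine instance first.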
def add_function(self) -> None:
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_quote = self.get_quote
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_quotes = self.get_all_quotes
self.main_engine.get_all_active_orders = self.get_all_active_orders
        self.main_engine.get_all_active_quotes = self.get_all_active_quotes
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
self.event_engine.register(EVENT_QUOTE, self.process_quote_event)
def process_tick_event(self, event: Event) -> None:
""""""
tick: TickData = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event) -> None:
""""""
order: OrderData = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
        # Otherwise, pop the inactive order from the dict
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event) -> None:
""""""
trade: TradeData = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event) -> None:
""""""
position: PositionData = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event) -> None:
""""""
account: AccountData = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event) -> None:
""""""
contract: ContractData = event.data
self.contracts[contract.vt_symbol] = contract
def process_quote_event(self, event: Event) -> None:
""""""
quote: QuoteData = event.data
self.quotes[quote.vt_quoteid] = quote
# If quote is active, then update data in dict.
if quote.is_active():
self.active_quotes[quote.vt_quoteid] = quote
        # Otherwise, pop the inactive quote from the dict
elif quote.vt_quoteid in self.active_quotes:
self.active_quotes.pop(quote.vt_quoteid)
def get_tick(self, vt_symbol: str) -> Optional[TickData]:
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid: str) -> Optional[OrderData]:
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid: str) -> Optional[TradeData]:
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid: str) -> Optional[PositionData]:
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid: str) -> Optional[AccountData]:
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol: str) -> Optional[ContractData]:
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_quote(self, vt_quoteid: str) -> Optional[QuoteData]:
"""
        Get latest quote data by vt_quoteid.
"""
return self.quotes.get(vt_quoteid, None)
def get_all_ticks(self) -> List[TickData]:
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self) -> List[OrderData]:
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self) -> List[TradeData]:
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self) -> List[PositionData]:
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self) -> List[AccountData]:
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self) -> List[ContractData]:
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_quotes(self) -> List[QuoteData]:
"""
Get all quote data.
"""
return list(self.quotes.values())
def get_all_active_orders(self, vt_symbol: str = "") -> List[OrderData]:
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
def get_all_active_quotes(self, vt_symbol: str = "") -> List[QuoteData]:
"""
Get all active quotes by vt_symbol.
        If vt_symbol is empty, return all active quotes.
"""
if not vt_symbol:
return list(self.active_quotes.values())
else:
active_quotes = [
quote
for quote in self.active_quotes.values()
if quote.vt_symbol == vt_symbol
]
return active_quotes
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread: Thread = Thread(target=self.run)
self.queue: Queue = Queue()
self.active: bool = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = "") -> None:
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = receiver
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self) -> None:
""""""
while self.active:
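            # Block on the queue with a 1-second timeout so that self.active is
            # re-checked regularly and close() can stop the thread promptly.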
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self) -> None:
""""""
self.active = True
self.thread.start()
def close(self) -> None:
""""""
if not self.active:
return
self.active = False
self.thread.join()
|
agent.py
|
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
import time, os, traceback, multiprocessing, portalocker
import envwrap
import valuerl
import util
from config import config
def run_env(pipe):
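    # Child-process loop: whenever the episode (re)starts, the fresh observation is
    # sent first; then the process repeatedly receives an action over the pipe and
    # sends back the resulting (obs, reward, done, reset) tuple.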
env = envwrap.get_env(config["env"]["name"])
reset = True
while True:
if reset is True: pipe.send(env.reset())
action = pipe.recv()
obs, reward, done, reset = env.step(action)
pipe.send((obs, reward, done, reset))
class AgentManager(object):
"""
    Interact with the environment according to the learned policy.
"""
def __init__(self, proc_num, evaluation, policy_lock, batch_size, config):
self.evaluation = evaluation
self.policy_lock = policy_lock
self.batch_size = batch_size
self.config = config
np.random.seed(self.config["seed"])
tf.set_random_seed(self.config["seed"])
self.log_path = util.create_directory("%s/%s/%s/%s" % (config["output_root"], config["env"]["name"], config["name"], config["log_path"])) + "/%s" % config["name"]
self.load_path = util.create_directory("%s/%s/%s/%s" % (config["output_root"], config["env"]["name"], config["name"], config["save_model_path"]))
## placeholders for intermediate states (basis for rollout)
self.obs_loader = tf.placeholder(tf.float32, [self.batch_size, np.prod(self.config["env"]["obs_dims"])])
## build model
self.valuerl = valuerl.ValueRL(self.config["name"], self.config["env"], self.config["policy_config"])
self.policy_actions = self.valuerl.build_evalution_graph(self.obs_loader, mode="exploit" if self.evaluation else "explore")
# interactors
self.agent_pipes, self.agent_child_pipes = list(zip(*[multiprocessing.Pipe() for _ in range(self.batch_size)]))
self.agents = [multiprocessing.Process(target=run_env, args=(self.agent_child_pipes[i],)) for i in range(self.batch_size)]
for agent in self.agents: agent.start()
self.obs = [pipe.recv() for pipe in self.agent_pipes]
self.total_rewards = [0. for _ in self.agent_pipes]
self.loaded_policy = False
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.rollout_i = 0
self.proc_num = proc_num
self.epoch = -1
self.frame_total = 0
self.hours = 0.
self.first = True
def get_action(self, obs):
if self.loaded_policy:
all_actions = self.sess.run(self.policy_actions, feed_dict={self.obs_loader: obs})
all_actions = np.clip(all_actions, -1., 1.)
return all_actions[:self.batch_size]
else:
return [self.get_random_action() for _ in range(obs.shape[0])]
def get_random_action(self, *args, **kwargs):
return np.random.random(self.config["env"]["action_dim"]) * 2 - 1
def step(self):
actions = self.get_action(np.stack(self.obs))
self.first = False
[pipe.send(action) for pipe, action in zip(self.agent_pipes, actions)]
next_obs, rewards, dones, resets = list(zip(*[pipe.recv() for pipe in self.agent_pipes]))
frames = list(zip(self.obs, next_obs, actions, rewards, dones))
self.obs = [o if resets[i] is False else self.agent_pipes[i].recv() for i, o in enumerate(next_obs)]
for i, (t,r,reset) in enumerate(zip(self.total_rewards, rewards, resets)):
if reset:
self.total_rewards[i] = 0.
if self.evaluation and self.loaded_policy:
with portalocker.Lock(self.log_path+'.greedy.csv', mode="a") as f: f.write("%2f,%d,%d,%2f\n" % (self.hours, self.epoch, self.frame_total, t+r))
else:
self.total_rewards[i] = t + r
if self.evaluation and np.any(resets): self.reload()
self.rollout_i += 1
return frames
def reload(self):
if not os.path.exists("%s/%s.params.index" % (self.load_path ,self.valuerl.saveid)): return False
with self.policy_lock:
self.valuerl.load(self.sess, self.load_path)
self.epoch, self.frame_total, self.hours = self.sess.run([self.valuerl.epoch_n, self.valuerl.frame_n, self.valuerl.hours])
self.loaded_policy = True
self.first = True
return True
def main(proc_num, evaluation, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config):
try:
# np.random.seed((proc_num * int(time.time())) % (2 ** 32 - 1))
agentmanager = AgentManager(proc_num, evaluation, policy_lock, config["evaluator_config"]["batch_size"] if evaluation else config["agent_config"]["batch_size"], config)
frame_i = 0
while True:
new_frames = agentmanager.step()
print('frame_i:{}'.format(frame_i))
if not evaluation:
policy_replay_frame_queue.put(new_frames)
if model_replay_frame_queue is not None: model_replay_frame_queue.put(new_frames)
if frame_i % config["agent_config"]["reload_every_n"] == 0: agentmanager.reload()
frame_i += len(new_frames)
except Exception as e:
print('Caught exception in agent process %d' % proc_num)
traceback.print_exc()
print()
try:
for i in agentmanager.agents: i.join()
except:
pass
raise e
|
tinkup.py
|
from cgitb import text
import queue
from random import seed
import serial
import serial.tools.list_ports
from signal import signal, SIGINT
import sys
import threading
import time
import tkinter
from tkinter import END, W, PhotoImage, filedialog as fd, scrolledtext as sd
global fw_filename
fw_filename = ""
COM_OVERRIDE=None
VERSION='1.0'
DEBUG=False
running = True
class PrintLogger():
def __init__(self, textbox):
self.textbox = textbox
def write(self, text):
self.textbox.insert(tkinter.END, text)
self.textbox.see(END)
def flush(self):
pass
def on_closing():
global running
running = False
def sig_handler(signal_received, frame):
on_closing()
class Tink:
cmd = {
'CmdGetVer': b'\x01',
'CmdErase': b'\x02',
'CmdWrite': b'\x03',
'JumpApp': b'\x05',
}
ctrl = {
'SOH': b'\x01',
'EOT': b'\x04',
'DLE': b'\x10',
}
rxfsm = {
'RxIdle': 0,
'RxBuffer': 1,
'RxEscape': 2,
}
blfsm = {
'BlIdle': 0,
'BlVersion': 1,
'BlErase': 2,
'BlWrite': 3,
'BlJump': 4,
}
serial = None
rx_state = rxfsm['RxIdle']
def timer(self, timestamp):
# 100ms interval timer
if running:
timestamp += 0.1
self.timer_thread = threading.Timer(timestamp - time.time(), self.timer, args=(timestamp,)).start()
def calc_crc(self, b):
# NOTE: This is the CRC lookup table for polynomial 0x1021
lut = [
0, 4129, 8258, 12387,\
16516, 20645, 24774, 28903,\
33032, 37161, 41290, 45419,\
49548, 53677, 57806, 61935]
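        # Each byte is processed as two 4-bit nibbles (high, then low) against the
        # 16-entry table; this yields a CRC-16 with polynomial 0x1021 and initial
        # value 0 (XMODEM-style).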
num1 = 0
for num2 in b:
num3 = (num1 >> 12) ^ (num2 >> 4)
num4 = (lut[num3 & 0x0F] ^ (num1 << 4)) & 0xFFFF
num5 = (num4 >> 12) ^ num2
num1 = (lut[num5 & 0x0F] ^ (num4 << 4)) & 0xFFFF
return num1
def rx_process(self, packet, debug=DEBUG):
if debug:
print('Processing packet: %s' % packet.hex())
crc_rx = (packet[-1] << 8) | packet[-2]
if self.calc_crc(packet[0:-2]) != crc_rx:
print('Bad CRC received, resetting state')
self.bl_state = self.blfsm['BlIdle']
else:
cmd = bytes([packet[0]])
payload = packet[1:-2]
if self.bl_state == self.blfsm['BlVersion']:
if cmd == self.cmd['CmdGetVer']:
print('Found device ID: %s' % payload.decode().split('\x00')[0])
print('Erasing device... ', end='')
self.tx_packet(self.cmd['CmdErase'])
self.bl_state = self.blfsm['BlErase']
else:
print('ERROR: Expected response code CmdGetVer, got %s' % packet[0])
elif self.bl_state == self.blfsm['BlErase']:
if cmd == self.cmd['CmdErase']:
print('OKAY')
self.hex_line = 1
self.fw_file = open(self.fw_name, 'r')
tx = bytearray(self.cmd['CmdWrite'])
hex_line = bytes.fromhex(self.fw_file.readline().rstrip()[1:])
tx += hex_line
print('Writing firmware %d/%d... ' % (self.hex_line, self.hex_nline), end='')
self.tx_packet(tx)
self.bl_state = self.blfsm['BlWrite']
else:
print('ERROR: Expected response code CmdErase, got %s' % packet[0])
elif self.bl_state == self.blfsm['BlWrite']:
if cmd == self.cmd['CmdWrite']:
print('OKAY')
self.hex_line = self.hex_line + 1
# hex_line starts at 1, so we need to send up to and
# including hex_nline
if self.hex_line > self.hex_nline:
print('Update complete, booting firmware')
self.bl_state = self.blfsm['BlJump']
self.tx_packet(self.cmd['JumpApp'])
button_state()
return
                        # There doesn't seem to be a response to the JumpApp
# command, so at this point we're done.
self.running = False
else:
tx = bytearray(self.cmd['CmdWrite'])
hex_line = bytes.fromhex(self.fw_file.readline().rstrip()[1:])
tx += hex_line
print('Writing firmware %d/%d... ' % (self.hex_line, self.hex_nline), end='')
self.tx_packet(tx)
else:
print('ERROR: Expected response code CmdWrite, got %s' % packet[0])
def rx_buffer(self, b, debug=DEBUG):
state_begin = self.rx_state
if self.rx_state == self.rxfsm['RxIdle']:
# Ignore bytes until we see SOH
if b == self.ctrl['SOH']:
self.rxbuf = bytearray()
self.rx_state = self.rxfsm['RxBuffer']
elif self.rx_state == self.rxfsm['RxBuffer']:
if b == self.ctrl['DLE']:
# Escape the next control sequence
self.rx_state = self.rxfsm['RxEscape']
elif b == self.ctrl['EOT']:
# End of transmission
self.rx_state = self.rxfsm['RxIdle']
self.rx_process(self.rxbuf)
else:
# Buffer the byte
self.rxbuf += b
elif self.rx_state == self.rxfsm['RxEscape']:
# Unconditionally buffer any byte following the escape sequence
self.rxbuf += b
self.rx_state = self.rxfsm['RxBuffer']
else:
# Shouldn't get here
print('Unknown state')
self.rx_state = self.rxfsm['RxIdle']
if debug:
keys = list(self.rxfsm.keys())
vals = list(self.rxfsm.values())
s0 = vals.index(state_begin)
s1 = vals.index(self.rx_state)
print('RX: %s, RX FSM state: %s -> %s' % (b.hex(), keys[s0], keys[s1]))
def rx(self):
while running:
if self.serial:
b = self.serial.read(1)
if b:
self.rx_buffer(b)
else:
print('RX timeout?')
else:
print('Lost serial port')
time.sleep(1)
def tx(self, b, debug=DEBUG):
if debug:
print('TX: %s' % b.hex())
if self.serial and self.serial.is_open:
try:
self.serial.write(b)
self.serial.flush()
except:
print('TX failure')
button_state()
return
else:
print('TX failure, serial port not writeable')
button_state()
return
def tx_packet(self, b):
# b should be a bytearray
crc = self.calc_crc(b)
b += bytes([crc & 0xFF])
b += bytes([(crc >> 8) & 0xFF])
b_tx = bytearray(self.ctrl['SOH'])
for bb in b:
bb = bytes([bb])
# Escape any control characters that appear in the TX buffer
if bb == self.ctrl['SOH'] or bb == self.ctrl['EOT'] or bb == self.ctrl['DLE']:
b_tx += self.ctrl['DLE']
b_tx += bb
b_tx += self.ctrl['EOT']
self.tx(b_tx)
def __init__(self, fw_name=None, port=None):
self.rx_state = self.rxfsm['RxIdle']
self.bl_state = self.blfsm['BlIdle']
self.fw_name = fw_name
self.hex_nline = 0
self.hex_line = 0
        # Ensure the file exists, has valid Intel HEX checksums, and count its lines
try:
with open(self.fw_name) as fw_file:
for line in fw_file:
self.hex_nline = self.hex_nline + 1
line = line.rstrip()[1:]
try:
checksum = bytes.fromhex(line[-2:])
except:
print('%s is not a valid hex file' % fw_name)
button_state()
return
# It seems to just load hex if it's blank
data = bytes.fromhex(line[:-2])
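                    # Intel HEX record checksum: the two's complement of the low byte
                    # of the sum of all other bytes in the record.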
s = bytes([((~(sum(data) & 0xFF) & 0xFF) + 1) & 0xFF])
if checksum != s:
print('%s is not a valid hex file' % fw_name)
button_state()
return
except:
print('No file selected')
button_state()
return
comports = []
try:
if port == None:
comports_all = [comport for comport in serial.tools.list_ports.comports()]
for com in comports_all:
if com.manufacturer == 'FTDI':
comports.append(com.device)
else:
comports.append(port)
if comports:
if len(comports) > 1:
print('Several FTDI devices detected - not sure which to target. Aborting.')
# TODO: Add interactive device selector?
button_state()
return
for com in comports:
try:
self.serial = serial.Serial(com, baudrate=115200, timeout=None, rtscts=True)
print('Opened device at %s' % com)
except Exception as ex:
print('Could not open device at %s' % com)
print('Exception: %s' % ex)
button_state()
return
else:
print('No RetroTINK devices found')
button_state()
return
except:
print('No communication with device')
button_state()
return
if self.serial:
self.rx_process_thread = threading.Thread(target=self.rx, args=())
self.rx_process_thread.daemon = True
self.rx_process_thread.start()
self.timer_thread = threading.Thread(target=self.timer, args=(time.time() + 0.1,))
self.timer_thread.daemon = True
self.timer_thread.start()
else:
button_state()
return
self.running = True
retries = 1
self.bl_state = self.blfsm['BlVersion']
while retries and running:
retries = retries - 1
print('Probing device... ', end='')
self.tx_packet(self.cmd['CmdGetVer'])
time.sleep(1)
# Need to add a timeout
def file_select():
filetypes = (
('hex files', '*.hex'),
('All files', '*.*')
)
fw_filename = fd.askopenfilename(
title='Select hex',
initialdir='/',
filetypes=filetypes)
browse_box.configure(state="normal")
browse_box.delete(0, END)
browse_box.insert(0,fw_filename)
browse_box.configure(state="readonly")
def tink_flash():
fw_filename = browse_box.get()
try:
button_state()
tink = Tink(fw_name=fw_filename, port=COM_OVERRIDE)
except:
print('Could not execute flash')
button_state()
return
def button_state():
if browse_button['state'] == "normal":
browse_button.configure(state="disabled")
flash_button.configure(state="disabled")
else:
browse_button.configure(state="normal")
flash_button.configure(state="normal")
if __name__ == '__main__':
signal(SIGINT, sig_handler)
window = tkinter.Tk()
window.geometry('680x380')
window.iconbitmap(default='./assets/icon.ico')
window.title('tinkup-gui')
window.resizable(False,False)
window.eval('tk::PlaceWindow . center')
tink_logo = PhotoImage(file='./assets/RetroTINK-logo.png')
tink_logo = tink_logo.subsample(4,4)
tink_label = tkinter.Label(window,image=tink_logo)
tink_label.place(x=285, y=10)
fw_label = tkinter.Label(window,text="Hex File:")
fw_label.place(x=325, y=90)
browse_box = tkinter.Entry(window,textvariable=fw_filename)
browse_box.configure(state="readonly")
browse_box.place(x=10, y=120, width=582)
browse_button = tkinter.Button(window,text='Load HEX',command=file_select)
browse_button.place(x=610, y=115)
flash_button = tkinter.Button(window, text="Flash", command=tink_flash)
flash_button.place(x=330, y=145)
print_text = sd.ScrolledText(window, undo=True)
print_text.place(x=10, y=180, height=180)
logger = PrintLogger(print_text)
sys.stdout = logger
try:
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)
finally:
window.mainloop()
on_closing()
|
jetbot_center.py
|
import threading
import socket
import jetbot_stream, jetbot_socket
from jetbot import Camera, bgr8_to_jpeg
import ipywidgets.widgets as widgets
import traitlets
## Get the local IP address
ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ss.connect(("8.8.8.8", 80))
ip = ss.getsockname()[0]
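# Note: connect() on a UDP socket sends no packets; it only lets getsockname()
# report the IP address of the outbound network interface.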
## Initialize the camera
camera = Camera.instance(width=224, height=224)
image = widgets.Image(format='jpeg', width=224, height=224)
camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
## jetbot_socket setup
jetbot_socket.setIP(ip)
jetbot_socket.createServer()
jetbot_socket.setImage(image)
jetbot_socket.setCamera(camera)
##
## jetbot_stream setup
jetbot_stream.setIP(ip)
jetbot_stream.createServer()
jetbot_stream.setImage(image)
##
## Start the server threads
thread_socket = threading.Thread(target=jetbot_socket.start)
thread_socket.start()
thread_stream = threading.Thread(target=jetbot_stream.start)
thread_stream.start()
print("ALL Server Ready")
|
openvino_yolov3_MultiStick_test.py
|
import sys, os, cv2, time, heapq, argparse
import numpy as np, math
try:
from armv7l.openvino.inference_engine import IENetwork, IEPlugin
except:
from openvino.inference_engine import IENetwork, IEPlugin
import multiprocessing as mp
from time import sleep
import threading
yolo_scale_13 = 13
yolo_scale_26 = 26
yolo_scale_52 = 52
classes = 80
coords = 4
num = 3
anchors = [10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326]
LABELS = ("person", "bicycle", "car", "motorbike", "aeroplane",
"bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench", "bird",
"cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack",
"umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat",
"baseball glove", "skateboard", "surfboard","tennis racket", "bottle",
"wine glass", "cup", "fork", "knife", "spoon",
"bowl", "banana", "apple", "sandwich", "orange",
"broccoli", "carrot", "hot dog", "pizza", "donut",
"cake", "chair", "sofa", "pottedplant", "bed",
"diningtable", "toilet", "tvmonitor", "laptop", "mouse",
"remote", "keyboard", "cell phone", "microwave", "oven",
"toaster", "sink", "refrigerator", "book", "clock",
"vase", "scissors", "teddy bear", "hair drier", "toothbrush")
label_text_color = (255, 255, 255)
label_background_color = (125, 175, 75)
box_color = (255, 128, 0)
box_thickness = 1
processes = []
fps = ""
detectfps = ""
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
lastresults = None
def EntryIndex(side, lcoords, lclasses, location, entry):
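    # Index into the flattened YOLO output: each of the `num` anchors owns a
    # contiguous block of (coords + classes + 1) channels, each channel being a
    # side*side spatial map.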
n = int(location / (side * side))
loc = location % (side * side)
return int(n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc)
class DetectionObject():
xmin = 0
ymin = 0
xmax = 0
ymax = 0
class_id = 0
confidence = 0.0
def __init__(self, x, y, h, w, class_id, confidence, h_scale, w_scale):
self.xmin = int((x - w / 2) * w_scale)
self.ymin = int((y - h / 2) * h_scale)
self.xmax = int(self.xmin + w * w_scale)
self.ymax = int(self.ymin + h * h_scale)
self.class_id = class_id
self.confidence = confidence
def IntersectionOverUnion(box_1, box_2):
width_of_overlap_area = min(box_1.xmax, box_2.xmax) - max(box_1.xmin, box_2.xmin)
height_of_overlap_area = min(box_1.ymax, box_2.ymax) - max(box_1.ymin, box_2.ymin)
area_of_overlap = 0.0
if (width_of_overlap_area < 0.0 or height_of_overlap_area < 0.0):
area_of_overlap = 0.0
else:
area_of_overlap = width_of_overlap_area * height_of_overlap_area
box_1_area = (box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin)
box_2_area = (box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin)
area_of_union = box_1_area + box_2_area - area_of_overlap
retval = 0.0
if area_of_union <= 0.0:
retval = 0.0
else:
retval = (area_of_overlap / area_of_union)
return retval
def ParseYOLOV3Output(blob, resized_im_h, resized_im_w, original_im_h, original_im_w, threshold, objects):
out_blob_h = blob.shape[2]
out_blob_w = blob.shape[3]
side = out_blob_h
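    # Each output scale (13/26/52) uses its own triplet of anchor boxes; the offset
    # selects the matching (width, height) pairs from the flat `anchors` list.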
anchor_offset = 0
if side == yolo_scale_13:
anchor_offset = 2 * 6
elif side == yolo_scale_26:
anchor_offset = 2 * 3
elif side == yolo_scale_52:
anchor_offset = 2 * 0
side_square = side * side
output_blob = blob.flatten()
for i in range(side_square):
row = int(i / side)
col = int(i % side)
for n in range(num):
obj_index = EntryIndex(side, coords, classes, n * side * side + i, coords)
box_index = EntryIndex(side, coords, classes, n * side * side + i, 0)
scale = output_blob[obj_index]
if (scale < threshold):
continue
x = (col + output_blob[box_index + 0 * side_square]) / side * resized_im_w
y = (row + output_blob[box_index + 1 * side_square]) / side * resized_im_h
height = math.exp(output_blob[box_index + 3 * side_square]) * anchors[anchor_offset + 2 * n + 1]
width = math.exp(output_blob[box_index + 2 * side_square]) * anchors[anchor_offset + 2 * n]
for j in range(classes):
class_index = EntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j)
prob = scale * output_blob[class_index]
if prob < threshold:
continue
obj = DetectionObject(x, y, height, width, j, prob, (original_im_h / resized_im_h), (original_im_w / resized_im_w))
objects.append(obj)
return objects
def camThread(LABELS, results, frameBuffer, camera_width, camera_height, vidfps):
global fps
global detectfps
global lastresults
global framecount
global detectframecount
global time1
global time2
global cam
global window_name
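    # Reads frames from the video source, pushes copies into the shared frame buffer,
    # and draws the newest detection results (falling back to `lastresults` when no
    # fresh detections are available for the current frame).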
#cam = cv2.VideoCapture(0)
#if cam.isOpened() != True:
# print("USB Camera Open Error!!!")
# sys.exit(0)
#cam.set(cv2.CAP_PROP_FPS, vidfps)
#cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
#cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
#window_name = "USB Camera"
#wait_key_time = 1
cam = cv2.VideoCapture("data/input/testvideo4.mp4")
camera_width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
camera_height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
window_name = "Movie File"
wait_key_time = int(1000 / vidfps)
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
while True:
t1 = time.perf_counter()
        # Video stream read
s, color_image = cam.read()
if not s:
continue
if frameBuffer.full():
frameBuffer.get()
height = color_image.shape[0]
width = color_image.shape[1]
frameBuffer.put(color_image.copy())
if not results.empty():
objects = results.get(False)
detectframecount += 1
for obj in objects:
if obj.confidence < 0.2:
continue
label = obj.class_id
confidence = obj.confidence
if confidence > 0.2:
label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)
lastresults = objects
else:
if not isinstance(lastresults, type(None)):
for obj in lastresults:
if obj.confidence < 0.2:
continue
label = obj.class_id
confidence = obj.confidence
if confidence > 0.2:
label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)
cv2.putText(color_image, fps, (width-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.putText(color_image, detectfps, (width-170,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.imshow(window_name, cv2.resize(color_image, (width, height)))
if cv2.waitKey(wait_key_time)&0xFF == ord('q'):
sys.exit(0)
## Print FPS
framecount += 1
if framecount >= 15:
fps = "(Playback) {:.1f} FPS".format(time1/15)
detectfps = "(Detection) {:.1f} FPS".format(detectframecount/time2)
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
t2 = time.perf_counter()
elapsedTime = t2-t1
time1 += 1/elapsedTime
time2 += elapsedTime
# l = Search list
# x = Search target value
def searchlist(l, x, notfoundvalue=-1):
if x in l:
return l.index(x)
else:
return notfoundvalue
def async_infer(ncsworker):
ncsworker.skip_frame_measurement()
while True:
ncsworker.predict_async()
class NcsWorker(object):
def __init__(self, devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps):
self.devid = devid
self.frameBuffer = frameBuffer
self.model_xml = "./lrmodels/YoloV3/FP16/frozen_yolo_v3.xml"
self.model_bin = "./lrmodels/YoloV3/FP16/frozen_yolo_v3.bin"
self.camera_width = camera_width
self.camera_height = camera_height
self.m_input_size = 416
self.threshould = 0.7
self.num_requests = 4
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
self.plugin = IEPlugin(device="MYRIAD")
self.net = IENetwork(model=self.model_xml, weights=self.model_bin)
self.input_blob = next(iter(self.net.inputs))
self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)
self.results = results
self.number_of_ncs = number_of_ncs
self.predict_async_time = 800
self.skip_frame = 0
self.roop_frame = 0
self.vidfps = vidfps
self.new_w = int(camera_width * min(self.m_input_size/camera_width, self.m_input_size/camera_height))
self.new_h = int(camera_height * min(self.m_input_size/camera_width, self.m_input_size/camera_height))
def image_preprocessing(self, color_image):
resized_image = cv2.resize(color_image, (self.new_w, self.new_h), interpolation = cv2.INTER_CUBIC)
canvas = np.full((self.m_input_size, self.m_input_size, 3), 128)
canvas[(self.m_input_size-self.new_h)//2:(self.m_input_size-self.new_h)//2 + self.new_h,(self.m_input_size-self.new_w)//2:(self.m_input_size-self.new_w)//2 + self.new_w, :] = resized_image
prepimg = canvas
prepimg = prepimg[np.newaxis, :, :, :] # Batch size axis add
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
def skip_frame_measurement(self):
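        # Estimate how many buffered frames to drop per inference request so that the
        # video frame rate does not outrun the (roughly assumed) async inference time.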
surplustime_per_second = (1000 - self.predict_async_time)
if surplustime_per_second > 0.0:
frame_per_millisecond = (1000 / self.vidfps)
total_skip_frame = surplustime_per_second / frame_per_millisecond
self.skip_frame = int(total_skip_frame / self.num_requests)
else:
self.skip_frame = 0
def predict_async(self):
try:
if self.frameBuffer.empty():
return
self.roop_frame += 1
if self.roop_frame <= self.skip_frame:
self.frameBuffer.get()
return
self.roop_frame = 0
prepimg = self.image_preprocessing(self.frameBuffer.get())
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1:
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))
cnt, dev = heapq.heappop(self.heap_request)
if self.exec_net.requests[dev].wait(0) == 0:
self.exec_net.requests[dev].wait(-1)
objects = []
outputs = self.exec_net.requests[dev].outputs
for output in outputs.values():
objects = ParseYOLOV3Output(output, self.new_h, self.new_w, self.camera_height, self.camera_width, self.threshould, objects)
objlen = len(objects)
for i in range(objlen):
if (objects[i].confidence == 0.0):
continue
for j in range(i + 1, objlen):
if (IntersectionOverUnion(objects[i], objects[j]) >= 0.4):
objects[j].confidence = 0
self.results.put(objects)
self.inferred_request[dev] = 0
else:
heapq.heappush(self.heap_request, (cnt, dev))
except:
import traceback
traceback.print_exc()
def inferencer(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps):
# Init infer threads
threads = []
for devid in range(number_of_ncs):
thworker = threading.Thread(target=async_infer, args=(NcsWorker(devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps),))
thworker.start()
threads.append(thworker)
for th in threads:
th.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-numncs','--numberofncs',dest='number_of_ncs',type=int,default=1,help='Number of NCS. (Default=1)')
args = parser.parse_args()
number_of_ncs = args.number_of_ncs
camera_width = 320
camera_height = 240
vidfps = 30
try:
mp.set_start_method('forkserver')
frameBuffer = mp.Queue(10)
results = mp.Queue()
# Start detection MultiStick
# Activation of inferencer
p = mp.Process(target=inferencer, args=(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps), daemon=True)
p.start()
processes.append(p)
sleep(number_of_ncs * 7)
# Start streaming
p = mp.Process(target=camThread, args=(LABELS, results, frameBuffer, camera_width, camera_height, vidfps), daemon=True)
p.start()
processes.append(p)
while True:
sleep(1)
except:
import traceback
traceback.print_exc()
finally:
for p in range(len(processes)):
processes[p].terminate()
print("\n\nFinished\n\n")
|
bluetooth.py
|
import Queue as queue
import serial
import struct
import threading
BLUETOOTH_EVENTS = {
0x05: 'Disconnection Complete',
0x08: 'Encryption Change',
0x0c: 'Read Remote Version Information Complete',
0x0e: 'Command Complete',
0x0f: 'Command Status',
0x10: 'Hardware Error (optional)',
0x13: 'Number Of Completed Packets',
0x1a: 'Data Buffer Overflow',
0x30: 'Encryption Key Refresh Complete',
}
BLUETOOTH_LE_EVENTS = {
0x01: 'LE Connection Complete',
0x02: 'LE Advertising Report',
0x03: 'LE Connection Update Complete',
0x04: 'LE Read Remote Used Features Complete',
0x05: 'LE Long Term Key Requested',
}
IO_TIMEOUT = 2
class Packet(object):
packet_type = None
@classmethod
def from_data(cls, data):
if cls is Packet:
raise TypeError('from_data should be called on Packet subclass')
packet_type = struct.unpack('B', data[0])[0]
if packet_type == cls.packet_type:
return cls.parse(data[1:])
else:
raise TypeError('This is not a %s' % cls.__name__)
@classmethod
def parse(cls, data):
        raise NotImplementedError('A generic Packet cannot be parsed')
class CommandPacket(Packet):
packet_type = 1
def __init__(self, opcode=None, fmt=None, *params):
self.opcode = opcode
self.fmt = fmt
self.params = params
@classmethod
def parse(cls, data):
return cls()
def serialize(self):
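        # HCI command packet layout: 1-byte packet type, 2-byte opcode (little-endian),
        # 1-byte parameter length, then the packed parameters.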
fmt = '<BHB%s' % self.fmt
size = struct.calcsize('<%s' % self.fmt)
return struct.pack(
fmt, self.packet_type, self.opcode, size, *self.params)
class EventPacket(Packet):
packet_type = 4
def __init__(self, code=None, fmt=None, *params):
self.code = code
self.fmt = fmt
self.params = params
@classmethod
def parse(cls, data):
return cls()
def serialize(self):
fmt = '<BB%s' % self.fmt
size = struct.calcsize('<%s' % self.fmt)
return struct.pack(
fmt, self.packet_type, self.code, size, *self.params)
class BluetoothDevice(object):
def __init__(self, port=None, baudrate=57600):
self.ready = False
self.serial = serial.Serial(port, baudrate, timeout=IO_TIMEOUT)
self.packet_queue = queue.Queue()
def init_device(self):
packet = CommandPacket(0xFE00, 'BB16s16sL', 8, 3, '\x00', '\x00', 1)
self.packet_queue.put(packet)
def _start_reader(self):
self._reader_alive = True
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(True)
self.receiver_thread.start()
def _stop_reader(self):
self._reader_alive = False
self.receiver_thread.join()
def start(self):
self.alive = True
self._start_reader()
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(True)
self.transmitter_thread.start()
self.init_device()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
self.transmitter_thread.join()
if not transmit_only:
self.receiver_thread.join()
def reader(self):
try:
while self.alive and self._reader_alive:
data = self.serial.read()
if not data:
continue
packet_type = struct.unpack('<B', data)[0]
if packet_type == EventPacket.packet_type:
event_code, params_len = struct.unpack(
'<BB', self.serial.read(2))
params_data = self.serial.read(params_len)
if event_code == 0xff:
print 'Vendor specific event'
elif event_code in BLUETOOTH_EVENTS:
event = BLUETOOTH_EVENTS[event_code]
print 'Bluetooth event "%s"' % event
elif event_code == 0x3e:
sub_event_code = struct.unpack('<B', params_data[0])[0]
event = BLUETOOTH_LE_EVENTS[sub_event_code]
print 'Bluetooth LE event "%s"' % event
else:
print 'Unknown event code %02x' % event_code
else:
print 'wrong packet type %02x' % packet_type
except serial.SerialException:
self.alive = False
raise
def writer(self):
try:
while self.alive:
try:
packet = self.packet_queue.get(timeout=IO_TIMEOUT)
self.serial.write(packet.serialize())
except queue.Empty:
pass
except:
self.alive = False
raise
|
sensors.py
|
import sys
import wifi
import base64
import csv
import json
import threading
from datetime import datetime
import subprocess
import socket
import urllib
import os
import configparser
import subprocess as sp
os.system('sudo systemctl start bluetooth')
import pygatt
from binascii import hexlify
import time
import binascii
from bluepy import btle
from bluepy.btle import Scanner, DefaultDelegate
import paho.mqtt.client as mqtt
#Get the serial number of the Raspberry Pi
def getserial():
# Extract serial from cpuinfo file
cpuserial = "0000000000000000"
try:
f = open('/proc/cpuinfo','r')
for line in f:
if line[0:6]=='Serial':
cpuserial = line[10:26]
f.close()
except:
cpuserial = "ERROR000000000"
return cpuserial
#MQTT parameters; to edit them, use the file mqtt.conf
global id,client,a,b,c,d,e,f,g,h,i,j,topic
try:
config = configparser.RawConfigParser()
config.read('mqtt.conf')
id = str(getserial())
topic = config.get('MQTT','topic')
broker_address = config.get('MQTT','broker_address')
client = mqtt.Client(id)
    #To change the certificate for the mqtt server, replace the old one, or create a new one and uncomment the line below : client.tls_set("Name_of_your_certificat.crt")
client.tls_set("ca.crt")
client.username_pw_set(config.get('MQTT','username'), config.get('MQTT','password'))
client.connect(broker_address)
except:
try:
config = configparser.RawConfigParser()
config.read('mqtt.conf')
id = str(getserial())
topic = config.get('MQTT','topic')
broker_address = "109.219.230.92"
client = mqtt.Client(id)
        #To change the certificate for the mqtt server, replace the old one, or create a new one and uncomment the line below : client.tls_set("Name_of_your_certificat.crt")
client.tls_set("ca2.crt")
client.username_pw_set(config.get('MQTT','username'), config.get('MQTT','password'))
client.connect(broker_address)
except: print("Can\'t connect to mqtt server")
#Create directory in the working directory and usb stick to save data
if os.path.isdir("/home/pi/Data") != True:
os.system("sudo mkdir /home/pi/Data")
try:
if os.path.isdir("/media/usb/Data") != True:
os.system("sudo mkdir /media/usb/Data")
except:error("usb stick unmounted")
#Return True or False depending on whether there is an internet connection
def checkInternet():
connected = False
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("www.google.com", 80))
connected = True
return connected
except socket.gaierror:
print ("Not connected")
return connected
#Encode string to base64
def ToBase64(s):
return base64.b64encode(s.encode('utf-8'))
#Search if there is a number in a string
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
#Print error for debug
def error(msg):
print (msg)
#log = open('logError.txt','a') log.write('\n Error at : '+str(time.time())+' , '+str(msg)) log.close()
#Convert wifi.csv file to .json file
def wcj(file):
try:
try:
csvfile = open(file, 'r')
except:error('Can\'t convert in csv to json the file :'+str(file)+' (wifi) in wcj(util.py)' )
try:
jfile = file[:-3]+"json"
jsonfile = open(jfile, 'w')
except:error('Can\'t create json in wcj(util.py)')
try:
reader = csv.DictReader(csvfile,("ID","TimeStamp","b64","BSSID","channel","RSSI","EncryptionKey","SSID","Chiffrement","idp"))
out = json.dumps( [ row for row in reader ] )
jsonfile.write(out)
os.system('sudo rm '+file)
return jfile
except:error('Can\'t write json in wcj(util.py)')
except:error('Can\'t convert (wifi) csv to json in wcj(util.py)')
#Scan all the wifi networks around, save them base64-encoded in a csv file, then convert it to json
def SearchW(comp):
#Scan every wifi around
try:
cells = wifi.Cell.all('wlan0')
except:error('wlan0 busy, can\'t scan (wif.py)')
    #If the program restarted, there might be some old files; to avoid losing them by overwriting, we skip those names here and send the files later
try:
while os.path.isfile('/home/pi/Data/wifi_'+str(comp)+'.json') == True or os.path.isfile('/home/pi/Data/sw_'+str(comp)+'.json') == True:
comp=comp+1
except:error('Failed to skip the name (wif.py)')
#Changing name in case we lost connection
try:
file = '/home/pi/Data/sw_'+str(comp)+'.csv'
idp = "sw_"+str(comp)
#id = str(getserial())
b64 = 'true'
except:error('Failed to write the name of the file (wif.py)')
#Create file to send data
try:
print (file)
z = open(file,'w')
except:error('Failed to create file for scan (wif.py)')
#Get data of scanning, all the data is in base64 to avoid error about special character
try:
for cell in cells:
print (cell)
timestamptmp = str(time.time())
timestamp = ToBase64(timestamptmp)
bssidtmp = str(cell.address)
bssid = ToBase64(bssidtmp)
channeltmp = str(cell.channel)
channel = ToBase64(channeltmp)
rssitmp = str(cell.signal)
rssi = ToBase64(rssitmp)
encryptiontmp = str(cell.encrypted)
encryption = ToBase64(encryptiontmp)
ssidtmp = str(cell.ssid)
ssid = ToBase64(ssidtmp)
if encryption == ToBase64('True'):
chiffrementtmp = str(cell.encryption_type)
chiffrement = ToBase64(chiffrementtmp)
else: chiffrement = ToBase64('Not protected')
rowtmp = str(id)+','+str(timestamp)+','+str(b64)+','+str(bssid)+','+str(channel)+','+str(rssi)+','+str(encryption)+','+str(ssid)+','+str(chiffrement)+','+str(idp)+'\n'
#Writing data
rowtmp2 = rowtmp.replace("b'","")
row = rowtmp2.replace("'","")
#print (row)
try:
z.write(row)
except:error('Failed to write the file (wif.py)')
except:error('Failed to collect data (wif.py)')
#Close and open the file to save the data before conversion
try:
        z.close()
z = open(file,'r')
except:error('Failed to open the file (wif.py)')
#Convert csv to json
try:
jfile = wcj(file)
except:error('Failed to convert the file (wif.py)')
#Sending the file if there is a internet connection
try:
os.system("sudo cp "+jfile+" /media/usb/Data")
except:error("usb stick unmounted")
#Scan wifi every 10 seconds
def w():
comp = 0
while 1:
print ("\n=========================== Scan Wifi Start =========================\n")
try:
SearchW(comp)
comp=comp+1
print (comp)
except:error('\n Error at : '+str(time.time())+' . Can\'t run scanWifi in sw.py')
print ("\n========================= Scan Wifi Complete ========================\n")
time.sleep(10)
def whichConnectedWifi():
    networkInfos = subprocess.check_output(['iwgetid']).decode().split()
for networkInfo in networkInfos:
if networkInfo[0:5]=="ESSID":
info = networkInfo.split('"')
connectedWifi = info[1]
return connectedWifi
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleDiscovery(self, dev, isNewDev, isNewData):
if isNewDev:
print("Discovered device", dev.addr)
elif isNewData:
print("Received new data from", dev.addr)
#Saves all the sensors' data in the working directory if there is no internet connection
def writedata(data):
if checkInternet() == False:
with open("mydata.json","a") as f:
f.write(str(data)+',')
f.close()
#Saves all the sensors' data on the usb stick as a history
def savedata(data):
with open("/media/usb/mydata.json","a") as f:
f.write(str(data)+',\n')
f.close()
#If there is a save file in the working directory, send it when connection is available
def offline(handle,value):
if checkInternet() == True and os.path.isfile("mydata.json")==True:
with open("mydata.json","r") as alldata:
test = str(alldata.read())
print (test)
client.publish("sensiLogger", test)
os.system('sudo rm mydata.json')
elif os.path.isfile("mydata.json")==False:
client.publish("sensiLogger","Pas de tableau a envoyer")
#Get the luminosity value
def Luminosity(handle, value):
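    # The notification payload is little-endian, so the byte order is swapped while
    # slicing the hexlified string before converting the 16-bit reading to an int.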
lumHex = str(hexlify(value))
lum = int(lumHex[8:10] + lumHex[6:8],16)
tim = int(round(time.time() * 1000))
myData="{\"type\":\"Luminosity\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(lum)+"\"}"
client.publish(topic, str(myData))
writedata(str(myData))
savedata(str(myData))
#Get the temperature value
def Temperature(handle, value):
temHex = str(hexlify(value))
tem = int(temHex[8:10] + temHex[6:8],16)/10
tim = int(round(time.time() * 1000))
myData="{\"type\":\"Temperature\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(tem)+"\"}"
writedata(str(myData))
savedata(str(myData))
client.publish(topic,str(myData))
#Get the battery level
def Battery(handle, value):
batHex = str(hexlify(value))
bat = int(batHex[12:14] +batHex[10:12],16)/1000
tim = int(round(time.time() * 1000))
myData="{\"type\":\"Battery\", \"id\":\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(bat)+"\"}"
client.publish(topic,str(myData))
writedata(str(myData))
savedata(str(myData))
#Get the humidity value
def Humidity(handle, value):
humHex = str(hexlify(value))
hum = int(humHex[8:10] + humHex[6:8],16)/10
tim = int(round(time.time() * 1000))
myData="{\"type\":\"Humidity\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(hum)+"\"}"
client.publish(topic, str(myData))
writedata(str(myData))
savedata(str(myData))
#Get the accelerometer, gyroscope and magnetometer values
def Motion(handle, value):
motHex = str(hexlify(value))
tim = int(round(time.time() * 1000))
accX = int(motHex[8:10] + motHex[6:8],16)/100
accY = int(motHex[12:14] + motHex[10:12],16)/100
accZ = int(motHex[16:18] + motHex[14:16],16)/100
gyrX = int(motHex[20:22] + motHex[18:20],16)
gyrY = int(motHex[24:26] + motHex[22:24],16)
gyrZ = int(motHex[28:30] + motHex[26:28],16)
magX = int(motHex[32:34] + motHex[30:32],16)/100
magY = int(motHex[36:38] + motHex[34:36],16)/100
magZ = int(motHex[40:42] + motHex[38:40],16)/100
myData="{\"type\":\"Accelerometer\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"X\" : \""+str(accX)+"\", \"Y\" : \""+str(accY)+"\", \"Z\" : \""+str(accZ)+"\"}"
client.publish(topic, str(myData))
writedata(str(myData))
savedata(str(myData))
myData="{\"type\":\"Gyroscope\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"X\" : \""+str(gyrX)+"\", \"Y\" : \""+str(gyrY)+"\", \"Z\" : \""+str(gyrZ)+"\"}"
client.publish(topic, str(myData))
writedata(str(myData))
savedata(str(myData))
myData="{\"type\":\"Magnetometer\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"X\" : \""+str(magX)+"\", \"Y\" : \""+str(magY)+"\", \"Z\" : \""+str(magZ)+"\"}"
client.publish(topic, str(myData))
writedata(str(myData))
savedata(str(myData))
#Get the pressure value
def Pressure(handle, value):
preHex = str(hexlify(value))
pre = int(preHex[12:14] + preHex[10:12] + preHex[8:10] + preHex[6:8],16)/100
tim = int(round(time.time() * 1000))
myData="{\"type\":\"Pressure\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(pre)+"\"}"
client.publish(topic, str(myData))
writedata(str(myData))
savedata(str(myData))
#Get the mic level
def Mic_Level(handle, value):
micHex = str(hexlify(value))
mic = int(micHex[8:10] + micHex[6:8],16)
tim = int(round(time.time() * 1000))
myData="{\"type\":\"Mic_Level\", \"id\" :\""+str(id)+"\", \"timestamp\" : \""+str(tim)+"\", \"value\" : \""+str(mic)+"\"}"
client.publish(topic, str(myData))
writedata(str(myData))
savedata(str(myData))
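#Illustrative sketch (not part of the original script): the callbacks above
#decode little-endian fields by swapping pairs of hex characters.  The same
#values can be read from the raw notification bytes with struct; the default
#offset of 2 (skipping the timestamp field) and the scale factor are
#assumptions inferred from the slicing and divisions used above.
import struct
def decode_le16(value, offset=2, scale=1):
    (raw,) = struct.unpack_from('<H', value, offset)
    return raw / scale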
#Connect to the sensiBLE, to the mqtt server and send all the data of the captors
def senddata():
while 1:
cont = 1
client.connect(broker_address)
client.loop_start()
connectedWifi = whichConnectedWifi()
scanner = Scanner().withDelegate(ScanDelegate())
devices = scanner.scan(10.0)
uuid = "00:00:00:00:00:00"
for dev in devices:
print("Device %s (%s), RSSI=%d dB" % (dev.addr, dev.addrType, dev.rssi))
for (adtype, desc, value) in dev.getScanData():
if value=="SensiBLE":
uuid = dev.addr
print(" %s = %s" % (desc, value))
print("Connecting...")
time.sleep(1)
adapter = pygatt.GATTToolBackend()
try:
adapter.start()
device = adapter.connect(uuid)
device.subscribe("01000000-0001-11e1-ac36-0002a5d5c51b",callback=Luminosity)
time.sleep(1)
device.subscribe("00040000-0001-11e1-ac36-0002a5d5c51b",callback=Temperature)
time.sleep(1)
device.subscribe("00020000-0001-11e1-ac36-0002a5d5c51b",callback=Battery)
time.sleep(1)
device.subscribe("00080000-0001-11e1-ac36-0002a5d5c51b",callback=Humidity)
time.sleep(1)
device.subscribe("00e00000-0001-11e1-ac36-0002a5d5c51b",callback=Motion)
time.sleep(1)
device.subscribe("00100000-0001-11e1-ac36-0002a5d5c51b",callback=Pressure)
time.sleep(1)
device.subscribe("04000000-0001-11e1-ac36-0002a5d5c51b",callback=Mic_Level)
time.sleep(1)
device.subscribe("04000000-0001-11e1-ac36-0002a5d5c51b",callback=offline)
while cont==1:
stdoutdata = sp.getoutput("hcitool con")
if not uuid.upper() in stdoutdata.split() or connectedWifi != whichConnectedWifi():
print("not connected")
client.loop_stop()
client.disconnect()
cont = 0
else:
print("connected")
except:
print("error")
myData={"error":"Couldn't connect to the sensiBLE"}
client.publish(topic, str(myData))
client.loop_stop()
client.disconnect()
finally:
adapter.stop()
#Launch the wifi scan and sensiBLE threads, restarting them if they stop
def launcher():
    wifi_thread = threading.Thread(target=w, args=())
    sensi_thread = threading.Thread(target=senddata, args=())
    wifi_thread.start()
    sensi_thread.start()
    while 1:
        if not wifi_thread.is_alive():
            # a finished Thread object cannot be restarted, so build a new one
            wifi_thread = threading.Thread(target=w, args=())
            wifi_thread.start()
        if not sensi_thread.is_alive():
            sensi_thread = threading.Thread(target=senddata, args=())
            sensi_thread.start()
        time.sleep(1)
launcher()
|
ipython_memory_usage.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Profile mem usage envelope of IPython commands and report interactively"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import os
import time
import memory_profiler
from IPython import get_ipython
__version__ = 1.0 # set to desired value.
# To run: %run -i ipython_memory_usage.py
# keep a global accounting for the last known memory usage
# which is the reference point for the memory delta calculation
previous_call_memory_usage = memory_profiler.memory_usage()[0]
t1 = time.time()  # updated in pre_run_cell just before each command runs
keep_watching = True
peak_memory_usage = -1
watching_memory = True
input_cells = get_ipython().user_ns['In']
def start_watching_memory():
"""Register memory profiling tools to IPython instance."""
global watching_memory
watching_memory = True
ip = get_ipython()
ip.events.register("post_run_cell", watch_memory)
ip.events.register("pre_run_cell", pre_run_cell)
def stop_watching_memory():
"""Unregister memory profiling tools from IPython instance."""
global watching_memory
watching_memory = False
ip = get_ipython()
try:
ip.events.unregister("post_run_cell", watch_memory)
except ValueError:
pass
try:
ip.events.unregister("pre_run_cell", pre_run_cell)
except ValueError:
pass
def watch_memory():
    """Report the RAM delta for the command that just finished (post_run_cell hook)."""
    # bring in the global memory usage value from the previous iteration
global previous_call_memory_usage, peak_memory_usage, keep_watching, \
watching_memory, input_cells
new_memory_usage = memory_profiler.memory_usage()[0]
memory_delta = new_memory_usage - previous_call_memory_usage
keep_watching = False
peaked_memory_usage = max(0, peak_memory_usage - new_memory_usage)
# calculate time delta using global t1 (from the pre-run event) and current
# time
time_delta_secs = time.time() - t1
num_commands = len(input_cells) - 1
cmd = "In [{}]".format(num_commands)
# convert the results into a pretty string
output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
"{time_delta:0.2f}s, peaked {peaked_memory_usage:0.2f} "
"MiB above current, total RAM usage "
"{memory_usage:0.2f} MiB")
output = output_template.format(time_delta=time_delta_secs,
cmd=cmd,
memory_delta=memory_delta,
peaked_memory_usage=peaked_memory_usage,
memory_usage=new_memory_usage)
if watching_memory:
print(str(output))
previous_call_memory_usage = new_memory_usage
def during_execution_memory_sampler():
import time
import memory_profiler
global keep_watching, peak_memory_usage
peak_memory_usage = -1
keep_watching = True
n = 0
WAIT_BETWEEN_SAMPLES_SECS = 0.001
MAX_ITERATIONS = 60.0 / WAIT_BETWEEN_SAMPLES_SECS
while True:
mem_usage = memory_profiler.memory_usage()[0]
peak_memory_usage = max(mem_usage, peak_memory_usage)
time.sleep(WAIT_BETWEEN_SAMPLES_SECS)
if not keep_watching or n > MAX_ITERATIONS:
# exit if we've been told our command has finished or if it has run
# for more than a sane amount of time (e.g. maybe something crashed
# and we don't want this to carry on running)
if n > MAX_ITERATIONS:
print("{} SOMETHING WEIRD HAPPENED AND THIS RAN FOR TOO LONG, THIS THREAD IS KILLING ITSELF".format(__file__))
break
n += 1
def pre_run_cell():
"""Capture current time before we execute the current command"""
import time
global t1
t1 = time.time()
# start a thread that samples RAM usage until the current command finishes
import threading
ipython_memory_usage_thread = threading.Thread(target=during_execution_memory_sampler)
ipython_memory_usage_thread.daemon = True
ipython_memory_usage_thread.start()
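# Illustrative usage sketch (not part of the original module), run inside an
# interactive IPython session:
#
#   %run -i ipython_memory_usage.py
#   start_watching_memory()   # each cell now reports its RAM envelope
#   ...
#   stop_watching_memory()    # unregister the pre/post run_cell hooks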
|
test_pool.py
|
import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not_
from sqlalchemy.testing import is_true
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI(): # noqa
def cursor():
return Mock()
def connect(*arg, **kw):
def close():
conn.closed = True
# mock seems like it might have an issue logging
# call_count correctly under threading, not sure.
# adding a side_effect for close seems to help.
conn = Mock(
cursor=Mock(side_effect=cursor),
close=Mock(side_effect=close),
closed=False,
)
return conn
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
)
return db
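# Illustrative sketch (not part of the original suite): how the MockDBAPI
# above is typically paired with a QueuePool in these tests.  Uses only the
# public pool API exercised elsewhere in this module.
def _example_mock_pool_roundtrip():
    dbapi = MockDBAPI()
    p = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), pool_size=1)
    conn = p.connect()  # checkout wraps the underlying Mock connection
    conn.close()        # checkin returns the same record to the pool
    return dbapi, p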
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
self._teardown_conns = []
def teardown(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
class PoolTest(PoolTestBase):
@testing.fails_on(
"+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
)
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1,)]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (
pool.SingletonThreadPool,
pool.StaticPool,
pool.QueuePool,
pool.NullPool,
pool.AssertionPool,
):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.info)
c.invalidate()
c = p.connect()
self.assert_("foo" not in c.info)
c.info["foo2"] = "bar2"
c.detach()
self.assert_("foo2" in c.info)
c2 = p.connect()
is_not_(c.connection, c2.connection)
assert not c2.info
assert "foo2" in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.record_info)
c.invalidate()
c = p.connect()
self.assert_("foo" in c.record_info)
c.record_info["foo2"] = "bar2"
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert "foo2" in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.connection
c1 = r1.get_connection()
is_(c1, r1.connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1)
c1 = r1.connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.connection
eq_(c1.mock_calls, [call.close()])
c2 = r1.get_connection()
is_not_(c1, c2)
is_(c2, r1.connection)
eq_(c2.mock_calls, [])
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append("R")
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append("C")
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append("CL")
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ["R", "CL", "R"])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ["R", "CL", "R"])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])
def test_null_pool(self):
self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])
def test_static_pool(self):
self._do_test(pool.StaticPool, ["R", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append("checkin")
event.listen(p, "checkin", checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ["connect"])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ["connect", "connect"])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ["checkout"])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["reset"])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ["checkin"])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
c2.close()
eq_(canary, ["checkin", "checkin"])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, "connect", listen_one)
event.listen(engine.pool, "connect", listen_two)
event.listen(engine, "connect", listen_three)
event.listen(engine.__class__, "connect", listen_four)
engine.execute(select([1])).close()
eq_(
canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, "connect", listen_one)
event.listen(pool.QueuePool, "connect", listen_two)
event.listen(pool.SingletonThreadPool, "connect", listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def test_connect_event_fails_invalidates(self):
fail = False
def listen_one(conn, rec):
if fail:
raise Exception("it failed")
def listen_two(conn, rec):
rec.info["important_flag"] = True
p1 = pool.QueuePool(
creator=MockDBAPI().connect, pool_size=1, max_overflow=0
)
event.listen(p1, "connect", listen_one)
event.listen(p1, "connect", listen_two)
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.invalidate()
conn.close()
fail = True
assert_raises(Exception, p1.connect)
fail = False
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.close()
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
# test [ticket:2964]
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, "first_connect")
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, "connect")
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(0.02)
c1.close()
time.sleep(0.02)
threads = []
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect(),
],
)
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
def status(pool):
return (
pool.size(),
pool.checkedin(),
pool.overflow(),
pool.checkedout(),
)
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
        # test a race condition where the initial connecting threads all race
        # to queue.Empty, then block on the mutex.  each thread consumes a
        # connection as it goes in.  when the limit is reached, the remaining
        # threads go in and get TimeoutError, even though they never got to
        # wait for the timeout on queue.get().  the fix involves re-checking
        # the timeout while holding the mutex and, if time remains, unlocking
        # and sending the thread back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
            # but on a loaded-down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
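    # Illustrative sketch (not part of the original suite) of the three knobs
    # exercised above; the values are arbitrary examples, not recommendations.
    def _example_queuepool_knobs(self):
        dbapi = MockDBAPI()
        return pool.QueuePool(
            creator=lambda: dbapi.connect("foo.db"),
            pool_size=2,     # connections kept checked in
            max_overflow=1,  # at most pool_size + max_overflow checkouts
            timeout=3,       # seconds to wait before raising TimeoutError
        )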
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
time.sleep(2)
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
with mutex:
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not_(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not_(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
c2.invalidate(soft=True)
is_(c2_rec.connection, c2.connection)
c2.close()
c3 = p.connect()
is_not_(c3.connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.connection, c3.connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises(Exception, p.connect)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
)
@testing.requires.timing_intensive
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info["x"] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert "x" in conn_rec.info
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.predictable_gc
def test_userspace_disconnectionerror_weakref_finalizer(self):
dbapi, pool = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2
)
@event.listens_for(pool, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if getattr(dbapi_con, "boom") == "yes":
raise tsa.exc.DisconnectionError()
conn = pool.connect()
old_dbapi_conn = conn.connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.rollback()])
old_dbapi_conn.boom = "yes"
conn = pool.connect()
dbapi_conn = conn.connection
del conn
gc_collect()
# new connection was reset on return appropriately
eq_(dbapi_conn.mock_calls, [call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(0.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
        # return 7 of them (conns[3:10]) back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(
Error(), "statement", {}, Mock(), Mock()
)
except tsa.exc.DBAPIError:
pass
        # run an error + invalidate operation on the connections; 8 of them
        # are still checked out at this point
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn,))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(
reset_on_return=None, pool_size=1, max_overflow=0
)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
p = self._queuepool_fixture(pool_size=1)
c1 = p.connect()
rec = c1._connection_record
c1.close()
assert_raises_message(
Warning, "Double checkin attempted on %s" % rec, rec.checkin
)
def test_lifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator, use_lifo=True)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
for i in range(5):
pc1 = p.connect()
is_(pc1.connection, c3)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c3)
pc2 = p.connect()
is_(pc2.connection, c2)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c1)
pc2.close()
pc3.close()
pc1.close()
def test_fifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
pc1 = p.connect()
is_(pc1.connection, c1)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c1)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3.close()
pc1.close()
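# Illustrative sketch (not part of the original suite): use_lifo=True makes
# the pool hand back the most recently checked-in connection first, which is
# the ordering difference test_lifo and test_fifo above demonstrate.
def _example_lifo_pool():
    dbapi = MockDBAPI()
    return pool.QueuePool(
        creator=lambda: dbapi.connect("foo.db"), use_lifo=True
    )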
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert dbapi.connect().special_rollback.called
assert not dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 1)
eq_(dbapi.connect().special_commit.call_count, 0)
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert not dbapi.connect().special_rollback.called
assert dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 0)
eq_(dbapi.connect().special_commit.call_count, 1)
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_reset_agent_disconnect(self):
dbapi, p = self._fixture(reset_on_return="rollback")
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
p._invalidate(self.conn)
raise Exception("hi")
def commit(self):
self.conn.commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
# no warning raised. We know it would warn due to
# QueuePoolTest.test_no_double_checkin
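# Illustrative sketch (not part of the original suite): the reset_on_return
# modes exercised by ResetOnReturnTest control what happens at checkin time.
def _example_reset_on_return(mode):
    # mode is "rollback" (the default), "commit", or None for no reset
    dbapi = Mock()
    return pool.QueuePool(
        creator=lambda: dbapi.connect("foo.db"), reset_on_return=mode
    )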
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
# TODO: the SingletonThreadPool cleanup method
# has an unfixed race condition within the "cleanup" system that
# leads to this test being off by one connection under load; in any
# case, this connection will be closed once it is garbage collected.
# this pool is not a production-level pool and is only used for the
# SQLite "memory" connection, and is not very useful under actual
# multi-threaded conditions
# @testing.requires.threading_with_mock
# def test_cleanup_no_gc(self):
# self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(0.1)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
lp = len(p._all_conns)
is_true(3 <= lp <= 4)
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
def test_no_rollback_from_nested_connections(self):
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
c1 = p.connect()
mock_conn = c1.connection
c2 = p.connect()
is_(c1, c2)
c2.close()
eq_(mock_conn.mock_calls, [])
c1.close()
eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[call("foo.db"), call("foo.db")], any_order=True
)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
base.py
|
from abc import abstractmethod
import asyncio
from pathlib import Path
from multiprocessing.managers import (
BaseManager,
)
from threading import Thread
from typing import (
List,
Type,
Union
)
from evm.chains.base import BaseChain
from p2p.peer import (
PeerPool
)
from p2p.service import (
BaseService,
EmptyService,
)
from trinity.chains import (
ChainProxy,
)
from trinity.chains.header import (
AsyncHeaderChainProxy,
)
from trinity.db.chain import ChainDBProxy
from trinity.db.base import DBProxy
from trinity.db.header import (
BaseAsyncHeaderDB,
AsyncHeaderDBProxy
)
from trinity.rpc.main import (
RPCServer,
)
from trinity.rpc.ipc import (
IPCServer,
)
from trinity.config import (
ChainConfig,
)
from trinity.tx_pool.pool import (
TxPool
)
from trinity.tx_pool.validators import (
DefaultTransactionValidator
)
class Node(BaseService):
"""
    Create usable nodes by subclassing and defining the following
    unset attributes.
"""
chain_class: Type[BaseChain] = None
initial_tx_validation_block_number: int = None
def __init__(self, chain_config: ChainConfig) -> None:
super().__init__()
self._db_manager = create_db_manager(chain_config.database_ipc_path)
self._db_manager.connect() # type: ignore
self._headerdb = self._db_manager.get_headerdb() # type: ignore
self._jsonrpc_ipc_path: Path = chain_config.jsonrpc_ipc_path
self._auxiliary_services: List[BaseService] = []
@abstractmethod
def get_chain(self) -> BaseChain:
raise NotImplementedError("Node classes must implement this method")
@abstractmethod
def get_peer_pool(self) -> PeerPool:
"""
Return the PeerPool instance of the node
"""
raise NotImplementedError("Node classes must implement this method")
@abstractmethod
def get_p2p_server(self) -> BaseService:
"""
        This is the main service that will be run when calling :meth:`run`.
        It is typically responsible for syncing the chain with peer connections.
"""
raise NotImplementedError("Node classes must implement this method")
@property
def db_manager(self) -> BaseManager:
return self._db_manager
@property
def headerdb(self) -> BaseAsyncHeaderDB:
return self._headerdb
def add_service(self, service: BaseService) -> None:
if self.is_running:
raise RuntimeError("Cannot add an auxiliary service while the node is running")
else:
self._auxiliary_services.append(service)
def create_and_add_tx_pool(self) -> None:
self.tx_pool = TxPool(
self.get_peer_pool(),
DefaultTransactionValidator(self.get_chain(), self.initial_tx_validation_block_number),
self.cancel_token
)
self.add_service(self.tx_pool)
def make_ipc_server(self) -> Union[IPCServer, EmptyService]:
if self._jsonrpc_ipc_path:
rpc = RPCServer(self.get_chain(), self.get_peer_pool())
return IPCServer(rpc, self._jsonrpc_ipc_path)
else:
return EmptyService()
async def _run(self) -> None:
self._ipc_server = self.make_ipc_server()
        # The RPC server needs its own thread, because it provides a synchronous
        # API which might call into p2p async methods. These sync->async calls
        # deadlock if they are run in the same thread and event loop.
ipc_loop = self._make_new_loop_thread()
# keep a copy on self, for debugging
self._ipc_loop = ipc_loop
# FIXME: EmptyService doesn't share a common API with the IPCServer
asyncio.run_coroutine_threadsafe(
self._ipc_server.run(loop=ipc_loop), loop=ipc_loop # type: ignore
)
for service in self._auxiliary_services:
asyncio.ensure_future(service.run())
await self.get_p2p_server().run()
async def _cleanup(self) -> None:
if isinstance(self._ipc_server, IPCServer):
await self._ipc_server.stop()
await asyncio.gather(*[service.cleaned_up.wait() for service in self._auxiliary_services])
def _make_new_loop_thread(self) -> asyncio.AbstractEventLoop:
new_loop = asyncio.new_event_loop()
def start_loop(loop: asyncio.AbstractEventLoop) -> None:
asyncio.set_event_loop(loop)
loop.run_forever()
thread = Thread(target=start_loop, args=(new_loop, ))
thread.start()
return new_loop
def create_db_manager(ipc_path: Path) -> BaseManager:
"""
We're still using 'str' here on param ipc_path because an issue with
multi-processing not being able to interpret 'Path' objects correctly
"""
class DBManager(BaseManager):
pass
# Typeshed definitions for multiprocessing.managers is incomplete, so ignore them for now:
# https://github.com/python/typeshed/blob/85a788dbcaa5e9e9a62e55f15d44530cd28ba830/stdlib/3/multiprocessing/managers.pyi#L3
DBManager.register('get_db', proxytype=DBProxy) # type: ignore
DBManager.register('get_chaindb', proxytype=ChainDBProxy) # type: ignore
DBManager.register('get_chain', proxytype=ChainProxy) # type: ignore
DBManager.register('get_headerdb', proxytype=AsyncHeaderDBProxy) # type: ignore
DBManager.register('get_header_chain', proxytype=AsyncHeaderChainProxy) # type: ignore
manager = DBManager(address=str(ipc_path)) # type: ignore
return manager
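# Illustrative usage sketch (not part of the original module): connecting to a
# running database process over its IPC socket.  The chain_config object and
# its database_ipc_path attribute are assumed from the Node constructor above.
#
#   manager = create_db_manager(chain_config.database_ipc_path)
#   manager.connect()
#   headerdb = manager.get_headerdb()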
|
sanitylib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import platform
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use canonical_zephyr_base (below) for internal comparisons; that's what
# canonicalization is for. Don't use it when invoking other components of the
# build system, to avoid confusing and hard-to-trace inconsistencies in error
# messages, logs, generated Makefiles, etc. compared to when users invoke
# these components directly.
# Note that "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
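# A minimal usage sketch (illustrative; the entry names are hypothetical and
# the build directory is assumed to already contain a CMakeCache.txt):
#
#   cache = CMakeCache.from_file("build/CMakeCache.txt")
#   value = cache.get("SOME_ENTRY")      # single value, or None if absent
#   items = cache.get_list("SOME_LIST")  # always returns a list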
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
# Encapsulate the terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninjas don't seem to pass SIGTERM down to their children,
# so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
# With shell=True the command must be a single string; with a list only
# the first element is run by the shell, so gcov would never execute.
subprocess.call("GCOV_PREFIX=" + self.build_dir +
" gcov " + self.sourcedir + " -b -s " + self.build_dir, shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and i['serial']:
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and i['serial']:
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial:
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
if self.suite.west_flash is not None:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
if self.suite.west_runner:
command.append("--runner")
command.append(self.suite.west_runner)
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
if self.suite.west_flash != []:
command.append('--')
command.extend(self.suite.west_flash.split(','))
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None)
if runner:
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command.append("--runner")
command.append(hardware.get('runner', None))
if runner == "pyocd":
command.append("--board-id")
command.append(board_id)
elif runner == "nrfjprog":
command.append('--')
command.append("--snr")
command.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command.append('--')
command.append("--cmd-pre-init")
command.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command.append('--')
command.append("--cmd-pre-init")
command.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command.append('--')
command.append("--cmd-pre-init")
command.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
serial_device = hardware['serial']
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this because once QEMU starts, it runs forever until killed.
Test cases emit special messages to the console as they run; we check
for these to determine whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time; it is
maintained by counting guest instructions, so we use the QEMU
process execution time to approximate the time spent in the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering; we don't want read() or poll() to
# ever block if there is data available.
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
if pid and this_timeout > 0:
# There is a possibility that we polled nothing because the host
# did not schedule the QEMU process enough CPU time during
# p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# Sometimes QEMU can't handle the SIGTERM signal correctly; in that
# case kill the QEMU process with SIGKILL directly and let
# sanitycheck judge the test result from the console output.
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
else:
self.returncode = proc.returncode
logger.debug(f"No timeout, return code from qemu: {self.returncode}")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
logger.debug(f"return code from qemu: {self.returncode}")
if self.returncode != 0 or not harness.state:
self.set_state("failed", 0)
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if",
"net_if_dev",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache"
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"devconfig",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan",
"bt_l2cap_br_fixec_chan",
"bt_gatt_service_static",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using nm and awk.
# grep cannot be used, as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
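# A minimal usage sketch (illustrative; assumes a built ELF image and that
# nm/objdump are available on PATH):
#
#   calc = SizeCalculator("build/zephyr/zephyr.elf", extra_sections=[])
#   print(calc.get_ram_size(), calc.get_rom_size())
#   print(calc.unrecognized_sections())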
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % typestr)
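# For example: _cast_value("1 2 3", "list:int") returns [1, 2, 3] and
# _cast_value("a b a", "set") returns {"a", "b"}, while an unknown type
# string raises ConfigurationError.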
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k not in valid_keys:
raise ConfigurationError(
self.filename,
"Unknown config key '%s' in definition for '%s'" %
(k, name))
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in an ad hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
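# A minimal usage sketch (illustrative; the board YAML path is hypothetical
# but must conform to platform-schema.yaml):
#
#   plat = Platform()
#   plat.load("boards/arm/some_board/some_board.yaml")
#   print(plat)   # e.g. "<some_board on arm>"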
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, this can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_whitelist = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_whitelist = None
self.toolchain_exclude = None
self.toolchain_whitelist = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
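# For example (illustrative): a test entry named "kernel.common" found in
# workdir "kernel/common" under the testcase root ZEPHYR_BASE/tests gets
# the unique name "tests/kernel/common/kernel.common".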
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# Do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared on the same
# line -- as we only search starting at the end of this match.
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empty space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
# Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "") for match in _matches]
return matches, warnings
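# For example (illustrative), a source file containing the two lines
#   ztest_test_suite(mutex_suite, ztest_unit_test(test_mutex_lock));
#   ztest_run_test_suite(mutex_suite);
# yields matches == ["mutex_lock"] and no warnings.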
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.build_only = True
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
# Global testsuite parameters
def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=[]):
# Right now we only support building on Windows; running is still a
# work in progress.
if os.name == 'nt':
self.build_only = True
self.run = False
return
_build_only = True
# we asked for build-only on the command line
if build_only or self.testcase.build_only:
self.build_only = True
self.run = False
return
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
self.build_only = True
self.run = False
return
runnable = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["nsim", "renode", "qemu"] or \
device_testing)
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
runnable = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
runnable = False
# console harness allows us to run the test and capture data.
if self.testcase.harness in [ 'console', 'ztest']:
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = self.testcase.harness_config.get('fixture')
if fixture:
if fixture in fixtures:
_build_only = False
else:
_build_only = True
else:
_build_only = False
elif self.testcase.harness:
_build_only = True
else:
_build_only = False
self.build_only = not (not _build_only and runnable)
self.run = not self.build_only
return
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory, otherwise we would
# pass this overlay to kconfig.py *twice* and kconfig.cmake would
# silently give that second pass precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
f.write(content)
return content
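# For example, with coverage enabled for this platform and ASan requested
# on a native platform, the generated testcase_extra.conf would contain,
# in addition to any extra_configs:
#   CONFIG_COVERAGE=y
#   CONFIG_COVERAGE_DUMP=y
#   CONFIG_ASAN=y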
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Wa,--fatal-warnings"
else:
ldflags = cflags = aflags = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DEXTRA_CFLAGS="{cflags}"',
f'-DEXTRA_AFLAGS="{aflags}"',
f'-DEXTRA_LDFLAGS="{ldflags}"',
f'-G{self.generator}'
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
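# The resulting invocation looks roughly like this (illustrative, with
# warnings treated as errors):
#   cmake -B<build_dir> -S<source_dir> -DEXTRA_CFLAGS="-Werror"
#         -DEXTRA_AFLAGS="-Wa,--fatal-warnings" -DEXTRA_LDFLAGS="-Wl,--fatal-warnings"
#         -G<generator> -D<extra args...> -DBOARD=<board>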
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
dts_path = os.path.join(self.build_dir, "zephyr", self.platform.name + ".dts.pre.tmp")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(dts_path):
edt = edtlib.EDT(dts_path, [os.path.join(ZEPHYR_BASE, "dts", "bindings")],
warn_reg_unit_address_mismatch=False)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
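# Messages flow through the pipeline as dicts with an "op" key; a test
# normally progresses cmake -> build -> run (when runnable) -> report ->
# cleanup, and jumps straight to "report" when it is filtered out, fails,
# or is built in cmake-only mode.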
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.status}")
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
self.cleanup_artifacts()
def cleanup_artifacts(self):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
whitelist = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in whitelist:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def report_out(self):
total_tests_width = len(str(self.suite.total_tests))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout"]:
if instance.status == "error":
self.suite.total_errors += 1
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
self.suite.total_skipped += 1
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
self.suite.total_passed += 1
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_tests,
Fore.RESET,
int((float(self.suite.total_done) / self.suite.total_tests) * 100),
Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
self.suite.total_skipped,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
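# e.g. args == ["OVERLAY_CONFIG=foo.conf", "CONF_FILE=prj.conf"] is reduced
# to ["CONF_FILE=prj.conf"] and ["foo.conf"] is returned (names illustrative).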
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan or self.ubsan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the number of queued work items reaches the
limit given as "bound".
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
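# A minimal usage sketch (illustrative; some_callable and arg are
# placeholders):
#
#   executor = BoundedExecutor(bound=20, max_workers=4)
#   future = executor.submit(some_callable, arg)  # blocks when the queue is full
#   result = future.result()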
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_whitelist": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_whitelist": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_whitelist": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_size_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.total_passed = 0
self.total_errors = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update(self):
self.total_tests = len(self.instances)
self.total_cases = len(self.testcases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
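# Illustrative note (assumption, based on csv_report() later in this class): the file
# consumed by compare_metrics() is the CSV this suite itself writes, with rows such as
#
#   test,arch,platform,status,extra_args,handler,handler_time,ram_size,rom_size
#   kernel.common,x86,qemu_x86,passed,,qemu,2.1,12000,35000
#
# Only the "test", "platform", "ram_size" and "rom_size" columns are read here; the
# sample test and platform names above are made up for illustration.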
def misc_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage <
(footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics['handler_time']:
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_passed) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_passed,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_whitelist = tc_dict["arch_whitelist"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_whitelist = tc_dict["platform_whitelist"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
device_testing_filter = kwargs.get('device_testing')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if device_testing_filter:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.build_only = False
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = "Platform is excluded on command line."
continue
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if device_testing_filter and instance.build_only:
discards[instance] = "Not runnable on device"
continue
if tc.skip:
discards[instance] = "Skip filter"
continue
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = "Command line testcase tag filter"
continue
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = "Command line testcase exclude filter"
continue
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = "Testcase name filter"
continue
if arch_filter and plat.arch not in arch_filter:
discards[instance] = "Command line testcase arch filter"
continue
if not force_platform:
if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
discards[instance] = "Not in test case arch whitelist"
continue
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = "In test case arch exclude"
continue
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = "In test case platform exclude"
continue
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = "In test case toolchain exclude"
continue
if platform_filter and plat.name not in platform_filter:
discards[instance] = "Command line platform filter"
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
discards[instance] = "Not in testcase platform whitelist"
continue
if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
discards[instance] = "Not in testcase toolchain whitelist"
continue
if not plat.env_satisfied:
discards[instance] = "Environment ({}) not satisfied".format(", ".join(plat.env))
continue
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = "Not supported by the toolchain"
continue
if plat.ram < tc.min_ram:
discards[instance] = "Not enough RAM"
continue
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = "No hardware support"
continue
if plat.flash < tc.min_flash:
discards[instance] = "Not enough FLASH"
continue
if set(plat.ignore_tags) & tc.tags:
discards[instance] = "Excluded tags per platform"
continue
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_whitelist:
a = set(self.default_platforms)
b = set(tc.platform_whitelist)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = "Not a default test platform"
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped', 'error']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
# fetch a message from the queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception: %s' % (test, exc))
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if not self.discards:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
total = 0
if platform:
selected = [platform]
else:
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP':
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
else:
passes += 1
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skip'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skip="%s" % (skips))
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skip="%s" % (skips))
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK']:
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS':
pass
elif instance.results[k] == 'SKIP':
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
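# Hedged sketch (illustrative, not part of the original script): the usual flow for
# driving TestSuite is to load platforms and testcases, then filter and inspect the
# discards. The directory arguments and the "native_posix" platform name are assumptions
# for this example, and apply_filters() expects ZEPHYR_TOOLCHAIN_VARIANT to be set.
def _testsuite_flow_sketch(board_root, testcase_root, outdir):
    suite = TestSuite([board_root], [testcase_root], outdir)
    suite.add_configurations()
    suite.add_testcases()
    discards = suite.apply_filters(platform=["native_posix"])
    for instance, reason in discards.items():
        logger.debug("discarded %s: %s" % (instance.name, reason))
    return suite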
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# if kobject_hash is given for coverage, gcovr fails;
# skip it (problem observed only in gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
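# Hedged usage sketch (illustrative only): selecting and configuring a coverage tool.
# The gcov binary name and the ignore patterns below are assumptions for this example,
# and running it requires lcov/genhtml or gcovr to be installed.
def _coverage_report_sketch(outdir, zephyr_base, use_gcovr=True):
    tool = CoverageTool.factory("gcovr" if use_gcovr else "lcov")
    if tool is None:
        return
    tool.gcov_tool = "gcov"
    tool.base_dir = zephyr_base
    tool.add_ignore_file("generated")       # becomes '.*generated.*' (gcovr) or '*generated*' (lcov)
    tool.add_ignore_directory("tests/lib")  # becomes 'tests/lib/.*' (gcovr) or 'tests/lib/*' (lcov)
    tool.generate(outdir)

# For reference, Gcovr._interleave_list simply pairs a flag with every item:
#   Gcovr._interleave_list("-e", ["a", "b"]) == ["-e", "a", "-e", "b"]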
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform):
device = {
"serial": serial,
"platform": platform,
"counter": 0,
"available": True,
"connected": True
}
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
self.detected.sort(key=lambda x: x['serial'] or '')
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product'] and not h['connected'] and not d.get('match', False):
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
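# Hedged sketch (not part of the original script): registering a device supplied on the
# command line and printing the resulting map. The serial device and platform name are
# assumptions chosen for illustration; tabulate is assumed to be imported at module level.
def _hardware_map_sketch():
    hwmap = HardwareMap()
    hwmap.load_device_from_cmdline("/dev/ttyACM0", "frdm_k64f")
    HardwareMap.dump(hwmap.connected_hardware, connected_only=True)
    return hwmap.connected_hardware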
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
app.py
|
import dispenser
import flask
import os
import logging
from time import sleep
import datetime
from coinbase_commerce.client import Client
from threading import Thread, Timer
event_dict = {}
polling = True
sample_lock = False
app = flask.Flask(__name__)
TYPE_CREATED = "charge:created"
TYPE_PENDING = "charge:pending"
API_KEY = os.environ.get("AIRDROPZ_CB_COMMERCE_KEY")
commerce_client = Client(api_key=API_KEY)
connected = False
while not connected:
print("Attempting to connect to Coinbase Commerce..")
try:
commerce_client = Client(api_key=API_KEY)
test = commerce_client.event.list().data[0]
if test is not None:
connected = True
print("Connected..")
except Exception as e:
print("Unable to connect to Coinbase Commerce..retrying")
logging.error('Error!', exc_info=e)
sleep(3)
@app.route('/', methods=['GET'])
def home():
return flask.render_template('index.html')
@app.route('/brewmaster', methods=["GET"])
def sample():
global sample_lock
lock = sample_lock
if lock:
return "Samples are locked at this time."
else:
print("> Have one, on the house!")
dispenser.execute()
sample_lock = True
next_sample = datetime.datetime.now() + datetime.timedelta(minutes=30)
print(">> Locking next free sample until %s" % next_sample.strftime('%H:%M:%S'))
lock_thread = Timer(60*30,sample_timeout)
lock_thread.start()
return "Have one on the house 🍻\n!"
def sample_timeout():
global sample_lock
sample_lock = False
print("> Another round? (Free samples are now unlocked)")
return
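# Hedged sketch (not part of the original app): exercises the free-sample lock with a
# short timer instead of the 30-minute production delay. Purely illustrative; the helper
# name is an assumption.
def _sample_lock_demo():
    global sample_lock
    sample_lock = True
    unlock_timer = Timer(0.1, sample_timeout)
    unlock_timer.start()
    unlock_timer.join()
    return sample_lock  # False again once sample_timeout() has reset it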
def get_recent_event():
event = commerce_client.event.list().data[0]
# Check if event_id is in event_dict
event_data = {
'created_at': event.created_at,
'code':event.data.code,
'id':event.id, 'type':event.type
}
return event_data
def start_polling_events():
# we ignore the first event and don't dispense anything
first = get_recent_event()
first_id = first['id']
event_dict[first_id] = first
while polling:
print("Polling...")
sleep(5)
event_data = get_recent_event()
id = event_data['id']
current_type = event_data['type']
print("-- Received event %s" % id)
if id not in event_dict:
print("--- Event is new..")
# Transaction not seen before; record it
event_dict[id] = event_data
if current_type == TYPE_PENDING:
print('---- Payment PENDING made successfully.')
# Payment is already pending, dispense
dispenser.execute()
print("---- Charge completed!")
else:
print("--- Event exists..")
last_type = event_dict[id]['type']
if last_type == TYPE_CREATED and current_type != last_type:
print('---- Payment PENDING made successfully.')
# set the new data in the event dict
event_dict[id] = event_data
dispenser.execute()
print("---- Charge completed.")
print("\n")
polling_thread = Thread(target = start_polling_events, name="polling_thread")
polling_thread.daemon = True
polling_thread.start()
app.run(host="0.0.0.0")
|
artifacts.py
|
import json
import mimetypes
import os
import pickle
from six.moves.urllib.parse import quote
from copy import deepcopy
from datetime import datetime
from multiprocessing import RLock, Event
from multiprocessing.pool import ThreadPool
from tempfile import mkdtemp, mkstemp
from threading import Thread
from time import time
from zipfile import ZipFile, ZIP_DEFLATED
import six
from PIL import Image
from pathlib2 import Path
from six.moves.urllib.parse import urlparse
from typing import Dict, Union, Optional, Any, Sequence
from ..backend_api import Session
from ..backend_api.services import tasks
from ..backend_interface.metrics.events import UploadEvent
from ..debugging.log import LoggerRoot
from ..storage.helper import remote_driver_schemes
from ..storage.util import sha256sum, format_size, get_common_path
from ..utilities.proxy_object import LazyEvalWrapper
try:
import pandas as pd
DataFrame = pd.DataFrame
except ImportError:
pd = None
DataFrame = None
try:
import numpy as np
except ImportError:
np = None
try:
from pathlib import Path as pathlib_Path
except ImportError:
pathlib_Path = None
class Artifact(object):
"""
Read-Only Artifact object
"""
@property
def url(self):
# type: () -> str
"""
:return: The URL of uploaded artifact.
"""
return self._url
@property
def name(self):
# type: () -> str
"""
:return: The name of artifact.
"""
return self._name
@property
def size(self):
# type: () -> int
"""
:return: The size in bytes of artifact.
"""
return self._size
@property
def type(self):
# type: () -> str
"""
:return: The type (str) of the artifact.
"""
return self._type
@property
def mode(self):
# type: () -> Union["input", "output"] # noqa: F821
"""
:return: The mode (str) of the artifact: "input" or "output".
"""
return self._mode
@property
def hash(self):
# type: () -> str
"""
:return: SHA2 hash (str) of the artifact content.
"""
return self._hash
@property
def timestamp(self):
# type: () -> datetime
"""
:return: Timestamp (datetime) of uploaded artifact.
"""
return self._timestamp
@property
def metadata(self):
# type: () -> Optional[Dict[str, str]]
"""
:return: Key/Value dictionary attached to artifact.
"""
return self._metadata
@property
def preview(self):
# type: () -> str
"""
:return: A string (str) representation of the artifact.
"""
return self._preview
def __init__(self, artifact_api_object):
"""
construct read-only object from api artifact object
:param tasks.Artifact artifact_api_object:
"""
self._name = artifact_api_object.key
self._size = artifact_api_object.content_size
self._type = artifact_api_object.type
self._mode = artifact_api_object.mode
self._url = artifact_api_object.uri
self._hash = artifact_api_object.hash
self._timestamp = datetime.fromtimestamp(artifact_api_object.timestamp or 0)
self._metadata = dict(artifact_api_object.display_data) if artifact_api_object.display_data else {}
self._preview = artifact_api_object.type_data.preview if artifact_api_object.type_data else None
self._object = None
def get(self, force_download=False):
# type: (bool) -> Any
"""
Return an object constructed from the artifact file
Currently supported types: Numpy.array, pandas.DataFrame, PIL.Image, dict (json)
All other types will return a pathlib2.Path object pointing to a local copy of the artifacts file (or directory)
:param bool force_download: download file from remote even if exists in local cache
:return: One of the following objects Numpy.array, pandas.DataFrame, PIL.Image, dict (json), or pathlib2.Path.
"""
if self._object:
return self._object
local_file = self.get_local_copy(raise_on_error=True, force_download=force_download)
# noinspection PyProtectedMember
if self.type == 'numpy' and np:
self._object = np.load(local_file)[self.name]
elif self.type == Artifacts._pd_artifact_type and pd:
self._object = pd.read_csv(local_file)
elif self.type == 'pandas' and pd:
self._object = pd.read_csv(local_file, index_col=[0])
elif self.type == 'image':
self._object = Image.open(local_file)
elif self.type == 'JSON':
with open(local_file, 'rt') as f:
self._object = json.load(f)
elif self.type == 'string':
with open(local_file, 'rt') as f:
self._object = f.read()
elif self.type == 'pickle':
with open(local_file, 'rb') as f:
self._object = pickle.load(f)
local_file = Path(local_file)
if self._object is None:
self._object = local_file
return self._object
def get_local_copy(self, extract_archive=True, raise_on_error=False, force_download=False):
# type: (bool, bool, bool) -> str
"""
:param bool extract_archive: If True and artifact is of type 'archive' (compressed folder)
The returned path will be a temporary folder containing the archive content
:param bool raise_on_error: If True and the artifact could not be downloaded,
raise ValueError, otherwise return None on failure and output log warning.
:param bool force_download: download file from remote even if exists in local cache
:raise: Raises error if local copy not found.
:return: A local path to a downloaded copy of the artifact.
"""
from clearml.storage import StorageManager
local_copy = StorageManager.get_local_copy(
remote_url=self.url,
extract_archive=extract_archive and self.type == 'archive',
name=self.name,
force_download=force_download
)
if raise_on_error and local_copy is None:
raise ValueError(
"Could not retrieve a local copy of artifact {}, failed downloading {}".format(self.name, self.url))
return local_copy
def __repr__(self):
return str({'name': self.name, 'size': self.size, 'type': self.type, 'mode': self.mode, 'url': self.url,
'hash': self.hash, 'timestamp': self.timestamp,
'metadata': self.metadata, 'preview': self.preview, })
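# Hedged usage sketch (illustrative, going through the public clearml Task API rather
# than this module directly): read-only Artifact objects are usually obtained from a
# task's artifact registry. The task id and artifact name are placeholders.
def _read_artifact_sketch(task_id, artifact_name):
    from clearml import Task
    task = Task.get_task(task_id=task_id)
    artifact = task.artifacts[artifact_name]   # an Artifact instance as defined above
    local_path = artifact.get_local_copy()     # cached local download
    obj = artifact.get()                       # deserialized object (DataFrame, dict, ...)
    return local_path, obj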
class Artifacts(object):
max_preview_size_bytes = 65536
_flush_frequency_sec = 300.
# notice these two should match
_save_format = '.csv.gz'
_compression = 'gzip'
# hashing constants
_hash_block_size = 65536
_pd_artifact_type = 'data-audit-table'
class _ProxyDictWrite(dict):
""" Dictionary wrapper that updates an arguments instance on any item set in the dictionary """
def __init__(self, artifacts_manager, *args, **kwargs):
super(Artifacts._ProxyDictWrite, self).__init__(*args, **kwargs)
self._artifacts_manager = artifacts_manager
# list of artifacts we should not upload (by name & weak-reference)
self.artifact_metadata = {}
# list of hash columns to calculate uniqueness for the artifacts
self.artifact_hash_columns = {}
def __setitem__(self, key, value):
# check that value is of type pandas
if pd and isinstance(value, pd.DataFrame):
super(Artifacts._ProxyDictWrite, self).__setitem__(key, value)
if self._artifacts_manager:
self._artifacts_manager.flush()
else:
raise ValueError('Artifacts currently support pandas.DataFrame objects only')
def unregister_artifact(self, name):
self.artifact_metadata.pop(name, None)
self.pop(name, None)
def add_metadata(self, name, metadata):
self.artifact_metadata[name] = deepcopy(metadata)
def get_metadata(self, name):
return self.artifact_metadata.get(name)
def add_hash_columns(self, artifact_name, hash_columns):
self.artifact_hash_columns[artifact_name] = hash_columns
def get_hash_columns(self, artifact_name):
return self.artifact_hash_columns.get(artifact_name)
@property
def registered_artifacts(self):
# type: () -> Dict[str, Artifact]
return self._artifacts_container
@property
def summary(self):
# type: () -> str
return self._summary
def __init__(self, task):
self._task = task
# notice the double link, this is important since the Artifact
# dictionary needs to signal the Artifacts manager on changes
self._artifacts_container = self._ProxyDictWrite(self)
self._last_artifacts_upload = {}
self._unregister_request = set()
self._thread = None
self._flush_event = Event()
self._exit_flag = False
self._summary = ''
self._temp_folder = []
self._task_artifact_list = []
self._task_edit_lock = RLock()
self._storage_prefix = None
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
# type: (str, DataFrame, Optional[dict], Union[bool, Sequence[str]]) -> ()
"""
:param str name: name of the artifacts. Notice! it will override previous artifacts if name already exists.
:param pandas.DataFrame artifact: artifact object, supported artifacts object types: pandas.DataFrame
:param dict metadata: dictionary of key value to store with the artifact (visible in the UI)
:param list uniqueness_columns: list of columns for artifact uniqueness comparison criteria. The default value
is True, which equals to all the columns (same as artifact.columns).
"""
# currently we support pandas.DataFrame (which we will upload as csv.gz)
if name in self._artifacts_container:
LoggerRoot.get_base_logger().info('Register artifact, overwriting existing artifact \"{}\"'.format(name))
self._artifacts_container.add_hash_columns(
name, list(artifact.columns if uniqueness_columns is True else uniqueness_columns)
)
self._artifacts_container[name] = artifact
if metadata:
self._artifacts_container.add_metadata(name, metadata)
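# Illustrative note (assumption, using the public clearml API): register_artifact() is
# normally reached through Task.register_artifact, e.g.
#
#     task.register_artifact("samples", df, metadata={"source": "demo"},
#                            uniqueness_columns=["id"])
#
# which schedules the DataFrame for periodic upload, using the given columns for change
# detection. The artifact name, metadata and column names above are placeholders.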
def unregister_artifact(self, name):
# type: (str) -> ()
# Remove artifact from the watch list
self._unregister_request.add(name)
self.flush()
def upload_artifact(self, name, artifact_object=None, metadata=None, preview=None,
delete_after_upload=False, auto_pickle=True, wait_on_upload=False):
# type: (str, Optional[object], Optional[dict], Optional[str], bool, bool, bool) -> bool
if not Session.check_min_api_version('2.3'):
LoggerRoot.get_base_logger().warning('Artifacts not supported by your ClearML-server version, '
'please upgrade to the latest server version')
return False
if name in self._artifacts_container:
raise ValueError("Artifact by the name of {} is already registered, use register_artifact".format(name))
# cast preview to string
if preview not in (None, False):
preview = str(preview)
# evaluate lazy proxy object
if isinstance(artifact_object, LazyEvalWrapper):
# noinspection PyProtectedMember
artifact_object = LazyEvalWrapper._load_object(artifact_object)
pathlib_types = (Path, pathlib_Path,) if pathlib_Path is not None else (Path,)
local_filename = None
# try to convert string Path object (it might reference a file/folder)
# do not try to serialize long texts.
if isinstance(artifact_object, six.string_types) and len(artifact_object) < 2048:
# noinspection PyBroadException
try:
artifact_path = Path(artifact_object)
if artifact_path.exists():
artifact_object = artifact_path
elif '*' in artifact_object or '?' in artifact_object:
# hackish, detect wildcard in the files
folder = Path('').joinpath(*artifact_path.parts[:-1])
if folder.is_dir() and folder.parts:
wildcard = artifact_path.parts[-1]
if list(Path(folder).rglob(wildcard)):
artifact_object = artifact_path
except Exception:
pass
store_as_pickle = False
artifact_type_data = tasks.ArtifactTypeData()
artifact_type_data.preview = ''
override_filename_in_uri = None
override_filename_ext_in_uri = None
uri = None
if np and isinstance(artifact_object, np.ndarray):
artifact_type = 'numpy'
artifact_type_data.content_type = 'application/numpy'
artifact_type_data.preview = preview or str(artifact_object.__repr__())
override_filename_ext_in_uri = '.npz'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
np.savez_compressed(local_filename, **{name: artifact_object})
delete_after_upload = True
elif pd and isinstance(artifact_object, pd.DataFrame):
artifact_type = 'pandas'
artifact_type_data.content_type = 'text/csv'
artifact_type_data.preview = preview or str(artifact_object.__repr__())
override_filename_ext_in_uri = self._save_format
override_filename_in_uri = name
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
artifact_object.to_csv(local_filename, compression=self._compression)
delete_after_upload = True
elif isinstance(artifact_object, Image.Image):
artifact_type = 'image'
artifact_type_data.content_type = 'image/png'
desc = str(artifact_object.__repr__())
artifact_type_data.preview = preview or desc[1:desc.find(' at ')]
override_filename_ext_in_uri = '.png'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
artifact_object.save(local_filename)
delete_after_upload = True
elif isinstance(artifact_object, dict):
artifact_type = 'JSON'
artifact_type_data.content_type = 'application/json'
# noinspection PyBroadException
try:
json_text = json.dumps(artifact_object, sort_keys=True, indent=4)
except Exception:
if not auto_pickle:
raise
LoggerRoot.get_base_logger().warning(
"JSON serialization of artifact \'{}\' failed, reverting to pickle".format(name))
store_as_pickle = True
json_text = None
if json_text is not None:
override_filename_ext_in_uri = '.json'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.write(fd, bytes(json_text.encode()))
os.close(fd)
preview = preview or json_text
if len(preview) < self.max_preview_size_bytes:
artifact_type_data.preview = preview
else:
artifact_type_data.preview = '# full json too large to store, storing first {}kb\n{}'.format(
self.max_preview_size_bytes//1024, preview[:self.max_preview_size_bytes]
)
delete_after_upload = True
elif isinstance(artifact_object, pathlib_types):
# check if single file
artifact_object = Path(artifact_object)
artifact_object = artifact_object.expanduser().absolute()
# noinspection PyBroadException
try:
create_zip_file = not artifact_object.is_file()
except Exception: # Hack for windows pathlib2 bug, is_file isn't valid.
create_zip_file = True
else: # We assume that this is not Windows os
if artifact_object.is_dir():
# change to wildcard
artifact_object /= '*'
if create_zip_file:
folder = Path('').joinpath(*artifact_object.parts[:-1])
if not folder.is_dir() or not folder.parts:
raise ValueError("Artifact file/folder '{}' could not be found".format(
artifact_object.as_posix()))
wildcard = artifact_object.parts[-1]
files = list(Path(folder).rglob(wildcard))
override_filename_ext_in_uri = '.zip'
override_filename_in_uri = folder.parts[-1] + override_filename_ext_in_uri
fd, zip_file = mkstemp(
prefix=quote(folder.parts[-1], safe="") + '.', suffix=override_filename_ext_in_uri
)
try:
artifact_type_data.content_type = 'application/zip'
archive_preview = 'Archive content {}:\n'.format(artifact_object.as_posix())
with ZipFile(zip_file, 'w', allowZip64=True, compression=ZIP_DEFLATED) as zf:
for filename in sorted(files):
if filename.is_file():
relative_file_name = filename.relative_to(folder).as_posix()
archive_preview += '{} - {}\n'.format(
relative_file_name, format_size(filename.stat().st_size))
zf.write(filename.as_posix(), arcname=relative_file_name)
except Exception as e:
# failed uploading folder:
LoggerRoot.get_base_logger().warning('Exception {}\nFailed zipping artifact folder {}'.format(
folder, e))
return False
finally:
os.close(fd)
artifact_type_data.preview = preview or archive_preview
artifact_object = zip_file
artifact_type = 'archive'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
local_filename = artifact_object
delete_after_upload = True
else:
if not artifact_object.is_file():
raise ValueError("Artifact file '{}' could not be found".format(artifact_object.as_posix()))
override_filename_in_uri = artifact_object.parts[-1]
artifact_type_data.preview = preview or '{} - {}\n'.format(
artifact_object, format_size(artifact_object.stat().st_size))
artifact_object = artifact_object.as_posix()
artifact_type = 'custom'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
local_filename = artifact_object
elif isinstance(artifact_object, (list, tuple)) and all(isinstance(p, pathlib_types) for p in artifact_object):
# find common path if exists
list_files = [Path(p) for p in artifact_object]
override_filename_ext_in_uri = '.zip'
override_filename_in_uri = quote(name, safe="") + override_filename_ext_in_uri
common_path = get_common_path(list_files)
fd, zip_file = mkstemp(
prefix='artifact_folder.', suffix=override_filename_ext_in_uri
)
try:
artifact_type_data.content_type = 'application/zip'
archive_preview = 'Archive content:\n'
with ZipFile(zip_file, 'w', allowZip64=True, compression=ZIP_DEFLATED) as zf:
for filename in sorted(list_files):
if filename.is_file():
relative_file_name = filename.relative_to(Path(common_path)).as_posix() \
if common_path else filename.as_posix()
archive_preview += '{} - {}\n'.format(
relative_file_name, format_size(filename.stat().st_size))
zf.write(filename.as_posix(), arcname=relative_file_name)
else:
LoggerRoot.get_base_logger().warning(
"Failed zipping artifact file '{}', file not found!".format(filename.as_posix()))
except Exception as e:
# failed uploading folder:
LoggerRoot.get_base_logger().warning('Exception {}\nFailed zipping artifact files {}'.format(
artifact_object, e))
return False
finally:
os.close(fd)
artifact_type_data.preview = preview or archive_preview
artifact_object = zip_file
artifact_type = 'archive'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
local_filename = artifact_object
delete_after_upload = True
elif (
isinstance(artifact_object, six.string_types) and len(artifact_object) < 4096
and urlparse(artifact_object).scheme in remote_driver_schemes
):
# we should not upload this, just register
local_filename = None
uri = artifact_object
artifact_type = 'custom'
artifact_type_data.content_type = mimetypes.guess_type(artifact_object)[0]
if preview:
artifact_type_data.preview = preview
elif isinstance(artifact_object, six.string_types):
# if we got here, we should store it as text file.
artifact_type = 'string'
artifact_type_data.content_type = 'text/plain'
if preview:
artifact_type_data.preview = preview
elif len(artifact_object) < self.max_preview_size_bytes:
artifact_type_data.preview = artifact_object
else:
artifact_type_data.preview = '# full text too large to store, storing first {}kb\n{}'.format(
self.max_preview_size_bytes//1024, artifact_object[:self.max_preview_size_bytes]
)
delete_after_upload = True
override_filename_ext_in_uri = '.txt'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
# noinspection PyBroadException
try:
with open(local_filename, 'wt') as f:
f.write(artifact_object)
except Exception:
# cleanup and raise exception
os.unlink(local_filename)
raise
elif auto_pickle:
# revert to pickling the object
store_as_pickle = True
else:
raise ValueError("Artifact type {} not supported".format(type(artifact_object)))
# revert to serializing the object with pickle
if store_as_pickle:
# if we are here it means we do not know what to do with the object, so we serialize it with pickle.
artifact_type = 'pickle'
artifact_type_data.content_type = 'application/pickle'
# noinspection PyBroadException
try:
artifact_type_data.preview = preview or str(artifact_object.__repr__())[:self.max_preview_size_bytes]
except Exception:
artifact_type_data.preview = preview or ''
delete_after_upload = True
override_filename_ext_in_uri = '.pkl'
override_filename_in_uri = name + override_filename_ext_in_uri
fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
# noinspection PyBroadException
try:
with open(local_filename, 'wb') as f:
pickle.dump(artifact_object, f)
except Exception:
# cleanup and raise exception
os.unlink(local_filename)
raise
# verify preview not out of scope:
if artifact_type_data.preview and len(artifact_type_data.preview) > (self.max_preview_size_bytes+1024):
artifact_type_data.preview = '# full preview too large to store, storing first {}kb\n{}'.format(
self.max_preview_size_bytes // 1024, artifact_type_data.preview[:self.max_preview_size_bytes]
)
# remove from existing list, if exists
for artifact in self._task_artifact_list:
if artifact.key == name:
if artifact.type == self._pd_artifact_type:
raise ValueError("Artifact of name {} already registered, "
"use register_artifact instead".format(name))
self._task_artifact_list.remove(artifact)
break
if not local_filename:
file_size = None
file_hash = None
else:
# check that the file to upload exists
local_filename = Path(local_filename).absolute()
if not local_filename.exists() or not local_filename.is_file():
LoggerRoot.get_base_logger().warning('Artifact upload failed, cannot find file {}'.format(
local_filename.as_posix()))
return False
file_hash, _ = sha256sum(local_filename.as_posix(), block_size=Artifacts._hash_block_size)
file_size = local_filename.stat().st_size
uri = self._upload_local_file(local_filename, name,
delete_after_upload=delete_after_upload,
override_filename=override_filename_in_uri,
override_filename_ext=override_filename_ext_in_uri,
wait_on_upload=wait_on_upload)
timestamp = int(time())
artifact = tasks.Artifact(key=name, type=artifact_type,
uri=uri,
content_size=file_size,
hash=file_hash,
timestamp=timestamp,
type_data=artifact_type_data,
display_data=[(str(k), str(v)) for k, v in metadata.items()] if metadata else None)
# update task artifacts
self._add_artifact(artifact)
return True
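# Illustrative call patterns for the method above (the public entry point is assumed
# to be exposed as upload_artifact on the task-level artifacts API; the names, paths
# and bucket URL below are examples only, not part of this module):
#   upload_artifact('words', artifact_object=Path('/tmp/words.txt'))           # existing file -> 'custom' artifact
#   upload_artifact('tables', artifact_object=[Path('a.csv'), Path('b.csv')])  # list of paths -> zipped 'archive'
#   upload_artifact('raw-data', artifact_object='s3://bucket/data.csv')        # remote URL -> registered, not uploaded
#   upload_artifact('notes', artifact_object='free form text')                 # plain string -> stored as a .txt file
#   upload_artifact('state', artifact_object=some_object)                      # anything else -> pickled when auto_pickle is set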
def flush(self):
# type: () -> ()
# start the thread if it hasn't already:
self._start()
# flush the current state of all artifacts
self._flush_event.set()
def stop(self, wait=True):
# type: (bool) -> ()
# stop the daemon thread and quit
# wait until the thread exits
self._exit_flag = True
self._flush_event.set()
if wait:
if self._thread:
self._thread.join()
# remove all temp folders
for f in self._temp_folder:
# noinspection PyBroadException
try:
Path(f).rmdir()
except Exception:
pass
def _start(self):
# type: () -> ()
""" Start daemon thread if any artifacts are registered and thread is not up yet """
if not self._thread and self._artifacts_container:
# start the daemon thread
self._flush_event.clear()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
def _daemon(self):
# type: () -> ()
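# Background worker: wake up every _flush_frequency_sec seconds (or sooner, when
# flush() sets the event), re-upload any registered pandas artifacts that changed,
# and compute a final uniqueness summary once _exit_flag is set and the loop ends.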
while not self._exit_flag:
self._flush_event.wait(self._flush_frequency_sec)
self._flush_event.clear()
artifact_keys = list(self._artifacts_container.keys())
for name in artifact_keys:
try:
self._upload_data_audit_artifacts(name)
except Exception as e:
LoggerRoot.get_base_logger().warning(str(e))
# create summary
self._summary = self._get_statistics()
def _add_artifact(self, artifact):
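# append the artifact to the task-level list (if not already present) and push the
# full list to the backend while holding the task edit lock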
if not self._task:
raise ValueError("Task object not set")
with self._task_edit_lock:
if artifact not in self._task_artifact_list:
self._task_artifact_list.append(artifact)
# noinspection PyProtectedMember
self._task._add_artifacts(self._task_artifact_list)
def _upload_data_audit_artifacts(self, name):
# type: (str) -> ()
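# Serialize one registered pandas artifact to a temporary CSV, skip the upload when
# its content hash (computed with the first bytes of the file skipped) matches the
# previous upload, and otherwise upload the file and create or update the matching
# task artifact entry (falling back to a debug-image report for servers older than
# API version 2.3).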
logger = self._task.get_logger()
pd_artifact = self._artifacts_container.get(name)
pd_metadata = self._artifacts_container.get_metadata(name)
# remove from artifacts watch list
if name in self._unregister_request:
try:
self._unregister_request.remove(name)
except KeyError:
pass
self._artifacts_container.unregister_artifact(name)
if pd_artifact is None:
return
override_filename_ext_in_uri = self._save_format
override_filename_in_uri = name
fd, local_csv = mkstemp(prefix=quote(name, safe="") + '.', suffix=override_filename_ext_in_uri)
os.close(fd)
local_csv = Path(local_csv)
pd_artifact.to_csv(local_csv.as_posix(), index=False, compression=self._compression)
current_sha2, file_sha2 = sha256sum(
local_csv.as_posix(), skip_header=32, block_size=Artifacts._hash_block_size)
if name in self._last_artifacts_upload:
previous_sha2 = self._last_artifacts_upload[name]
if previous_sha2 == current_sha2:
# nothing to do, we can skip the upload
# noinspection PyBroadException
try:
local_csv.unlink()
except Exception:
pass
return
self._last_artifacts_upload[name] = current_sha2
# If old clearml-server, upload as debug image
if not Session.check_min_api_version('2.3'):
logger.report_image(title='artifacts', series=name, local_path=local_csv.as_posix(),
delete_after_upload=True, iteration=self._task.get_last_iteration(),
max_image_history=2)
return
# Find our artifact
artifact = None
for an_artifact in self._task_artifact_list:
if an_artifact.key == name:
artifact = an_artifact
break
file_size = local_csv.stat().st_size
# upload file
uri = self._upload_local_file(local_csv, name, delete_after_upload=True,
override_filename=override_filename_in_uri,
override_filename_ext=override_filename_ext_in_uri)
# update task artifacts
with self._task_edit_lock:
if not artifact:
artifact = tasks.Artifact(key=name, type=self._pd_artifact_type)
artifact_type_data = tasks.ArtifactTypeData()
artifact_type_data.data_hash = current_sha2
artifact_type_data.content_type = "text/csv"
artifact_type_data.preview = str(pd_artifact.__repr__(
)) + '\n\n' + self._get_statistics({name: pd_artifact})
artifact.type_data = artifact_type_data
artifact.uri = uri
artifact.content_size = file_size
artifact.hash = file_sha2
artifact.timestamp = int(time())
artifact.display_data = [(str(k), str(v)) for k, v in pd_metadata.items()] if pd_metadata else None
self._add_artifact(artifact)
def _upload_local_file(
self, local_file, name, delete_after_upload=False, override_filename=None, override_filename_ext=None,
wait_on_upload=False
):
# type: (str, str, bool, Optional[str], Optional[str], bool) -> str
"""
Upload local file and return uri of the uploaded file (uploading in the background)
"""
from clearml.storage import StorageManager
upload_uri = self._task.output_uri or self._task.get_logger().get_default_upload_destination()
if not isinstance(local_file, Path):
local_file = Path(local_file)
ev = UploadEvent(metric='artifacts', variant=name,
image_data=None, upload_uri=upload_uri,
local_image_path=local_file.as_posix(),
delete_after_upload=delete_after_upload,
override_filename=override_filename,
override_filename_ext=override_filename_ext,
override_storage_key_prefix=self._get_storage_uri_prefix())
_, uri = ev.get_target_full_upload_uri(upload_uri, quote_uri=False)
# send for upload
# noinspection PyProtectedMember
if wait_on_upload:
StorageManager.upload_file(local_file.as_posix(), uri, wait_for_upload=True, retries=ev.retries)
if delete_after_upload:
try:
os.unlink(local_file.as_posix())
except OSError:
LoggerRoot.get_base_logger().warning('Failed removing temporary {}'.format(local_file))
else:
self._task._reporter._report(ev)
_, quoted_uri = ev.get_target_full_upload_uri(upload_uri)
return quoted_uri
def _get_statistics(self, artifacts_dict=None):
# type: (Optional[Dict[str, Artifact]]) -> str
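# Build a text report of row uniqueness: hash every row of each registered DataFrame
# (restricted to its configured uniqueness columns) using a thread pool, then report
# the unique-row percentage per artifact and the pairwise row overlap between artifacts.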
summary = ''
artifacts_dict = artifacts_dict or self._artifacts_container
thread_pool = ThreadPool()
try:
# build hash row sets
artifacts_summary = []
for a_name, a_df in artifacts_dict.items():
hash_cols = self._artifacts_container.get_hash_columns(a_name)
if not pd or not isinstance(a_df, pd.DataFrame):
continue
if hash_cols is True:
hash_col_drop = []
else:
hash_cols = set(hash_cols)
missing_cols = hash_cols.difference(a_df.columns)
if missing_cols == hash_cols:
LoggerRoot.get_base_logger().warning(
'Uniqueness columns {} not found in artifact {}. '
'Skipping uniqueness check for artifact.'.format(list(missing_cols), a_name)
)
continue
elif missing_cols:
# missing_cols must be a subset of hash_cols
hash_cols.difference_update(missing_cols)
LoggerRoot.get_base_logger().warning(
'Uniqueness columns {} not found in artifact {}. Using {}.'.format(
list(missing_cols), a_name, list(hash_cols)
)
)
hash_col_drop = [col for col in a_df.columns if col not in hash_cols]
a_unique_hash = set()
def hash_row(r):
a_unique_hash.add(hash(bytes(r)))
a_shape = a_df.shape
# parallelize
a_hash_cols = a_df.drop(columns=hash_col_drop)
thread_pool.map(hash_row, a_hash_cols.values)
# add result
artifacts_summary.append((a_name, a_shape, a_unique_hash,))
# build intersection summary
for i, (name, shape, unique_hash) in enumerate(artifacts_summary):
summary += '[{name}]: shape={shape}, {unique} unique rows, {percentage:.1f}% uniqueness\n'.format(
name=name, shape=shape, unique=len(unique_hash),
percentage=100 * len(unique_hash) / float(shape[0]))
for name2, shape2, unique_hash2 in artifacts_summary[i + 1:]:
intersection = len(unique_hash & unique_hash2)
summary += '\tIntersection with [{name2}] {intersection} rows: {percentage:.1f}%\n'.format(
name2=name2, intersection=intersection,
percentage=100 * intersection / float(len(unique_hash2)))
except Exception as e:
LoggerRoot.get_base_logger().warning(str(e))
finally:
thread_pool.close()
thread_pool.terminate()
return summary
def _get_temp_folder(self, force_new=False):
# type: (bool) -> str
if force_new or not self._temp_folder:
new_temp = mkdtemp(prefix='artifacts_')
self._temp_folder.append(new_temp)
return new_temp
return self._temp_folder[0]
def _get_storage_uri_prefix(self):
# type: () -> str
if not self._storage_prefix:
# noinspection PyProtectedMember
self._storage_prefix = self._task._get_output_destination_suffix()
return self._storage_prefix
test_enum.py
import enum
import inspect
import pydoc
import unittest
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from datetime import timedelta
try:
import threading
except ImportError:
threading = None
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
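# module-level helpers: round-trip a value through every pickle protocol and compare
# the result against the expected target, or assert that pickling raises the given
# exception at every protocol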
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
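# minimal descriptor that exposes a method as a property of the class itself
# (only __get__ is implemented); used by the MaxMixin multiple-mixin tests below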
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertWarns(DeprecationWarning):
self.assertNotIn(3, Season)
with self.assertWarns(DeprecationWarning):
self.assertNotIn('AUTUMN', Season)
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_member_contains(self):
self.assertRaises(TypeError, lambda: 'test' in self.Season.AUTUMN)
self.assertRaises(TypeError, lambda: 3 in self.Season.AUTUMN)
self.assertRaises(TypeError, lambda: 'AUTUMN' in self.Season.AUTUMN)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
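# metaclass that replaces Ellipsis member values with auto-incrementing integers
# (continuing from any explicit value) before delegating to the Enum metaclass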
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
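# NamedInt is an int subclass that also carries a name; this test and the ones that
# follow repeat the same NamedInt/NEI pattern while varying only the pickle hook
# (__getnewargs__, __getnewargs_ex__, __reduce__, __reduce_ex__, or none at all)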
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
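# 'Period' and 'i' are scratch names used only while generating the members below;
# listing them in _ignore_ keeps them from becoming enum members themselves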
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_order(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = auto()
green = auto()
blue = auto()
def _generate_next_value_(name, start, count, last):
return name
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
self.assertRaises(ValueError, Color, 7)
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
class StrEnum(str, Enum):
def __new__(cls, *args, **kwargs):
for a in args:
if not isinstance(a, str):
raise TypeError("Enumeration '%s' (%s) is not"
" a string" % (a, type(a).__name__))
return str.__new__(cls, *args, **kwargs)
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Color(Flag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
class Open(Flag):
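# RO is the empty flag, AC is the composite WO|RW, and CE sits in a high bit well clear of the others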
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_programatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
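# a failed module-level definition of FlagStooges is stored as the exception object itself
# (hence the isinstance check); re-raise it so the test reports the original error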
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('BLACK' in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('RO' in Open)
with self.assertWarns(DeprecationWarning):
self.assertFalse(1 in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse(1 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_cascading_failure(self):
class Bizarre(Flag):
c = 3
d = 4
f = 6
# Bizarre.c | Bizarre.d
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
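# identity-only equality means a duplicate composite member created by a racing thread
# would count as an extra element of `seen` below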
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# check that no thread failed and that the 256 lookups yielded exactly 256 distinct
# members (i.e. the 248 composites were each created only once)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Color(IntFlag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
def test_type(self):
Perm = self.Perm
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(Open(4)), '<Open.4: 4>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, ~i.value)
self.assertEqual((~i).value, ~i.value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
Color = self.Color
Open = self.Open
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('GREEN' in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse('RW' in Open)
with self.assertWarns(DeprecationWarning):
self.assertFalse(2 in Color)
with self.assertWarns(DeprecationWarning):
self.assertFalse(2 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertWarns(DeprecationWarning):
self.assertFalse('swallow' in RW)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
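# as above: identity-only equality makes any duplicate composite created by a racing
# thread visible as an extra element of `seen`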
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# check that no thread failed and that the 256 lookups yielded exactly 256 distinct
# members (i.e. the 248 composites were each created only once)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestEmptyAndNonLatinStrings(unittest.TestCase):
def test_empty_string(self):
with self.assertRaises(ValueError):
empty_abc = Enum('empty_abc', ('', 'B', 'C'))
def test_non_latin_character_string(self):
greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C'))
item = getattr(greek_abc, '\u03B1')
self.assertEqual(item.value, 1)
def test_non_latin_number_string(self):
hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3'))
item = getattr(hebrew_123, '\u05D0')
self.assertEqual(item.value, 1)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
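# under python -OO docstrings are stripped, which changes pydoc's rendering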
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
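# the bool index selects 'test.test_enum' or '__main__' depending on how this file was
# run; _convert collects the module-level CONVERT_TEST_* constants into a new IntEnum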
test_type = enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
if __name__ == '__main__':
unittest.main()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electroncash.util import bh2u, bfh
from electroncash import keystore
from electroncash.address import Address
from electroncash.bitcoin import COIN, TYPE_ADDRESS
from electroncash.networks import NetworkConstants
from electroncash.plugins import run_hook
from electroncash.i18n import _
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds, ExcessiveFee,
UserCancelled)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
try:
from electroncash.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
cashaddr_toggled_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
Address.show_cashaddr(config.get('show_cashaddr', False))
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.converter_tab = self.create_converter_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.png"), _("Address Converter"), "converter", True)
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electron-cash.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
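# i=i binds the loop variable at definition time so each Alt+N shortcut selects its own tab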
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
pass
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Now that the GUI is initialized, check whether there are transactions to announce;
# the new_transaction callback may have fired before the GUI existed
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (NetworkConstants.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin Cash with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
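# sorted() is used purely as a sanity check: it raises if the stored value is not a
# list of comparable entries, and the except below then resets the list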
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.converter_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# 'Settings'/'Preferences' are reserved menu item names on macOS, so use a different label there as a workaround
tools_menu.addAction(_("Electron Cash preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electroncash.org"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"), lambda: webbrowser.open("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('{}:{}?message=donation for {}'
.format(NetworkConstants.CASHADDR_PREFIX, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electron Cash",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/fyookball/electrum/issues\">https://github.com/fyookball/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electron Cash", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electron Cash", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName that remember the directory last selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
if self.fee_unit == 0:
return '{:.2f} sat/byte'.format(fee_rate / 1000)
else:
return self.format_amount(fee_rate) + ' ' + self.base_unit() + '/kB'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
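# decimal_point is the number of satoshi decimal places currently displayed:
# 2 -> bits, 5 -> mBCH, 8 -> BCH (e.g. 123456789 sats -> "1.23456789 BCH"
# when decimal_point == 8).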
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mBCH'
if self.decimal_point == 8:
return 'BCH'
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
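# Two-way binding between the BTC amount edit and its fiat counterpart.
# The 'follows' flag prevents feedback loops: when one field programmatically
# updates the other, the other field's change handler returns early.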
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
self.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
_('The Bitcoin Cash address never expires and will always be part of this wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
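# Build a BIP21-style payment URI for this request; creation time, expiry,
# requestor name and signature are appended as extra query parameters.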
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
URI = web.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
if not self.receive_address:
self.show_error(_('No receiving address'))
return False
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.set_receive_address(self.wallet.get_receiving_address())
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.new_request_button.setEnabled(True)
self.update_receive_address_widget()
def update_receive_qr(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = web.create_URI(self.receive_address, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(self.receive_address_e.text(), amount,
message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin Cash address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin Cash address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
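# Fee slider callback: with dynamic fees the slider position selects a fee
# level, otherwise it selects a static fee rate.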
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 5, 0)
grid.addWidget(self.fee_slider, 5, 1)
grid.addWidget(self.fee_e, 5, 2)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
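# Recolour the amount/fee fields: red when funds are insufficient, blue for
# auto-calculated values, default for values the user typed in.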
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
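# A fee is only treated as frozen (user-chosen) if the user actually edited
# the field and it still holds text or keyboard focus; '!' is the sentinel
# amount meaning "spend the maximum".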
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
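# Keep prompting until the wallet accepts the password or the user cancels.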
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins()
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
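# When sweeping external private keys (see sweep_key_dialog), sign with those
# keypairs directly; otherwise let the wallet's own keystore sign.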
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
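# If this payment fulfils a BIP70 payment request, mark the invoice as paid
# and send the payment ACK (with a refund address) back to the requestor.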
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
self.show_error(_('Invalid bitcoincash URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_converter_tab(self):
source_address = QLineEdit()
cash_address = QLineEdit()
cash_address.setReadOnly(True)
legacy_address = QLineEdit()
legacy_address.setReadOnly(True)
bitpay_address = QLineEdit()
bitpay_address.setReadOnly(True)
widgets = [
(cash_address, Address.FMT_CASHADDR),
(legacy_address, Address.FMT_LEGACY),
(bitpay_address, Address.FMT_BITPAY),
]
def convert_address():
try:
addr = Address.from_string(source_address.text().strip())
except:
addr = None
for widget, fmt in widgets:
if addr:
widget.setText(addr.to_full_string(fmt))
else:
widget.setText('')
source_address.textChanged.connect(convert_address)
label = WWLabel(_(
"This tool helps convert between 3 address formats for Bitcoin "
"Cash addresses.\nYou are encouraged to use the 'Cash address' "
"format.\nThe BitPay format is deprecated and support is for "
"a transitional period only."
))
w = QWidget()
grid = QGridLayout()
grid.setSpacing(15)
grid.setColumnStretch(1, 2)
grid.setColumnStretch(2, 1)
grid.addWidget(QLabel(_('Address to convert')), 0, 0)
grid.addWidget(source_address, 0, 1)
grid.addWidget(QLabel(_('Cash address')), 1, 0)
grid.addWidget(cash_address, 1, 1)
grid.addWidget(QLabel(_('Legacy address')), 2, 0)
grid.addWidget(legacy_address, 2, 1)
grid.addWidget(QLabel(_('BitPay address')), 3, 0)
grid.addWidget(bitpay_address, 3, 1)
w.setLayout(grid)
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(w)
vbox.addStretch(1)
w = QWidget()
w.setLayout(vbox)
return w
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not Address.is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.addr_converter_button = StatusBarButton(
self.cashaddr_icon(),
_("Toggle CashAddr Display"),
self.toggle_cashaddr_status_bar
)
sb.addPermanentWidget(self.addr_converter_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
# A second bare 'except:' after catching BaseException would be unreachable,
# so report the error and print the traceback in a single handler.
traceback.print_exc(file=sys.stdout)
self.show_error(str(e) or _('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + self.msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
except:
verified = False
else:
verified = bitcoin.verify_message(address, sig, message)
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx)
tx.deserialize()
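# Annotate any inputs owned by this wallet with their value so the
# transaction dialog can display amounts and the fee.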
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
def read_tx_from_qrcode(self):
from electroncash import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoincash URI
if data.lower().startswith(NetworkConstants.CASHADDR_PREFIX + ':'):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
tx = self.tx_from_text(file_content)
if tx and len(tx_file_dict.get('input_values', ())) >= len(tx.inputs()):
for i in range(len(tx.inputs())):
tx._inputs[i]['value'] = tx_file_dict['input_values'][i]
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self):
from electroncash import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
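# Private keys are exported in a background thread; progress and completion
# are reported back to the GUI thread via Qt signals, while the done/cancelled
# flags let the dialog stop the worker early.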
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr.to_ui_string()] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electron Cash was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
writer = csv.writer(f)
writer.writerow(["address", "private_key"])
for addr, pk in pklist.items():
writer.writerow(["%34s" % addr, pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electron Cash was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.export_history(fx=self.fx)
lines = []
for item in history:
if is_csv:
lines.append([item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']])
else:
lines.append(item)
with open(fileName, "w+") as f:
if is_csv:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(["transaction_hash", "label", "confirmations", "value", "timestamp"])
for line in lines:
writer.writerow(line)
else:
import json
f.write(json.dumps(lines, indent=4))
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText())
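# The Sweep button is enabled only once both a destination address and at
# least one parsable private key have been provided.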
def enable_sweep():
sweep_button.setEnabled(bool(get_address_text()
and get_priv_keys()))
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
if not d.exec_():
return
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_priv_keys(), self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def cashaddr_icon(self):
if self.config.get('show_cashaddr', False):
return QIcon(":icons/tab_converter.png")
else:
return QIcon(":icons/tab_converter_bw.png")
def update_cashaddr_icon(self):
self.addr_converter_button.setIcon(self.cashaddr_icon())
def toggle_cashaddr_status_bar(self):
self.toggle_cashaddr(not self.config.get('show_cashaddr', False))
def toggle_cashaddr_settings(self, state):
self.toggle_cashaddr(state == Qt.Checked)
def toggle_cashaddr(self, on):
self.config.set_key('show_cashaddr', on)
Address.show_cashaddr(on)
for window in self.gui_object.windows:
window.cashaddr_toggled_signal.emit()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
cashaddr_cb = QCheckBox(_('CashAddr address format'))
cashaddr_cb.setChecked(Address.FMT_UI == Address.FMT_CASHADDR)
cashaddr_cb.setToolTip(_("If unchecked, addresses are shown in legacy format"))
cashaddr_cb.stateChanged.connect(self.toggle_cashaddr_settings)
gui_widgets.append((cashaddr_cb, None))
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_maxfee(x):
m = maxfee_e.get_amount()
if m: self.config.set_key('max_fee_rate', m)
self.fee_slider.update()
def update_maxfee():
maxfee_e.setDisabled(False)
maxfee_label.setDisabled(False)
maxfee_label = HelpLabel(_('Max static fee'), _('Max value of the static fee slider'))
maxfee_e = BTCkBEdit(self.get_decimal_point)
maxfee_e.setAmount(self.config.max_fee_rate())
maxfee_e.textChanged.connect(on_maxfee)
update_maxfee()
fee_widgets.append((maxfee_label, maxfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BCH', 'mBCH', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1 BCH = 1000 mBCH = 1000000 bits.\n' \
+ _('This setting affects the fields in the Send tab.')
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BCH':
self.decimal_point = 8
elif unit_result == 'mBCH':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
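# Editor's note (sketch): the base unit only changes presentation; amounts are
# stored in satoshis internally. With decimal_point set to 8 (BCH), 5 (mBCH) or
# 2 (bits), the displayed value is amount_sat / 10 ** decimal_point, e.g.
# 123456789 sat -> 1.23456789 BCH == 1234.56789 mBCH == 1234567.89 bits.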
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electroncash import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electron Cash Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
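# Editor's note (worked example): fee_per_kb() is expressed in satoshis per
# kilobyte, so with fee_per_kb() == 1000 sat/kB and total_size == 400 bytes the
# suggested starting fee is 1000 * 400 / 1000 == 400 satoshis, later capped at
# max_fee by on_rate() and by the check after the dialog is accepted.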
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
self.show_transaction(new_tx)
|
server_rpc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/server_rpc.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import functools
import logging
import threading
from king_phisher import errors
from king_phisher import geoip
from king_phisher import ipaddress
from king_phisher import version
from king_phisher.constants import ConnectionErrorReason
from king_phisher.server import signals
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
import advancedhttpserver
import pyotp
CONFIG_READABLE = (
'beef.hook_url',
'server.address.host',
'server.address.port',
'server.require_id',
'server.secret_id',
'server.tracking_image',
'server.web_root'
)
"""Configuration options that can be accessed by the client."""
CONFIG_WRITEABLE = ('beef.hook_url',)
"""Configuration options that can be changed by the client at run time."""
RPC_AUTH_HEADER = 'X-RPC-Auth'
"""The header which contains the RPC authorization / session token."""
VIEW_ROW_COUNT = 50
"""The default number of rows to return when one of the /view methods are called."""
database_tables = db_models.database_tables
database_table_objects = db_models.database_table_objects
rpc_logger = logging.getLogger('KingPhisher.Server.RPC')
def register_rpc(path, database_access=False, log_call=False):
"""
Register an RPC function with the HTTP request handler. This allows the
method to be remotely invoked using King Phisher's standard RPC interface.
If *database_access* is specified, a SQLAlchemy session will be passed as
the second argument, after the standard
:py:class:`~advancedhttpserver.RequestHandler` instance.
:param str path: The path for the RPC function.
:param bool database_access: Whether or not the function requires database access.
:param bool log_call: Whether or not to log the arguments which the function is called with.
"""
path = '^' + path + '$'
def decorator(function):
@functools.wraps(function)
def wrapper(handler_instance, *args, **kwargs):
if log_call and rpc_logger.isEnabledFor(logging.DEBUG):
args_repr = ', '.join(map(repr, args))
if kwargs:
for key, value in sorted(kwargs.items()):
args_repr += ", {0}={1!r}".format(key, value)
msg = "calling RPC method {0}({1})".format(function.__name__, args_repr)
if getattr(handler_instance, 'rpc_session', False):
msg = handler_instance.rpc_session.user + ' is ' + msg
rpc_logger.debug(msg)
signals.rpc_method_call.send(path[1:-1], request_handler=handler_instance, args=args, kwargs=kwargs)
if database_access:
session = db_manager.Session()
try:
result = function(handler_instance, session, *args, **kwargs)
finally:
session.close()
else:
result = function(handler_instance, *args, **kwargs)
signals.rpc_method_called.send(path[1:-1], request_handler=handler_instance, args=args, kwargs=kwargs, retval=result)
return result
advancedhttpserver.RegisterPath(path, is_rpc=True)(wrapper)
return wrapper
return decorator
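# Editor's sketch: a minimal, hypothetical use of the decorator above. The path
# '/example/echo' and the function name are illustrative only, and the decorator
# application is left commented out so that importing this module does not
# register an extra endpoint.
#
# @register_rpc('/example/echo', log_call=True)
# def rpc_example_echo(handler, message):
#     """Return *message* unchanged; *handler* is always the first argument."""
#     return message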
@register_rpc('/ping', log_call=True)
def rpc_ping(handler):
"""
An RPC method that can be used by clients to check the status
and responsiveness of this server.
:return: This method always returns True.
:rtype: bool
"""
return True
@register_rpc('/shutdown', log_call=True)
def rpc_shutdown(handler):
"""
This method can be used to shut down the server. This function will
return; however, no subsequent requests will be processed.
.. warning::
This action will stop the server process and there is no
confirmation before it takes place.
"""
shutdown_thread = threading.Thread(target=handler.server.kp_shutdown)
shutdown_thread.start()
return
@register_rpc('/version', log_call=True)
def rpc_version(handler):
"""
Get the version information of the server. This returns a
dictionary with keys of version, version_info and rpc_api_version.
These values are provided for the client to determine
compatibility.
:return: A dictionary with version information.
:rtype: dict
"""
assert ipaddress.ip_address(handler.client_address[0]).is_loopback
vinfo = {
'rpc_api_version': version.rpc_api_version,
'version': version.version,
'version_info': version.version_info._asdict()
}
return vinfo
@register_rpc('/config/get')
def rpc_config_get(handler, option_name):
"""
Retrieve a value from the server's configuration.
:param option_name: The name of the configuration option, or a list of option names.
:return: The option's value, or a dictionary of values when a list of names is given.
"""
if isinstance(option_name, (list, tuple)):
option_names = option_name
option_values = {}
for option_name in option_names:
if option_name not in CONFIG_READABLE:
raise errors.KingPhisherPermissionError('permission denied to read config option: ' + option_name)
if handler.config.has_option(option_name):
option_values[option_name] = handler.config.get(option_name)
return option_values
if option_name not in CONFIG_READABLE:
raise errors.KingPhisherPermissionError('permission denied to read config option: ' + option_name)
if handler.config.has_option(option_name):
return handler.config.get(option_name)
return
@register_rpc('/config/set')
def rpc_config_set(handler, options):
"""
Set options in the server's configuration. Any changes to the
server's configuration are not written to disk.
:param dict options: A dictionary of option names and values
"""
for option_name, option_value in options.items():
if option_name not in CONFIG_WRITEABLE:
raise errors.KingPhisherPermissionError('permission denied to write config option: ' + option_name)
handler.config.set(option_name, option_value)
return
@register_rpc('/campaign/new', database_access=True, log_call=True)
def rpc_campaign_new(self, session, name, description=None):
"""
Create a new King Phisher campaign and initialize the database
information.
:param str name: The new campaign's name.
:param str description: The new campaign's description.
:return: The ID of the new campaign.
:rtype: int
"""
if session.query(db_models.Campaign).filter_by(name=name).count():
raise ValueError('the specified campaign name already exists')
campaign = db_models.Campaign(name=name, description=description, user_id=self.rpc_session.user)
campaign.assert_session_has_permissions('c', self.rpc_session)
session.add(campaign)
session.commit()
return campaign.id
@register_rpc('/campaign/alerts/is_subscribed', database_access=True, log_call=True)
def rpc_campaign_alerts_is_subscribed(self, session, campaign_id):
"""
Check if the user is subscribed to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
:return: The alert subscription status.
:rtype: bool
"""
username = self.rpc_session.user
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
return query.count()
@register_rpc('/campaign/alerts/subscribe', database_access=True, log_call=True)
def rpc_campaign_alerts_subscribe(handler, session, campaign_id):
"""
Subscribe to alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = handler.rpc_session.user
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
if query.count() == 0:
subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username)
subscription.assert_session_has_permissions('c', handler.rpc_session)
session.add(subscription)
session.commit()
@register_rpc('/campaign/alerts/unsubscribe', database_access=True, log_call=True)
def rpc_campaign_alerts_unsubscribe(handler, session, campaign_id):
"""
Unsubscribe from alerts for the specified campaign.
:param int campaign_id: The ID of the campaign.
"""
username = handler.rpc_session.user
query = session.query(db_models.AlertSubscription)
query = query.filter_by(campaign_id=campaign_id, user_id=username)
subscription = query.first()
if subscription:
subscription.assert_session_has_permissions('d', handler.rpc_session)
session.delete(subscription)
session.commit()
@register_rpc('/campaign/landing_page/new', database_access=True, log_call=True)
def rpc_campaign_landing_page_new(handler, session, campaign_id, hostname, page):
"""
Add a landing page for the specified campaign. Landing pages refer
to resources that, when visited by a user, should cause the visit
counter to be incremented.
:param int campaign_id: The ID of the campaign.
:param str hostname: The hostname which will be used to serve the request.
:param str page: The request resource.
"""
hostname = hostname.split(':', 1)[0]
page = page.lstrip('/')
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page)
if query.count() == 0:
landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page)
landing_page.assert_session_has_permissions('c', handler.rpc_session)
session.add(landing_page)
session.commit()
@register_rpc('/campaign/message/new', database_access=True, log_call=True)
def rpc_campaign_message_new(handler, session, campaign_id, email_id, target_email, first_name, last_name, department_name=None):
"""
Record a message that has been sent as part of a campaign. These
details can be retrieved later for value substitution in template
pages.
:param int campaign_id: The ID of the campaign.
:param str email_id: The message id of the sent email.
:param str target_email: The email address that the message was sent to.
:param str first_name: The first name of the message's recipient.
:param str last_name: The last name of the message's recipient.
:param str department_name: The name of the company department that the message's recipient belongs to.
"""
department = None
if department_name is not None:
department = session.query(db_models.CompanyDepartment).filter_by(name=department_name).first()
if department is None:
department = db_models.CompanyDepartment(name=department_name)
department.assert_session_has_permissions('c', handler.rpc_session)
session.add(department)
session.commit()
message = db_models.Message()
message.id = email_id
message.campaign_id = campaign_id
message.target_email = target_email
message.first_name = first_name
message.last_name = last_name
if department is not None:
message.company_department_id = department.id
message.assert_session_has_permissions('c', handler.rpc_session)
session.add(message)
session.commit()
@register_rpc('/campaign/stats', database_access=True, log_call=True)
def rpc_campaign_stats(handler, session, campaign_id):
"""
Generate statistics regarding the specified campaign and return them in a
dictionary. The dictionary will contain the keys credentials,
credentials-unique, messages, visits, visits-unique. Values with unique in
the key are counted uniquely by the message id with which they are associated.
:param campaign_id: The unique ID of the campaign to generate statistics for.
:return: The statistics for the specified campaign.
:rtype: dict
"""
stats = {}
stats['credentials'] = session.query(db_models.Credential).filter_by(campaign_id=campaign_id).count()
stats['credentials-unique'] = session.query(db_models.Credential).filter_by(campaign_id=campaign_id).distinct(db_models.Credential.message_id).count()
stats['messages'] = session.query(db_models.Message).filter_by(campaign_id=campaign_id).count()
stats['visits'] = session.query(db_models.Visit).filter_by(campaign_id=campaign_id).count()
stats['visits-unique'] = session.query(db_models.Visit).filter_by(campaign_id=campaign_id).distinct(db_models.Visit.message_id).count()
return stats
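# Editor's sketch of a typical return value from rpc_campaign_stats (numbers
# are invented):
#   {'credentials': 12, 'credentials-unique': 9, 'messages': 250,
#    'visits': 75, 'visits-unique': 60}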
@register_rpc('/db/table/count', database_access=True)
def rpc_database_count_rows(handler, session, table_name, query_filter=None):
"""
Get a count of the rows in the specified table where the search
criteria match.
:param str table_name: The name of the database table to query.
:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
:return: The number of matching rows.
:rtype: int
"""
table = database_table_objects.get(table_name)
assert table
query_filter = query_filter or {}
columns = database_tables[table_name]
for column in query_filter.keys():
assert column in columns
query = session.query(table)
query = query.filter_by(**query_filter)
return query.count()
@register_rpc('/db/table/view', database_access=True)
def rpc_database_view_rows(handler, session, table_name, page=0, query_filter=None):
"""
Retrieve the rows from the specified table where the search
criteria match.
:param str table_name: The name of the database table to query.
:param int page: The page number to retrieve results for.
:param dict query_filter: A dictionary mapping optional search criteria for matching the query.
:return: A dictionary with columns and rows keys.
:rtype: dict
"""
table = database_table_objects.get(table_name)
assert table
query_filter = query_filter or {}
columns = database_tables[table_name]
for column in query_filter.keys():
assert column in columns
offset = page * VIEW_ROW_COUNT
# it's critical that the columns are in the order that the client is expecting
rows = []
query = session.query(table)
query = query.filter_by(**query_filter)
total_rows = query.count()
for row in query[offset:]:
if len(rows) == VIEW_ROW_COUNT:
break
if row.session_has_permissions('r', handler.rpc_session):
rows.append([getattr(row, c) for c in columns])
if not len(rows):
return None
return {'columns': columns, 'rows': rows, 'total_rows': total_rows, 'page_size': VIEW_ROW_COUNT}
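# Editor's sketch: how a caller might page through the structure returned
# above. The rpc_call name is hypothetical and stands in for whatever
# client-side RPC invocation mechanism is in use.
#
# page = 0
# while True:
#     result = rpc_call('/db/table/view', 'messages', page=page)
#     if result is None:
#         break
#     process(result['rows'])  # each row is ordered like result['columns']
#     page += 1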
@register_rpc('/db/table/delete', database_access=True, log_call=True)
def rpc_database_delete_row_by_id(handler, session, table_name, row_id):
"""
Delete the row from the table with the specified value in the id column.
If the row does not exist, no error is raised.
:param str table_name: The name of the database table to delete a row from.
:param row_id: The id value.
"""
table = database_table_objects.get(table_name)
assert table
row = db_manager.get_row_by_id(session, table, row_id)
if row is None:
logger = logging.getLogger('KingPhisher.Server.API.RPC')
logger.debug("received delete request for non existing row with id {0} from table {1}".format(row_id, table_name))
return
row.assert_session_has_permissions('d', handler.rpc_session)
session.delete(row)
session.commit()
@register_rpc('/db/table/delete/multi', database_access=True, log_call=True)
def rpc_database_delete_rows_by_id(handler, session, table_name, row_ids):
"""
Delete multiple rows from a table with the specified values in the id
column. If a row id specified in *row_ids* does not exist, then it will
be skipped and no error will be raised.
:param str table_name: The name of the database table to delete rows from.
:param list row_ids: The row ids to delete.
:return: The row ids that were deleted.
:rtype: list
"""
table = database_table_objects.get(table_name)
assert table
deleted_rows = []
for row_id in row_ids:
row = db_manager.get_row_by_id(session, table, row_id)
if not row:
continue
if not row.session_has_permissions('d', handler.rpc_session):
continue
session.delete(row)
deleted_rows.append(row_id)
session.commit()
return deleted_rows
@register_rpc('/db/table/get', database_access=True)
def rpc_database_get_row_by_id(handler, session, table_name, row_id):
"""
Retrieve a row from a given table with the specified value in the
id column.
:param str table_name: The name of the database table to retrieve a row from.
:param row_id: The id value.
:return: The specified row data.
:rtype: dict
"""
table = database_table_objects.get(table_name)
assert table
columns = database_tables[table_name]
row = db_manager.get_row_by_id(session, table, row_id)
if row:
row.assert_session_has_permissions('r', handler.rpc_session)
row = dict(zip(columns, (getattr(row, c) for c in columns)))
return row
@register_rpc('/db/table/insert', database_access=True)
def rpc_database_insert_row(handler, session, table_name, keys, values):
"""
Insert a new row into the specified table.
:param str table_name: The name of the database table to insert a new row into.
:param tuple keys: The column names of *values*.
:param tuple values: The values to be inserted in the row.
:return: The id of the new row that has been added.
"""
if not isinstance(keys, (list, tuple)):
keys = (keys,)
if not isinstance(values, (list, tuple)):
values = (values,)
assert len(keys) == len(values)
for key, value in zip(keys, values):
assert key in database_tables[table_name]
table = database_table_objects.get(table_name)
assert table
row = table()
for key, value in zip(keys, values):
setattr(row, key, value)
row.assert_session_has_permissions('c', handler.rpc_session)
session.add(row)
session.commit()
return row.id
@register_rpc('/db/table/set', database_access=True)
def rpc_database_set_row_value(handler, session, table_name, row_id, keys, values):
"""
Set values for a row in the specified table with an id of *row_id*.
:param str table_name: The name of the database table to set the values of the specified row.
:param tuple keys: The column names of *values*.
:param tuple values: The values to be updated in the row.
"""
if not isinstance(keys, (list, tuple)):
keys = (keys,)
if not isinstance(values, (list, tuple)):
values = (values,)
assert len(keys) == len(values)
for key, value in zip(keys, values):
assert key in database_tables[table_name]
table = database_table_objects.get(table_name)
assert table
row = db_manager.get_row_by_id(session, table, row_id)
assert row
for key, value in zip(keys, values):
setattr(row, key, value)
row.assert_session_has_permissions('u', handler.rpc_session)
session.commit()
@register_rpc('/geoip/lookup', log_call=True)
def rpc_geoip_lookup(handler, ip, lang=None):
"""
Look up an IP address in the server's GeoIP database. If the IP address
cannot be found in the database, None will be returned.
:param str ip: The IP address to look up.
:param str lang: The language to prefer for regional names.
:return: The geographic information for the specified IP address.
:rtype: dict
"""
try:
result = geoip.lookup(ip, lang=lang)
except geoip.AddressNotFoundError:
result = None
return result
@register_rpc('/geoip/lookup/multi', log_call=True)
def rpc_geoip_lookup_multi(handler, ips, lang=None):
"""
Look up multiple IP addresses in the server's GeoIP database. Each IP
address that cannot be found in the database will have its result set
to None.
:param list ips: The list of IP addresses to look up.
:param str lang: The language to prefer for regional names.
:return: A dictionary containing the results keyed by the specified IP
addresses.
:rtype: dict
"""
results = {}
for ip in ips:
try:
result = geoip.lookup(ip, lang=lang)
except geoip.AddressNotFoundError:
result = None
results[ip] = result
return results
@register_rpc('/login', database_access=True)
def rpc_login(handler, session, username, password, otp=None):
logger = logging.getLogger('KingPhisher.Server.Authentication')
if not ipaddress.ip_address(handler.client_address[0]).is_loopback:
logger.warning("failed login request from {0} for user {1}, (invalid source address)".format(handler.client_address[0], username))
raise ValueError('invalid source address for login')
fail_default = (False, ConnectionErrorReason.ERROR_INVALID_CREDENTIALS, None)
fail_otp = (False, ConnectionErrorReason.ERROR_INVALID_OTP, None)
if not (username and password):
logger.warning("failed login request from {0} for user {1}, (missing username or password)".format(handler.client_address[0], username))
return fail_default
if not handler.server.forked_authenticator.authenticate(username, password):
logger.warning("failed login request from {0} for user {1}, (authentication failed)".format(handler.client_address[0], username))
return fail_default
user = db_manager.get_row_by_id(session, db_models.User, username)
if not user:
logger.info('creating new user object with id: ' + username)
user = db_models.User(id=username)
session.add(user)
session.commit()
elif user.otp_secret:
if otp is None:
logger.debug("failed login request from {0} for user {1}, (missing otp)".format(handler.client_address[0], username))
return fail_otp
if not (isinstance(otp, str) and len(otp) == 6 and otp.isdigit()):
logger.warning("failed login request from {0} for user {1}, (invalid otp)".format(handler.client_address[0], username))
return fail_otp
totp = pyotp.TOTP(user.otp_secret)
now = datetime.datetime.now()
if otp not in (totp.at(now + datetime.timedelta(seconds=offset)) for offset in (0, -30, 30)):
logger.warning("failed login request from {0} for user {1}, (invalid otp)".format(handler.client_address[0], username))
return fail_otp
session_id = handler.server.session_manager.put(username)
logger.info("successful login request from {0} for user {1}".format(handler.client_address[0], username))
signals.rpc_user_logged_in.send(handler, session=session_id, name=username)
return True, ConnectionErrorReason.SUCCESS, session_id
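# Editor's sketch: a helper showing how a caller could derive the 6-digit value
# that the OTP check above expects, using the same pyotp library. The default
# secret below is a made-up example; real secrets come from the user's
# otp_secret database column.
def _example_current_otp(otp_secret='JBSWY3DPEHPK3PXP'):
    """Return the TOTP code for the current 30-second window (illustrative only)."""
    return pyotp.TOTP(otp_secret).now()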
@register_rpc('/logout', log_call=True)
def rpc_logout(handler):
username = handler.rpc_session.user
handler.server.session_manager.remove(handler.rpc_session_id)
logger = logging.getLogger('KingPhisher.Server.Authentication')
logger.info("successful logout request from {0} for user {1}".format(handler.client_address[0], username))
signals.rpc_user_logged_out.send(handler, session=handler.rpc_session_id, name=username)
@register_rpc('/plugins/list', log_call=True)
def rpc_plugins_list(handler):
"""
Return information regarding enabled plugins in the server.
:return: A dictionary representing enabled plugins and their meta-data.
:rtype: dict
"""
plugin_manager = handler.server.plugin_manager
plugins = {}
for _, plugin in plugin_manager:
plugins[plugin.name] = {
'description': plugin.formatted_description,
'name': plugin.name,
'title': plugin.title,
'version': plugin.version
}
return plugins
|
test_smtpserver.py
|
# Copyright The IETF Trust 2014-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import smtpd
import threading
import asyncore
import debug # pyflakes:ignore
class AsyncCoreLoopThread(object):
def wrap_loop(self, exit_condition, timeout=1.0, use_poll=False, map=None):
if map is None:
map = asyncore.socket_map
while map and not exit_condition:
asyncore.loop(timeout=timeout, use_poll=use_poll, map=map, count=1)
def start(self):
"""Start the listening service"""
self.exit_condition = []
kwargs={'exit_condition':self.exit_condition,'timeout':1.0}
self.thread = threading.Thread(target=self.wrap_loop, kwargs=kwargs)
self.thread.daemon = True
self.thread.start()
def stop(self):
"""Stop the listening service"""
self.exit_condition.append(True)
self.thread.join()
class SMTPTestChannel(smtpd.SMTPChannel):
# mail_options = ['BODY=8BITMIME', 'SMTPUTF8']
def smtp_RCPT(self, arg):
if not self.mailfrom:
self.push(str('503 Error: need MAIL command'))
return
arg = self._strip_command_keyword('TO:', arg)
address, __ = self._getaddr(arg)
if not address:
self.push(str('501 Syntax: RCPT TO: <address>'))
return
if "poison" in address:
self.push(str('550 Error: Not touching that'))
return
self.rcpt_options = []
self.rcpttos.append(address)
self.push(str('250 Ok'))
class SMTPTestServer(smtpd.SMTPServer):
def __init__(self,localaddr,remoteaddr,inbox):
if inbox is not None:
self.inbox=inbox
else:
self.inbox = []
smtpd.SMTPServer.__init__(self,localaddr,remoteaddr)
def handle_accept(self):
pair = self.accept()
if pair is not None:
conn, addr = pair
#channel = SMTPTestChannel(self, conn, addr)
SMTPTestChannel(self, conn, addr)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=[], rcpt_options=[]):
self.inbox.append(data)
class SMTPTestServerDriver(object):
def __init__(self, localaddr, remoteaddr, inbox=None):
self.localaddr=localaddr
self.remoteaddr=remoteaddr
if inbox is not None:
self.inbox = inbox
else:
self.inbox = []
self.thread_driver = None
def start(self):
self.smtpserver = SMTPTestServer(self.localaddr,self.remoteaddr,self.inbox)
self.thread_driver = AsyncCoreLoopThread()
self.thread_driver.start()
def stop(self):
if self.thread_driver:
self.thread_driver.stop()
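# Editor's sketch: a self-contained way to exercise the test server when this
# module is run directly. The address, port and message below are arbitrary
# examples; the module's own imports (smtpd, asyncore, debug) are assumed to
# resolve as they do in the test environment.
if __name__ == '__main__':
    import smtplib
    inbox = []
    driver = SMTPTestServerDriver(('localhost', 2025), None, inbox=inbox)
    driver.start()
    try:
        with smtplib.SMTP('localhost', 2025) as client:
            client.sendmail('from@example.com', ['to@example.com'],
                            'Subject: test\r\n\r\nhello')
    finally:
        driver.stop()
    print('captured %d message(s)' % len(inbox))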
|
ui.pyw
|
import threading
import bulbabot
import iomanage
import asyncio
import time
from PySide2.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, \
QLabel, QListWidget, QListWidgetItem, QLineEdit, QCheckBox, QComboBox
from PySide2.QtCore import Signal, Slot, QObject
Default_Settings = {
"Blacklisted Servers": [], # [serv_id, serv_id, serv_id]
"Pokefarm": {
"Mode": 0, # 0 = Off, 1 = slow (30 - 50 seconds), 2 = Medium (8 - 15 seconds), 3 = Fast (1 - 6 seconds)
"Channel": None # Channel ID
},
"Autocatcher": {
"Mode": 0, # 0 = Off, 1 = Catch All, 2 = Legendary Only, 3 = Blacklist
"Blacklist": [], # Blacklisted pokemon
"Safe": True, # Try to look human
"TimeMode": "w", # W = Whitelist, B = Blacklist
"BlacklistMode": "w", # W = Whitelist, B = Blacklist
"ToCatch": 1, # % Of spawned pokemon to catch, 1 = 100%
"TimeSettings": {
# Server_ID: {"24/7": True, "Day<Num,1-7>": [[Hour1, min1], [Hour2, min2], ..], ..}
}
},
"ClientToken": None, # Token
"RunOnStart": False
}
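# Editor's sketch: a concrete (made-up) "TimeSettings" entry following the
# format described in the comment above -- catch between 09:00 and 17:30 on
# day 1 for a single server:
#   "123456789012345678": {"24/7": False, "Day1": [[9, 0], [17, 30]]}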
class TerminalTab(QWidget):
def __init__(self, p):
super().__init__()
self.p = p
RootLayout = QVBoxLayout()
self.setLayout(RootLayout)
self.List = QListWidget()
RootLayout.addWidget(self.List)
self.hide()
#self.print_wrap.connect(self.print)
@Slot(str)
def print(self, string):
item = QListWidgetItem()
item.setText(string)
self.List.addItem(item)
self.List.scrollToBottom()
class AutoCatchTab(QWidget):
def __init__(self, p):
super().__init__()
self.p = p
d = p.io.Read()
RootLayout = QVBoxLayout()
self.setLayout(RootLayout)
## Modes ##
ModeW = QWidget()
RootLayout.addWidget(ModeW)
ModeL = QHBoxLayout()
ModeW.setLayout(ModeL)
ModeLabel = QLabel("Autocatcher Mode")
ModeL.addWidget(ModeLabel)
ModeC = QComboBox()
ModeL.addWidget(ModeC)
ModeC.addItem("Off")
ModeC.addItem("Catch All")
ModeC.addItem("Legendary")
ModeC.addItem("Blacklist/Whitelisted")
ModeC.setCurrentIndex(d["Autocatcher"]["Mode"])
ModeC.currentIndexChanged.connect(self.ChangedMode)
space = QWidget()
ModeL.addWidget(space)
#ModeL.setStretchFactor(space, 1)
HModeB = QCheckBox()
ModeL.addWidget(HModeB)
HModeB.setText("Act Human")
HModeB.setChecked(bool(d["Autocatcher"]["Safe"]))
HModeB.clicked.connect(self.ChangeHuman)
space = QWidget()
ModeL.addWidget(space)
ModeL.setStretchFactor(space, 3)
## Spacers
space = QWidget()
RootLayout.addWidget(space)
RootLayout.setStretchFactor(space, 30)
self.hide()
def ChangedMode(self, i):
id = self.p.io.GetId()
d = self.p.io.Read(True, id)
d["Autocatcher"]["Mode"] = i
self.p.io.Write(d, id)
def ChangeHuman(self, c):
id = self.p.io.GetId()
d = self.p.io.Read(True, id)
d["Autocatcher"]["Mode"] = c
self.p.io.Write(d, id)
class PokeFarmTab(QWidget):
def _switched(self):
self.SetupServC()
#self.SetupChanC(0)
def __init__(self, p):
super().__init__()
self.p = p
self.catchchange = False
d = p.io.Read()
RootLayout = QVBoxLayout()
self.setLayout(RootLayout)
## Modes ##
ModeW = QWidget()
RootLayout.addWidget(ModeW)
ModeL = QHBoxLayout()
ModeW.setLayout(ModeL)
ModeLabel = QLabel("Pokefarmer Mode")
ModeL.addWidget(ModeLabel)
ModeC = QComboBox()
ModeL.addWidget(ModeC)
ModeC.addItem("Off")
ModeC.addItem("Slow (30s - 50s between messages)")
ModeC.addItem("Medium (8s - 15s between messages)")
ModeC.addItem("Fast (1s - 6s between messages)")
ModeC.addItem("Bot (1s - 2s between messages)")
ModeC.setCurrentIndex(d["Pokefarm"]["Mode"])
ModeC.currentIndexChanged.connect(self.ChangedMode)
space = QWidget()
ModeL.addWidget(space)
ModeL.setStretchFactor(space, 3)
## Channel ##
CW = QWidget()
RootLayout.addWidget(CW)
CL = QHBoxLayout()
CW.setLayout(CL)
ServLabel = QLabel("Server")
CL.addWidget(ServLabel)
self.ServC = QComboBox()
CL.addWidget(self.ServC)
self.SetupServC()
ChanLabel = QLabel("Channel")
CL.addWidget(ChanLabel)
self.ChanC = QComboBox()
CL.addWidget(self.ChanC)
self.SetupChanC(self.ServC.currentIndex())
space = QWidget()
CL.addWidget(space)
CL.setStretchFactor(self.ChanC, 3)
CL.setStretchFactor(self.ServC, 3)
#CL.setStretchFactor(space, 5)
## Spacers
space = QWidget()
RootLayout.addWidget(space)
RootLayout.setStretchFactor(space, 30)
# End
self.ServC.currentIndexChanged.connect(self.SetupChanC)
self.ChanC.currentTextChanged.connect(self.ChangedChannel)
self.hide()
def SetupServC(self):
if self.p.bot == None or self.p.botthread == None:
self.ServC.clear()
self.ServC.addItem("Bot needs to be on to change this option.")
elif not self.p.bot.is_ready():
self.ServC.clear()
self.ServC.addItem("Please wait for bot to start before changing this option.")
else:
self.ServC.clear()
c = self.p.io.Read()["Pokefarm"]["Channel"]
g = None
if c != None:
g = self.p.bot.get_channel(c).guild.id
servs = self.p.bot.guilds
servnames = []
self.ServC.addItem(" ")
i = 0
ci = 1
for guild in servs:
#servnames.append(guild.name)
self.ServC.addItem(guild.name + " (" + str(guild.id) + ")")
if guild.id == g: i = ci
ci += 1
self.ServC.setCurrentIndex(i)
self.SetupChanC(0 if c == None else c)
#self.ServC.addItems(*servnames)
def SetupChanC(self, i=None):
self.catchchange = True
if self.p.bot == None or self.p.botthread == None:
self.ChanC.clear()
elif not self.p.bot.is_ready():
self.ChanC.clear()
elif i == 0:
self.ChanC.clear()
else:
self.ChanC.clear()
self.catchchange = True
n = self.ServC.currentText()
for guild in self.p.bot.guilds:
if n == guild.name + " (" + str(guild.id) + ")": break
self.ChanC.addItem(" ")
self.catchchange = True
index = 0
ci = 1
for chan in guild.channels:
if chan.type == bulbabot.discord.ChannelType.text:
self.ChanC.addItem(chan.name + " (" + str(chan.id) + ")")
self.p.dbprint(chan.name + " (" + str(chan.id) + ")")
if i != None and chan.id == i:
index = ci
ci += 1
for x in range(0, self.ChanC.count()): self.p.dbprint(self.ChanC.itemText(x))
if len(str(i)) == 18:
self.catchchange = True
self.ChanC.setCurrentIndex(index)
def ChangedChannel(self, t):
if self.catchchange:
self.catchchange = False
return
try:
if t == " ":
d = self.p.io.Read()
d["Pokefarm"]["Channel"] = None
self.p.io.Write(d)
return
self.p.dbprint(t)
id = int(t.split("(")[-1].split(")")[0])
d = self.p.io.Read()
d["Pokefarm"]["Channel"] = id
self.p.io.Write(d)
#asyncio.ensure_future(self.p.bot.Farm(), loop=self.p.bot.loop)
except Exception as e:
self.p.dbprint(e)
print("[UI] Tried to set farming channel, but failed!")
def ChangedMode(self, i):
d = self.p.io.Read()
d["Pokefarm"]["Mode"] = i
self.p.io.Write(d)
if not self.p.bot == None:
if not self.p.bot.pkfm_running:
asyncio.ensure_future(self.p.bot.Farm(), loop=self.p.bot.loop)
class ClientSettingTab(QWidget):
def ChangeToken(self, qle):
t = qle.text()
id = self.p.io.GetId()
d = self.p.io.Read(waitforwrite=True, id=id)
d["ClientToken"] = t
self.p.io.Write(d, id=id)
def ROSChange(self, state):
d = self.p.io.Read()
if state != 0: state = True
else: state = False
d["RunOnStart"] = state
self.p.io.Write(d)
def __init__(self, p):
super().__init__()
self.p = p
d = p.io.Read()
t = d["ClientToken"]
RootLayout = QVBoxLayout()
self.setLayout(RootLayout)
## Token Input ##
TokenInsert = QWidget()
RootLayout.addWidget(TokenInsert)
TokenInsertL = QHBoxLayout()
TokenInsert.setLayout(TokenInsertL)
Label = QLabel("Token")
TokenInsertL.addWidget(Label)
#Spacer = QWidget()
#TokenInsertL.addWidget(Spacer)
TokenEdit = QLineEdit()
TokenEdit.setPlaceholderText("Enter user token here...")
TokenEdit.returnPressed.connect(lambda: self.ChangeToken(TokenEdit))
if t != None: TokenEdit.setText(t)
TokenInsertL.addWidget(TokenEdit)
SetButton = QPushButton("Set Token")
TokenInsertL.addWidget(SetButton)
SetButton.clicked.connect(lambda: self.ChangeToken(TokenEdit))
## Run-On-Start Toggle ##
ROSW = QWidget()
RootLayout.addWidget(ROSW)
ROSWL = QHBoxLayout()
ROSW.setLayout(ROSWL)
#ROSLabel = QLabel("Run Bot On App Start")
#ROSWL.addWidget(ROSLabel)
ROSCB = QCheckBox()
ROSWL.addWidget(ROSCB)
ROSCB.setText("Run Bot on App Start")
ROSCB.setChecked(d["RunOnStart"])
ROSCB.stateChanged.connect(self.ROSChange)
#########
ESpacer = QWidget()
RootLayout.addWidget(ESpacer)
RootLayout.setStretchFactor(ESpacer, 30)
self.hide()
class MainWindow(QWidget):
print_wrap = Signal(str)
def print(self, s):
self.print_wrap.emit(str(s))
def ChangeTab(self, tab):
if not self.active == None:
self.active.hide()
try:
tab._switched()
except:
pass
self.active = tab
tab.show()
def stop_bot(self):
try:
self.StartButton.setEnabled(False)
self.StopButton.setEnabled(False)
asyncio.ensure_future(self.bot.close(), loop = self.bot.loop)
while self.botthread.is_alive():
time.sleep(.1)
print("[UI] Stopped bot.")
self.botthread = None
self.bot = None
except:
print("[UI] Tried to stop bot, but bot wasn't running!")
self.StartButton.setEnabled(True)
self.StopButton.setEnabled(True)
def start_bot(self):
if self.botthread == None or not self.botthread.is_alive():
self.StartButton.setEnabled(False)
self.StopButton.setEnabled(False)
print("[UI] Starting bot...")
asyncio.set_event_loop(asyncio.new_event_loop())
self.bot = bulbabot.bot(self.io)
self.botthread = threading.Thread(target = self.bot.run, args=["MAIN"],\
kwargs={"ror": [[self.StopButton.setEnabled, True], [self.StartButton.setEnabled, True], [self.SetupServ]]})#, [self.SetupChan, 0]]})
self.botthread.daemon = True
self.botthread.start()
#self.StartButton.setEnabled(True)
else:
print("[UI] Tried to start bot, but bot was already running!")
def __init__(self):
global print
super().__init__()
self.dbprint = print
print = self.print
self.io = iomanage.IOManager("configs.json")
d = self.io.Read()
if d == {}:
self.io.Write(Default_Settings)
d = Default_Settings
bulbabot.print = self.print
self.bot = None
self.botthread = None
#self.botthread = threading.Thread(target = self.bot.run)
#self.botthread.daemon = True
#self.botthread.start()
RootLayout = QHBoxLayout()
self.setLayout(RootLayout)
##### Tab Bar ####
TabBar = QWidget()
RootLayout.addWidget(TabBar)
TabBarLayout = QVBoxLayout()
TabBar.setLayout(TabBarLayout)
SSW = QWidget()
TabBarLayout.addWidget(SSW)
SSWL = QHBoxLayout()
SSW.setLayout(SSWL)
self.StartButton = QPushButton("Start Bot")
self.StartButton.clicked.connect(self.start_bot)
self.StopButton = QPushButton("Stop Bot")
self.StopButton.clicked.connect(self.stop_bot)
SSWL.addWidget(self.StartButton)
SSWL.addWidget(self.StopButton)
Spacer = QWidget()
TabBarLayout.addWidget(Spacer)
TerminalButton = QPushButton("Terminal")
TabBarLayout.addWidget(TerminalButton)
AutoCatcherButton = QPushButton("Autocatcher")
TabBarLayout.addWidget(AutoCatcherButton)
PokeFarmerButton = QPushButton("Poke-Farmer")
TabBarLayout.addWidget(PokeFarmerButton)
Spacer1 = QWidget()
TabBarLayout.addWidget(Spacer1)
SettingsButton = QPushButton("Client Settings")
TabBarLayout.addWidget(SettingsButton)
Spacer2 = QWidget()
TabBarLayout.addWidget(Spacer2)
##### Tab Section ####
TabSect = QWidget()
RootLayout.addWidget(TabSect)
TabSectL = QVBoxLayout()
TabSect.setLayout(TabSectL)
## Tabs ##
TermTab = TerminalTab(self)
TabSectL.addWidget(TermTab)
ACTab = AutoCatchTab(self)
TabSectL.addWidget(ACTab)
CSTab = ClientSettingTab(self)
TabSectL.addWidget(CSTab)
PKTab = PokeFarmTab(self)
TabSectL.addWidget(PKTab)
self.SetupServ = PKTab.SetupServC
self.SetupChan = PKTab.SetupChanC
## Sig Connects ##
TerminalButton.clicked.connect(lambda: self.ChangeTab(TermTab))
AutoCatcherButton.clicked.connect(lambda: self.ChangeTab(ACTab))
PokeFarmerButton.clicked.connect(lambda: self.ChangeTab(PKTab))
SettingsButton.clicked.connect(lambda: self.ChangeTab(CSTab))
##### Sizing #####
TabBarLayout.setStretchFactor(Spacer, 2)
TabBarLayout.setStretchFactor(Spacer1, 2)
TabBarLayout.setStretchFactor(Spacer2, 20)
RootLayout.setStretchFactor(TabBar, 1)
RootLayout.setStretchFactor(TabSect, 5)
##### SIG & SLOT ####
#SigObj = SignalObject()
#SigObj.sig.connect(TermTab.print)
self.print_wrap.connect(TermTab.print)
self.showNormal()
TermTab.show()
self.active = TermTab
if d["RunOnStart"]: self.start_bot()
app = QApplication([])
Window = MainWindow()
Window.setWindowTitle("BulbaBot")
app.exec_()
Window.stop_bot()
Window.io.Stop()
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (check_warnings, import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import time
import inspect
try:
import threading
except ImportError:
threading = None
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
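# i.e. exactly the signals in `expected` are set on context.<attr>; all other
# signals must be clear.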
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
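# init() installs a permissive context: precision 9, ROUND_HALF_EVEN, and every
# trap disabled, so operations record flags instead of raising.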
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# If set, re-run each operation with each of its expected conditions trapped, to
# make sure the corresponding error is actually raised and not just recorded in
# the flags. Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
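# A .decTest line is either a directive such as "precision: 9" or an equation of
# the form "<id> <operation> <operands...> -> <result> [<conditions...>]", for
# example a line like "addx001 add 1 1 -> 2". eval_line() strips "--" comments
# and dispatches to eval_directive() or eval_equation() accordingly.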
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
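# e.g. FixQuotes("'abc''def'") -> "abc'def": doubled quotes inside a quoted
# operand become literal quotes and the enclosing quotes are dropped.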
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# issue 22090
('<^+15.20%', 'inf', '<<+Infinity%<<<'),
('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
('=10.10%', 'NaN123', ' NaN123%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
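# The C implementation's format override expects `grouping` as a string of code
# points, while the pure-Python version takes a list of ints; make_grouping()
# builds whichever form the implementation under test needs.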
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
import locale
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator'
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator'
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
# Take care when executing this test from IDLE: there's an issue in threading
# that hangs IDLE, and its cause has not been tracked down.
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
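# Synchronization: thfunc2 lowers its own thread-local precision to 18 and then
# sets `synchro`; thfunc1 waits on it before recomputing 1/3 and still expects
# 24 digits, i.e. the context change must not leak between threads. finish1 and
# finish2 let the main thread wait for both workers before checking that
# DefaultContext's flags are untouched.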
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
@unittest.skipUnless(threading, 'threading required')
class CThreadingTest(ThreadingTest):
decimal = C
@unittest.skipUnless(threading, 'threading required')
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hashit(int(value)))
#the same hash as the equivalent int
self.assertEqual(hashit(Decimal(23)), hashit(23))
self.assertRaises(TypeError, hash, Decimal('sNaN'))
self.assertTrue(hashit(Decimal('Inf')))
self.assertTrue(hashit(Decimal('-Inf')))
# check that the hashes of a Decimal and a float match when they
# represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(f), hashit(d))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
Decimal = self.decimal.Decimal
# exceptional cases
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('inf'))
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('-inf'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('-nan'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('snan123'))
for exp in range(-4, 2):
for coeff in range(1000):
for sign in '+', '-':
d = Decimal('%s%dE%d' % (sign, coeff, exp))
pq = d.as_integer_ratio()
p, q = pq
# check return type
self.assertIsInstance(pq, tuple)
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
# check normalization: q should be positive;
# p should be relatively prime to q.
self.assertGreater(q, 0)
self.assertEqual(math.gcd(p, q), 1)
# check that p/q actually gives the correct value
self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
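            # capitals=0 selects the lowercase exponent marker for string
            # output, which the to_eng_string() check below relies on.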
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d, proto)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
pass
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
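        # Decimal provides the complex-number accessors: .real is the value
        # itself, .imag is zero, and conjugate() returns the value unchanged.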
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
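            # fma() also accepts its operands as keywords; at prec 1,
            # 9.9 * 7 + 0.9 = 70.2 rounds to 7E+1.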
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
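            # When the C version is not available, fall back to pickling
            # within the pure-Python version only.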
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
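    def test_clamp_example(self):
        # A minimal sketch of what the public ``clamp`` attribute controls:
        # with clamp=1 the largest allowed exponent is Emax - prec + 1, so a
        # result such as 1E+99 is folded down by padding the coefficient
        # with zeros.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        c = Context(prec=6, Emax=99, Emin=-99, clamp=1)
        self.assertEqual(str(c.plus(Decimal('1e99'))), '1.00000E+99')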
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
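        # The three-argument (modular) form accepts keyword arguments:
        # pow(2, 8, 5) == 256 % 5 == 1.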
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
def raise_error(context, flag):
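            # The C implementation has no Context._raise_error(), so set the
            # flag by hand and honour the corresponding trap ourselves.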
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
context = Context(prec=9, Emin = -425000000, Emax = 425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
                new_flags = [k for k, v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
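        # An ordinary dict built from the SignalDict compares equal to it.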
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
        FloatOperation = self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(C.__libmpdec_version__, P.__libmpdec_version__)
self.assertEqual(dir(C), dir(P))
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
        # XXX Should adjusted() raise for NaN/Infinity instead of returning 0?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c,'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000026')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
for attr in dir(P):
if attr.startswith('_'):
continue
p_func = getattr(P, attr)
c_func = getattr(C, attr)
if (attr == 'Decimal' or attr == 'Context' or
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x for x in p_sig.parameters.keys() if not
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
c_kind = [x.kind for x in c_sig.parameters.values()]
p_kind = [x[1].kind for x in p_sig.parameters.items() if not
x[0].startswith('_')]
# parameters:
if attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch in %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
def mkargs(module, sig):
args = []
kwargs = {}
for name, param in sig.parameters.items():
if name == 'self': continue
if param.kind == POS:
args.append(pdict[module][name])
elif param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
else:
raise TestFailed("unexpected parameter kind")
return args, kwargs
def tr(s):
"""The C Context docstrings use 'x' in order to prevent confusion
with the article 'a' in the descriptions."""
if s == 'x': return 'a'
if s == 'y': return 'b'
if s == 'z': return 'c'
return s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
for attr in dir(p_type):
if attr.startswith('_'):
continue
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
if inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) for x in c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
p_kind = [x.kind for x in p_sig.parameters.values()]
c_kind = [x.kind for x in c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
if ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch in %s" % p_func)
else: # Context methods are positional only in the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch in %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
try:
getattr(c_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
try:
getattr(p_type(9), attr)(*args, **kwds)
except Exception as err:
raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
    # Dynamically build a custom test definition for each file in the test
    # directory and add the definitions to the IBM test case classes. This
    # procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
    if sys.modules['decimal'] is not orig_sys_decimal:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
coach.py
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('.')
import copy
from configparser import ConfigParser, Error
import os
from rl_coach import logger
import traceback
from rl_coach.logger import screen, failed_imports
import argparse
import atexit
import time
import json
from rl_coach.base_parameters import Frameworks, VisualizationParameters, TaskParameters, DistributedTaskParameters, \
RunType, DistributedCoachSynchronizationType
from rl_coach.core_types import TotalStepsCounter, RunPhase, PlayingStepsType, TrainingSteps, EnvironmentEpisodes, \
EnvironmentSteps, StepMethod, Transition
from multiprocessing import Process
from multiprocessing.managers import BaseManager
import subprocess
from glob import glob
from rl_coach.graph_managers.graph_manager import HumanPlayScheduleParameters, GraphManager
from rl_coach.utils import list_all_presets, short_dynamic_import, get_open_port, SharedMemoryScratchPad, get_base_dir
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.environments.environment import SingleLevelSelection
from rl_coach.memories.backend.redis import RedisPubSubMemoryBackendParameters
from rl_coach.memories.backend.memory_impl import construct_memory_params
from rl_coach.data_stores.data_store import DataStoreParameters
from rl_coach.data_stores.s3_data_store import S3DataStoreParameters
from rl_coach.data_stores.nfs_data_store import NFSDataStoreParameters
from rl_coach.data_stores.redis_data_store import RedisDataStoreParameters
from rl_coach.data_stores.data_store_impl import get_data_store, construct_data_store_params
from rl_coach.training_worker import training_worker
from rl_coach.rollout_worker import rollout_worker
if len(set(failed_imports)) > 0:
screen.warning("Warning: failed to import the following packages - {}".format(', '.join(set(failed_imports))))
def add_items_to_dict(target_dict, source_dict):
updated_task_parameters = copy.copy(source_dict)
updated_task_parameters.update(target_dict)
return updated_task_parameters
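# Illustrative behaviour (hypothetical values): entries already present in target_dict
# take precedence over source_dict, e.g.
#   add_items_to_dict({'seed': 1}, {'seed': 0, 'framework': 'tf'})
#   -> {'seed': 1, 'framework': 'tf'}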
def open_dashboard(experiment_path):
"""
    Open the X11-based dashboard in a new process (non-blocking).
"""
dashboard_path = 'python {}/dashboard.py'.format(get_base_dir())
cmd = "{} --experiment_dir {}".format(dashboard_path, experiment_path)
screen.log_title("Opening dashboard - experiment path: {}".format(experiment_path))
# subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, executable="bash")
subprocess.Popen(cmd, shell=True, executable="bash")
def start_graph(graph_manager: 'GraphManager', task_parameters: 'TaskParameters'):
"""
Runs the graph_manager using the configured task_parameters.
This stand-alone method is a convenience for multiprocessing.
"""
graph_manager.create_graph(task_parameters)
# let the adventure begin
if task_parameters.evaluate_only is not None:
steps_to_evaluate = task_parameters.evaluate_only if task_parameters.evaluate_only > 0 \
else sys.maxsize
graph_manager.evaluate(EnvironmentSteps(steps_to_evaluate))
else:
graph_manager.improve()
graph_manager.close()
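# A minimal sketch of how start_graph is typically driven from a separate process
# (assumed usage; graph_manager and task_parameters are placeholders here):
#
#   p = Process(target=start_graph, args=(graph_manager, task_parameters))
#   p.start()
#   p.join()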
def handle_distributed_coach_tasks(graph_manager, args, task_parameters):
ckpt_inside_container = "/checkpoint"
memory_backend_params = None
if args.memory_backend_params:
memory_backend_params = json.loads(args.memory_backend_params)
memory_backend_params['run_type'] = str(args.distributed_coach_run_type)
graph_manager.agent_params.memory.register_var('memory_backend_params', construct_memory_params(memory_backend_params))
data_store = None
data_store_params = None
if args.data_store_params:
data_store_params = construct_data_store_params(json.loads(args.data_store_params))
data_store_params.expt_dir = args.experiment_path
data_store_params.checkpoint_dir = ckpt_inside_container
graph_manager.data_store_params = data_store_params
data_store = get_data_store(data_store_params)
if args.distributed_coach_run_type == RunType.TRAINER:
task_parameters.checkpoint_save_dir = ckpt_inside_container
training_worker(
graph_manager=graph_manager,
data_store=data_store,
task_parameters=task_parameters,
is_multi_node_test=args.is_multi_node_test
)
if args.distributed_coach_run_type == RunType.ROLLOUT_WORKER:
rollout_worker(
graph_manager=graph_manager,
data_store=data_store,
num_workers=args.num_workers,
task_parameters=task_parameters
)
def handle_distributed_coach_orchestrator(args):
from rl_coach.orchestrators.kubernetes_orchestrator import KubernetesParameters, Kubernetes, \
RunTypeParameters
ckpt_inside_container = "/checkpoint"
arg_list = sys.argv[1:]
try:
i = arg_list.index('--distributed_coach_run_type')
arg_list.pop(i)
arg_list.pop(i)
except ValueError:
pass
trainer_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.TRAINER)] + arg_list
rollout_command = ['python3', 'rl_coach/coach.py', '--distributed_coach_run_type', str(RunType.ROLLOUT_WORKER)] + arg_list
if '--experiment_name' not in rollout_command:
rollout_command = rollout_command + ['--experiment_name', args.experiment_name]
if '--experiment_name' not in trainer_command:
trainer_command = trainer_command + ['--experiment_name', args.experiment_name]
memory_backend_params = None
if args.memory_backend == "redispubsub":
memory_backend_params = RedisPubSubMemoryBackendParameters()
ds_params_instance = None
if args.data_store == "s3":
ds_params = DataStoreParameters("s3", "", "")
ds_params_instance = S3DataStoreParameters(ds_params=ds_params, end_point=args.s3_end_point, bucket_name=args.s3_bucket_name,
creds_file=args.s3_creds_file, checkpoint_dir=ckpt_inside_container, expt_dir=args.experiment_path)
elif args.data_store == "nfs":
ds_params = DataStoreParameters("nfs", "kubernetes", "")
ds_params_instance = NFSDataStoreParameters(ds_params)
elif args.data_store == "redis":
ds_params = DataStoreParameters("redis", "kubernetes", "")
ds_params_instance = RedisDataStoreParameters(ds_params)
else:
raise ValueError("data_store {} found. Expected 's3' or 'nfs'".format(args.data_store))
worker_run_type_params = RunTypeParameters(args.image, rollout_command, run_type=str(RunType.ROLLOUT_WORKER), num_replicas=args.num_workers)
trainer_run_type_params = RunTypeParameters(args.image, trainer_command, run_type=str(RunType.TRAINER))
orchestration_params = KubernetesParameters([worker_run_type_params, trainer_run_type_params],
kubeconfig='~/.kube/config',
memory_backend_parameters=memory_backend_params,
data_store_params=ds_params_instance)
orchestrator = Kubernetes(orchestration_params)
if not orchestrator.setup(args.checkpoint_restore_dir):
print("Could not setup.")
return 1
if orchestrator.deploy_trainer():
print("Successfully deployed trainer.")
else:
print("Could not deploy trainer.")
return 1
if orchestrator.deploy_worker():
print("Successfully deployed rollout worker(s).")
else:
print("Could not deploy rollout worker(s).")
return 1
if args.dump_worker_logs:
screen.log_title("Dumping rollout worker logs in: {}".format(args.experiment_path))
orchestrator.worker_logs(path=args.experiment_path)
exit_code = 1
try:
exit_code = orchestrator.trainer_logs()
except KeyboardInterrupt:
pass
orchestrator.undeploy()
return exit_code
class CoachLauncher(object):
"""
This class is responsible for gathering all user-specified configuration options, parsing them,
instantiating a GraphManager and then starting that GraphManager with either improve() or evaluate().
This class is also responsible for launching multiple processes.
It is structured so that it can be sub-classed to provide alternate mechanisms to configure and launch
Coach jobs.
The key entry-point for this class is the .launch() method which is expected to be called from __main__
and handle absolutely everything for a job.
"""
def launch(self):
"""
Main entry point for the class, and the standard way to run coach from the command line.
Parses command-line arguments through argparse, instantiates a GraphManager and then runs it.
"""
parser = self.get_argument_parser()
args = self.get_config_args(parser)
graph_manager = self.get_graph_manager_from_args(args)
self.run_graph_manager(graph_manager, args)
def get_graph_manager_from_args(self, args: argparse.Namespace) -> 'GraphManager':
"""
Return the graph manager according to the command line arguments given by the user.
:param args: the arguments given by the user
:return: the graph manager, not bound to task_parameters yet.
"""
graph_manager = None
# if a preset was given we will load the graph manager for the preset
if args.preset is not None:
graph_manager = short_dynamic_import(args.preset, ignore_module_case=True)
# for human play we need to create a custom graph manager
if args.play:
from rl_coach.agents.human_agent import HumanAgentParameters
env_params = short_dynamic_import(args.environment_type, ignore_module_case=True)()
env_params.human_control = True
schedule_params = HumanPlayScheduleParameters()
graph_manager = BasicRLGraphManager(HumanAgentParameters(), env_params, schedule_params, VisualizationParameters())
# Set framework
# Note: Some graph managers (e.g. HAC preset) create multiple agents and the attribute is called agents_params
if hasattr(graph_manager, 'agent_params'):
for network_parameters in graph_manager.agent_params.network_wrappers.values():
network_parameters.framework = args.framework
elif hasattr(graph_manager, 'agents_params'):
for ap in graph_manager.agents_params:
for network_parameters in ap.network_wrappers.values():
network_parameters.framework = args.framework
if args.level:
if isinstance(graph_manager.env_params.level, SingleLevelSelection):
graph_manager.env_params.level.select(args.level)
else:
graph_manager.env_params.level = args.level
# set the seed for the environment
if args.seed is not None and graph_manager.env_params is not None:
graph_manager.env_params.seed = args.seed
# visualization
graph_manager.visualization_parameters.dump_gifs = graph_manager.visualization_parameters.dump_gifs or args.dump_gifs
graph_manager.visualization_parameters.dump_mp4 = graph_manager.visualization_parameters.dump_mp4 or args.dump_mp4
graph_manager.visualization_parameters.render = args.render
graph_manager.visualization_parameters.tensorboard = args.tensorboard
graph_manager.visualization_parameters.print_networks_summary = args.print_networks_summary
# update the custom parameters
if args.custom_parameter is not None:
unstripped_key_value_pairs = [pair.split('=') for pair in args.custom_parameter.split(';')]
stripped_key_value_pairs = [tuple([pair[0].strip(), pair[1].strip()]) for pair in
unstripped_key_value_pairs if len(pair) == 2]
            # apply the custom parameters to the graph manager
for key, value in stripped_key_value_pairs:
exec("graph_manager.{}={}".format(key, value))
return graph_manager
def display_all_presets_and_exit(self):
# list available presets
screen.log_title("Available Presets:")
for preset in sorted(list_all_presets()):
print(preset)
sys.exit(0)
def expand_preset(self, preset):
"""
Replace a short preset name with the full python path, and verify that it can be imported.
"""
if preset.lower() in [p.lower() for p in list_all_presets()]:
preset = "{}.py:graph_manager".format(os.path.join(get_base_dir(), 'presets', preset))
else:
preset = "{}".format(preset)
# if a graph manager variable was not specified, try the default of :graph_manager
if len(preset.split(":")) == 1:
preset += ":graph_manager"
# verify that the preset exists
preset_path = preset.split(":")[0]
if not os.path.exists(preset_path):
screen.error("The given preset ({}) cannot be found.".format(preset))
# verify that the preset can be instantiated
try:
short_dynamic_import(preset, ignore_module_case=True)
except TypeError as e:
traceback.print_exc()
screen.error('Internal Error: ' + str(e) + "\n\nThe given preset ({}) cannot be instantiated."
.format(preset))
return preset
def get_config_args(self, parser: argparse.ArgumentParser, arguments=None) -> argparse.Namespace:
"""
Returns a Namespace object with all the user-specified configuration options needed to launch.
This implementation uses argparse to take arguments from the CLI, but it can be overridden by
another method that gets its configuration from elsewhere. Any replacement must, however,
return an identically structured Namespace object, which conforms to the structure defined by
get_argument_parser.
This method parses the arguments that the user entered, performs some basic validation, and
expands user-specified short-form values into their more explicit form.
:param parser: a parser object which implicitly defines the format of the Namespace that
is expected to be returned.
:param arguments: command line arguments
:return: the parsed arguments as a Namespace
"""
if arguments is None:
args = parser.parse_args()
else:
args = parser.parse_args(arguments)
if args.nocolor:
screen.set_use_colors(False)
# if no arg is given
if (len(sys.argv) == 1 and arguments is None) or (arguments is not None and len(arguments) <= 2):
parser.print_help()
sys.exit(1)
# list available presets
if args.list:
self.display_all_presets_and_exit()
# Read args from config file for distributed Coach.
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
coach_config = ConfigParser({
'image': '',
'memory_backend': 'redispubsub',
'data_store': 's3',
's3_end_point': 's3.amazonaws.com',
's3_bucket_name': '',
's3_creds_file': ''
})
try:
coach_config.read(args.distributed_coach_config_path)
args.image = coach_config.get('coach', 'image')
args.memory_backend = coach_config.get('coach', 'memory_backend')
args.data_store = coach_config.get('coach', 'data_store')
if args.data_store == 's3':
args.s3_end_point = coach_config.get('coach', 's3_end_point')
args.s3_bucket_name = coach_config.get('coach', 's3_bucket_name')
args.s3_creds_file = coach_config.get('coach', 's3_creds_file')
except Error as e:
screen.error("Error when reading distributed Coach config file: {}".format(e))
if args.image == '':
screen.error("Image cannot be empty.")
data_store_choices = ['s3', 'nfs', 'redis']
if args.data_store not in data_store_choices:
screen.warning("{} data store is unsupported.".format(args.data_store))
screen.error("Supported data stores are {}.".format(data_store_choices))
memory_backend_choices = ['redispubsub']
if args.memory_backend not in memory_backend_choices:
screen.warning("{} memory backend is not supported.".format(args.memory_backend))
screen.error("Supported memory backends are {}.".format(memory_backend_choices))
if args.data_store == 's3':
if args.s3_bucket_name == '':
screen.error("S3 bucket name cannot be empty.")
if args.s3_creds_file == '':
args.s3_creds_file = None
if args.play and args.distributed_coach:
screen.error("Playing is not supported in distributed Coach.")
# replace a short preset name with the full path
if args.preset is not None:
args.preset = self.expand_preset(args.preset)
# validate the checkpoints args
if args.checkpoint_restore_dir is not None and not os.path.exists(args.checkpoint_restore_dir):
# If distributed trainer, the checkpoint dir is not yet available so skipping the check in that case.
if not (args.distributed_coach and args.distributed_coach_run_type in [RunType.TRAINER, RunType.ROLLOUT_WORKER]):
screen.error("The requested checkpoint folder to load from does not exist.")
# validate the checkpoints args
if args.checkpoint_restore_file is not None and not glob(args.checkpoint_restore_file + '*'):
screen.error("The requested checkpoint file to load from does not exist.")
# no preset was given. check if the user requested to play some environment on its own
if args.preset is None and args.play and not args.environment_type:
screen.error('When no preset is given for Coach to run, and the user requests human control over '
'the environment, the user is expected to input the desired environment_type and level.'
'\nAt least one of these parameters was not given.')
elif args.preset and args.play:
screen.error("Both the --preset and the --play flags were set. These flags can not be used together. "
"For human control, please use the --play flag together with the environment type flag (-et)")
elif args.preset is None and not args.play:
screen.error("Please choose a preset using the -p flag or use the --play flag together with choosing an "
"environment type (-et) in order to play the game.")
# get experiment name and path
args.experiment_name = logger.get_experiment_name(args.experiment_name)
args.experiment_path = logger.get_experiment_path(args.experiment_name, args.experiment_path)
if args.play and args.num_workers > 1:
screen.warning("Playing the game as a human is only available with a single worker. "
"The number of workers will be reduced to 1")
args.num_workers = 1
args.framework = Frameworks[args.framework.lower()]
# checkpoints
args.checkpoint_save_dir = os.path.join(args.experiment_path, 'checkpoint') if args.checkpoint_save_secs is not None else None
if args.export_onnx_graph and not args.checkpoint_save_secs:
screen.warning("Exporting ONNX graphs requires setting the --checkpoint_save_secs flag. "
"The --export_onnx_graph will have no effect.")
return args
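# For reference, the distributed-Coach branch above expects an INI file with a
# [coach] section containing the keys read via coach_config.get(). A small
# self-contained sketch follows; the image and bucket values are placeholders,
# not defaults shipped with Coach.
from configparser import ConfigParser
from io import StringIO

EXAMPLE_DISTRIBUTED_COACH_CONFIG = """
[coach]
image = my-registry/coach-worker:latest
memory_backend = redispubsub
data_store = s3
s3_end_point = s3.amazonaws.com
s3_bucket_name = my-coach-bucket
s3_creds_file =
"""

_cfg = ConfigParser({'s3_creds_file': ''})
_cfg.read_file(StringIO(EXAMPLE_DISTRIBUTED_COACH_CONFIG))
assert _cfg.get('coach', 'data_store') == 's3'
assert _cfg.get('coach', 's3_bucket_name') == 'my-coach-bucket'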
def get_argument_parser(self) -> argparse.ArgumentParser:
"""
This returns an ArgumentParser object which defines the set of options that customers are expected to supply in order
to launch a coach job.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset',
help="(string) Name of a preset to run (class name from the 'presets' directory.)",
default=None,
type=str)
parser.add_argument('-l', '--list',
help="(flag) List all available presets",
action='store_true')
parser.add_argument('-e', '--experiment_name',
help="(string) Experiment name to be used to store the results.",
default=None,
type=str)
parser.add_argument('-ep', '--experiment_path',
help="(string) Path to experiments folder.",
default=None,
type=str)
parser.add_argument('-r', '--render',
help="(flag) Render environment",
action='store_true')
parser.add_argument('-f', '--framework',
help="(string) Neural network framework. Available values: tensorflow, mxnet",
default='tensorflow',
type=str)
parser.add_argument('-n', '--num_workers',
help="(int) Number of workers for multi-process based agents, e.g. A3C",
default=1,
type=int)
parser.add_argument('-c', '--use_cpu',
help="(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
"effect and the CPU will be used either way.",
action='store_true')
parser.add_argument('-ew', '--evaluation_worker',
help="(flag) If multiple workers are used, add an evaluation worker as well which will "
"evaluate asynchronously and independently during the training. NOTE: this worker will "
"ignore the evaluation settings in the preset's ScheduleParams.",
action='store_true')
parser.add_argument('--play',
help="(flag) Play as a human by controlling the game with the keyboard. "
"This option will save a replay buffer with the game play.",
action='store_true')
parser.add_argument('--evaluate',
help="(int) Run evaluation only, for at least the given number of steps (note that complete "
"episodes are evaluated). This is a convenient way to disable training in order "
"to evaluate an existing checkpoint. If value is 0, or no value is provided, "
"evaluation will run for an infinite number of steps.",
nargs='?',
const=0,
type=int)
parser.add_argument('-v', '--verbosity',
help="(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
default="low",
type=str)
parser.add_argument('-tfv', '--tf_verbosity',
help="(flag) TensorFlow verbosity level",
default=3,
type=int)
parser.add_argument('--nocolor',
help="(flag) Turn off color-codes in screen logging. Ascii text only",
action='store_true')
parser.add_argument('-s', '--checkpoint_save_secs',
help="(int) Time in seconds between saving checkpoints of the model.",
default=None,
type=int)
parser.add_argument('-crd', '--checkpoint_restore_dir',
help='(string) Path to a folder containing a checkpoint to restore the model from.',
type=str)
parser.add_argument('-crf', '--checkpoint_restore_file',
help='(string) Path to a checkpoint file to restore the model from.',
type=str)
parser.add_argument('-dg', '--dump_gifs',
help="(flag) Enable the gif saving functionality.",
action='store_true')
parser.add_argument('-dm', '--dump_mp4',
help="(flag) Enable the mp4 saving functionality.",
action='store_true')
parser.add_argument('-et', '--environment_type',
help="(string) Choose an environment type class to override on top of the selected preset.",
default=None,
type=str)
parser.add_argument('-lvl', '--level',
help="(string) Choose the level that will be played in the environment that was selected."
"This value will override the level parameter in the environment class."
,
default=None,
type=str)
parser.add_argument('-cp', '--custom_parameter',
help="(string) Semicolon separated parameters used to override specific parameters on top of"
" the selected preset (or on top of the command-line assembled one). "
"Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
"For ex.: "
"\"visualization_parameters.render=False; heatup_steps=EnvironmentSteps(1000);"
"improve_steps=TrainingSteps(100000); optimizer='rmsprop'\"",
default=None,
type=str)
parser.add_argument('--print_networks_summary',
help="(flag) Print network summary to stdout",
action='store_true')
parser.add_argument('-tb', '--tensorboard',
help="(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
action='store_true')
parser.add_argument('-ns', '--no_summary',
help="(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
action='store_true')
parser.add_argument('-d', '--open_dashboard',
help="(flag) Open dashboard with the experiment when the run starts",
action='store_true')
parser.add_argument('--seed',
help="(int) A seed to use for running the experiment",
default=None,
type=int)
parser.add_argument('-onnx', '--export_onnx_graph',
help="(flag) Export the ONNX graph to the experiment directory. "
"This will have effect only if the --checkpoint_save_secs flag is used in order to store "
"checkpoints, since the weights checkpoint are needed for the ONNX graph. "
"Keep in mind that this can cause major overhead on the experiment. "
"Exporting ONNX graphs requires manually installing the tf2onnx package "
"(https://github.com/onnx/tensorflow-onnx).",
action='store_true')
parser.add_argument('-dc', '--distributed_coach',
help="(flag) Use distributed Coach.",
action='store_true')
parser.add_argument('-dcp', '--distributed_coach_config_path',
help="(string) Path to config file when using distributed rollout workers."
"Only distributed Coach parameters should be provided through this config file."
"Rest of the parameters are provided using Coach command line options."
"Used only with --distributed_coach flag."
"Ignored if --distributed_coach flag is not used.",
type=str)
parser.add_argument('--memory_backend_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--data_store_params',
help=argparse.SUPPRESS,
type=str)
parser.add_argument('--distributed_coach_run_type',
help=argparse.SUPPRESS,
type=RunType,
default=RunType.ORCHESTRATOR,
choices=list(RunType))
parser.add_argument('-asc', '--apply_stop_condition',
help="(flag) If set, this will apply a stop condition on the run, defined by reaching a"
"target success rate as set by the environment or a custom success rate as defined "
"in the preset. ",
action='store_true')
parser.add_argument('--dump_worker_logs',
help="(flag) Only used in distributed coach. If set, the worker logs are saved in the experiment dir",
action='store_true')
parser.add_argument('--is_multi_node_test',
help=argparse.SUPPRESS,
action='store_true')
return parser
def run_graph_manager(self, graph_manager: 'GraphManager', args: argparse.Namespace):
task_parameters = self.create_task_parameters(graph_manager, args)
if args.distributed_coach and args.distributed_coach_run_type != RunType.ORCHESTRATOR:
handle_distributed_coach_tasks(graph_manager, args, task_parameters)
return
# Single-threaded runs
if args.num_workers == 1:
self.start_single_threaded(task_parameters, graph_manager, args)
else:
self.start_multi_threaded(graph_manager, args)
@staticmethod
def create_task_parameters(graph_manager: 'GraphManager', args: argparse.Namespace):
if args.distributed_coach and not graph_manager.agent_params.algorithm.distributed_coach_synchronization_type:
screen.error(
"{} algorithm is not supported using distributed Coach.".format(graph_manager.agent_params.algorithm))
if args.distributed_coach and args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:
screen.warning(
"The --checkpoint_save_secs or -s argument will be ignored as SYNC distributed coach sync type is used. Checkpoint will be saved every training iteration.")
if args.distributed_coach and not args.checkpoint_save_secs and graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.ASYNC:
screen.error(
"Distributed coach with ASYNC distributed coach sync type requires --checkpoint_save_secs or -s.")
# Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
# This will not affect GPU runs.
os.environ["OMP_NUM_THREADS"] = "1"
# turn TF debug prints off
if args.framework == Frameworks.tensorflow:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)
# turn off the summary at the end of the run if necessary
if not args.no_summary and not args.distributed_coach:
atexit.register(logger.summarize_experiment)
screen.change_terminal_title(args.experiment_name)
if args.checkpoint_restore_dir is not None and args.checkpoint_restore_file is not None:
raise ValueError("Only one of the checkpoint_restore_dir and checkpoint_restore_file arguments can be used"
" simulatenously.")
checkpoint_restore_path = args.checkpoint_restore_dir if args.checkpoint_restore_dir \
else args.checkpoint_restore_file
# open dashboard
if args.open_dashboard:
open_dashboard(args.experiment_path)
if args.distributed_coach and args.distributed_coach_run_type == RunType.ORCHESTRATOR:
exit(handle_distributed_coach_orchestrator(args))
task_parameters = TaskParameters(
framework_type=args.framework,
evaluate_only=args.evaluate,
experiment_path=args.experiment_path,
seed=args.seed,
use_cpu=args.use_cpu,
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_path=checkpoint_restore_path,
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
return task_parameters
@staticmethod
def start_single_threaded(task_parameters, graph_manager: 'GraphManager', args: argparse.Namespace):
# Start the training or evaluation
start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
@staticmethod
def start_multi_threaded(graph_manager: 'GraphManager', args: argparse.Namespace):
total_tasks = args.num_workers
if args.evaluation_worker:
total_tasks += 1
ps_hosts = "localhost:{}".format(get_open_port())
worker_hosts = ",".join(["localhost:{}".format(get_open_port()) for i in range(total_tasks)])
# Shared memory
class CommManager(BaseManager):
pass
CommManager.register('SharedMemoryScratchPad', SharedMemoryScratchPad, exposed=['add', 'get', 'internal_call'])
comm_manager = CommManager()
comm_manager.start()
shared_memory_scratchpad = comm_manager.SharedMemoryScratchPad()
if args.checkpoint_restore_file:
raise ValueError("Multi-Process runs only support restoring checkpoints from a directory, "
"and not from a file. ")
def start_distributed_task(job_type, task_index, evaluation_worker=False,
shared_memory_scratchpad=shared_memory_scratchpad):
task_parameters = DistributedTaskParameters(
framework_type=args.framework,
parameters_server_hosts=ps_hosts,
worker_hosts=worker_hosts,
job_type=job_type,
task_index=task_index,
evaluate_only=0 if evaluation_worker else None, # 0 value for evaluation worker as it should run infinitely
use_cpu=args.use_cpu,
num_tasks=total_tasks, # training tasks + 1 evaluation task
num_training_tasks=args.num_workers,
experiment_path=args.experiment_path,
shared_memory_scratchpad=shared_memory_scratchpad,
seed=args.seed+task_index if args.seed is not None else None, # each worker gets a different seed
checkpoint_save_secs=args.checkpoint_save_secs,
checkpoint_restore_path=args.checkpoint_restore_dir, # MonitoredTrainingSession only supports a dir
checkpoint_save_dir=args.checkpoint_save_dir,
export_onnx_graph=args.export_onnx_graph,
apply_stop_condition=args.apply_stop_condition
)
# we assume that only the evaluation workers are rendering
graph_manager.visualization_parameters.render = args.render and evaluation_worker
p = Process(target=start_graph, args=(graph_manager, task_parameters))
# p.daemon = True
p.start()
return p
# parameter server
parameter_server = start_distributed_task("ps", 0)
# training workers
# wait a bit before spawning the non-chief workers to make sure the session has already been created
workers = []
workers.append(start_distributed_task("worker", 0))
time.sleep(2)
for task_index in range(1, args.num_workers):
workers.append(start_distributed_task("worker", task_index))
# evaluation worker
if args.evaluation_worker or args.render:
evaluation_worker = start_distributed_task("worker", args.num_workers, evaluation_worker=True)
# wait for all workers
[w.join() for w in workers]
if args.evaluation_worker:
evaluation_worker.terminate()
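# start_multi_threaded shares a SharedMemoryScratchPad between worker
# processes through a multiprocessing BaseManager proxy. A minimal standalone
# sketch of that pattern follows; the ScratchPad class here is illustrative,
# not Coach's implementation.
from multiprocessing import Process
from multiprocessing.managers import BaseManager


class ScratchPad:
    def __init__(self):
        self._data = {}

    def add(self, key, value):
        self._data[key] = value

    def get(self, key):
        return self._data.get(key)


class _Manager(BaseManager):
    pass


_Manager.register('ScratchPad', ScratchPad, exposed=['add', 'get'])


def _worker(pad, worker_id):
    # Each process talks to the same server-side ScratchPad via the proxy.
    pad.add('worker_{}'.format(worker_id), worker_id)


if __name__ == '__main__':
    manager = _Manager()
    manager.start()
    pad = manager.ScratchPad()
    procs = [Process(target=_worker, args=(pad, i)) for i in range(2)]
    [p.start() for p in procs]
    [p.join() for p in procs]
    assert pad.get('worker_0') == 0 and pad.get('worker_1') == 1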
class CoachInterface(CoachLauncher):
"""
This class is used as an interface for using Coach as a library. It accepts any of the command line
arguments (with their respective names) as keyword arguments to the class.
"""
def __init__(self, **kwargs):
parser = self.get_argument_parser()
arguments = []
for key in kwargs:
arguments.append('--' + key)
arguments.append(str(kwargs[key]))
if '--experiment_name' not in arguments:
arguments.append('--experiment_name')
arguments.append('')
self.args = self.get_config_args(parser, arguments)
self.graph_manager = self.get_graph_manager_from_args(self.args)
if self.args.num_workers == 1:
task_parameters = self.create_task_parameters(self.graph_manager, self.args)
self.graph_manager.create_graph(task_parameters)
def run(self):
self.run_graph_manager(self.graph_manager, self.args)
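# A hedged usage sketch for CoachInterface: keyword arguments are converted
# back into command-line style flags and routed through get_config_args().
# The preset name below is an assumption (any preset shipped with Coach would
# do), and the example is guarded so importing this module never starts a run.
if __name__ == '__main__' and os.environ.get('COACH_INTERFACE_EXAMPLE'):
    example_coach = CoachInterface(preset='CartPole_DQN',
                                   num_workers=1,
                                   checkpoint_save_secs=60)
    example_coach.run()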
def main():
launcher = CoachLauncher()
launcher.launch()
if __name__ == "__main__":
main()
mainwindow.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QKeySequence
from qtpy.QtWidgets import (QApplication, QMainWindow, QMenu, QMessageBox,
QShortcut, QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app.find_plugins import (
find_external_plugins, find_internal_plugins)
from spyder.app.utils import (
create_application, create_splash_screen, create_window, ORIGINAL_SYS_EXIT,
delete_debug_log_files, qt_message_handler, set_links_color, setup_logging,
set_opengl_implementation)
from spyder.api.plugin_registration.registry import PLUGIN_REGISTRY
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import (
Plugins, SpyderPlugin, SpyderPluginV2, SpyderDockablePlugin,
SpyderPluginWidget)
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent")
sig_moved = Signal("QMoveEvent")
sig_layout_setup_ready = Signal(object) # Related to default layouts
# ---- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name, error=True):
"""
Return a plugin instance by providing the plugin class.
"""
if plugin_name in PLUGIN_REGISTRY:
return PLUGIN_REGISTRY.get_plugin(plugin_name)
if error:
raise SpyderAPIError(f'Plugin "{plugin_name}" not found!')
return None
def get_dockable_plugins(self):
"""Get a list of all dockable plugins."""
dockable_plugins = []
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, (SpyderDockablePlugin, SpyderPluginWidget)):
dockable_plugins.append((plugin_name, plugin))
return dockable_plugins
def is_plugin_enabled(self, plugin_name):
"""Determine if a given plugin is going to be loaded."""
return PLUGIN_REGISTRY.is_plugin_enabled(plugin_name)
def is_plugin_available(self, plugin_name):
"""Determine if a given plugin is available."""
return PLUGIN_REGISTRY.is_plugin_available(plugin_name)
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def register_plugin(self, plugin_name, external=False, omit_conf=False):
"""
Register a plugin in Spyder Main Window.
"""
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
self.show_plugin_compatibility_message(message)
return
# Connect Plugin Signals to main window methods
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Connect Main window Signals to plugin signals
self.sig_moved.connect(plugin.sig_mainwindow_moved)
self.sig_resized.connect(plugin.sig_mainwindow_resized)
# Register plugin
plugin._register(omit_conf=omit_conf)
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
if plugin_name == Plugins.Shortcuts:
for action, context, action_name in self.shortcut_queue:
self.register_shortcut(action, context, action_name)
self.shortcut_queue = []
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(action, context, action_name)
else:
self.shortcut_queue.append((action, context, action_name))
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(sc, context, name)
self.register_shortcut(
plugin.toggle_view_action, context, name)
else:
self.shortcut_queue.append((sc, context, name))
self.shortcut_queue.append(
(plugin.toggle_view_action, context, name))
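# register_plugin above defers shortcut registration when the Shortcuts
# plugin has not been loaded yet: (action, context, name) tuples are appended
# to self.shortcut_queue and flushed once Plugins.Shortcuts is registered.
# A minimal standalone sketch of that deferred-registration pattern follows;
# the names here are illustrative, not Spyder APIs.
class ShortcutRegistrar:
    def __init__(self):
        self._backend_ready = False
        self._queue = []        # pending (action, context, name) tuples
        self._registered = []

    def register(self, action, context, name):
        if self._backend_ready:
            self._registered.append((action, context, name))
        else:
            self._queue.append((action, context, name))

    def backend_ready(self):
        """Flush everything that was queued before the backend existed."""
        self._backend_ready = True
        for item in self._queue:
            self._registered.append(item)
        self._queue = []


registrar = ShortcutRegistrar()
registrar.register("undo_action", "Editor", "Undo")   # queued for later
registrar.backend_ready()                             # flushed here
assert registrar._registered == [("undo_action", "Editor", "Undo")]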
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.shortcuts.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
shortcut = None
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
pass
if shortcut is not None:
self.shortcuts.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus, this will return the Editor
plugin by default.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
try:
self.widgetlist.remove(plugin)
except ValueError:
pass
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Don't tabify if TABIFY is a list whose only value is None, or an
# empty list
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
# Show external plugins
if plugin.NAME in PLUGIN_REGISTRY.external_plugins:
plugin.get_widget().toggle_view(True)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure plugins are placed correctly when
# switching layouts.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
console = self.get_plugin(Plugins.Console, error=False)
if console:
console.handle_exception(error_data)
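# A plugin typically reports an error by emitting sig_exception_occurred with
# a dict shaped like the one documented above; handle_exception then forwards
# it to the Console plugin. A minimal, self-contained sketch of building such
# a payload follows; the helper below is illustrative, not part of the Spyder
# API.
import traceback as _tb


def _build_error_data(exc, repo="spyder-ide/spyder"):
    """Build an error_data dict with the keys expected by handle_exception."""
    return {
        "text": "".join(
            _tb.format_exception(type(exc), exc, exc.__traceback__)),
        "is_traceback": True,
        "repo": repo,
        "title": str(exc) or type(exc).__name__,
        "label": "",
        "steps": "",
    }


try:
    1 / 0
except ZeroDivisionError as _err:
    _example_error_data = _build_error_data(_err)
    assert _example_error_data["is_traceback"] is True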
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
# Set Windows app icon to use .ico file
if os.name == "nt":
qapp.setWindowIcon(ima.get_icon("windows_app_icon"))
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
# Make Spyder quit when pressing Ctrl+C in the console.
# In DEV mode Ctrl+C doesn't quit, because it helps to
# capture the traceback when Spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
self.shortcut_queue = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
# Mapping of new plugin identifiers to the old attribute
# names given for plugins, or to prevent collisions with other
# attributes, e.g. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Set a shutdown QTimer when running in CI (e.g. Travis)
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# ---- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` entry in the `View`
application menu uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
PLUGIN_REGISTRY.sig_plugin_ready.connect(
lambda plugin_name, omit_conf: self.register_plugin(
plugin_name, omit_conf=omit_conf))
PLUGIN_REGISTRY.set_main(self)
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Set css_path as a configuration to be used by the plugins
CONF.set('appearance', 'css_path', css_path)
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
registry_internal_plugins = {}
registry_external_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
if plugin_name in internal_plugins:
registry_internal_plugins[plugin_name] = (
plugin_main_attribute_name, plugin)
else:
registry_external_plugins[plugin_name] = (
plugin_main_attribute_name, plugin)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
PLUGIN_REGISTRY.set_all_internal_plugins(registry_internal_plugins)
PLUGIN_REGISTRY.set_all_external_plugins(registry_external_plugins)
# Instantiate internal Spyder 5 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPluginV2):
PLUGIN_REGISTRY.register_plugin(self, PluginClass,
external=False)
# Instantiate internal Spyder 4 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPlugin):
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=False)
self.preferences.register_plugin_preferences(
plugin_instance)
# Instantiate external Spyder 5 plugins
for plugin_name in external_plugins:
if plugin_name in enabled_plugins:
PluginClass = external_plugins[plugin_name]
try:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=True)
except Exception as error:
print("%s: %s" % (PluginClass, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = PLUGIN_REGISTRY.register_plugin(self, mod,
external=True)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if not hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.thirdparty_plugins.append(plugin)
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.plugins.mainmenu.api import (
ApplicationMenus, ToolsMenuSections, FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut,
id_='file_switcher')
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut,
id_='symbol_finder')
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions += [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action,
None]
if self.get_plugin(Plugins.Editor, error=False):
self.edit_menu_actions += self.editor.edit_menu_actions
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
id_='spyder_path_action')
from spyder.plugins.application.container import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = ApplicationActions.SpyderWindowsEnvVariables
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action,
before_section=ToolsMenuSections.External
)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
Loaded plugins can be accessed as attributes of the mainwindow
as before, e.g. self.console or self.main.console, preserving the
same accessors as before.
"""
# Mapping of new plugin identifiers to the old attribute
# names given for plugins
try:
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(
self._INTERNAL_PLUGINS_MAPPING[attr], error=False)
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
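# __getattr__ above lets old-style attribute access (e.g. self.ipyconsole)
# resolve to the plugin registry through _INTERNAL_PLUGINS_MAPPING. A minimal
# standalone sketch of that delegation pattern follows; the registry and
# names below are illustrative, not Spyder's real objects.
class _FakeRegistry:
    def __init__(self):
        self._plugins = {"IPython console": object()}

    def get_plugin(self, name, error=True):
        if name in self._plugins:
            return self._plugins[name]
        if error:
            raise AttributeError(name)
        return None


class _FakeWindow:
    _MAPPING = {"ipyconsole": "IPython console"}

    def __init__(self):
        self._registry = _FakeRegistry()

    def __getattr__(self, attr):
        # Called only when normal attribute lookup fails.
        mapped = self._MAPPING.get(attr, attr)
        plugin = self._registry.get_plugin(mapped, error=False)
        if plugin is not None:
            return plugin
        raise AttributeError(attr)


window = _FakeWindow()
assert window.ipyconsole is window._registry._plugins["IPython console"]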
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related to setting up the main window.
"""
logger.info("Setting up window...")
for plugin_name in PLUGIN_REGISTRY:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
# Tabify external plugins which were installed after Spyder was
# installed.
# Note: This is only necessary the first time a plugin is loaded.
# Afterwards, the plugin placement is recorded in the window hexstate,
# which is loaded by the layouts plugin during the next session.
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
if plugin_instance.get_conf('first_time', True):
self.tabify_plugin(plugin_instance, Plugins.Console)
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
# Register custom layouts
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
if hasattr(plugin_instance, 'CUSTOM_LAYOUTS'):
if isinstance(plugin_instance.CUSTOM_LAYOUTS, list):
for custom_layout in plugin_instance.CUSTOM_LAYOUTS:
self.layouts.register_layout(
self, custom_layout)
else:
logger.info(
'Unable to load custom layouts for {}. '
'Expecting a list of layout classes but got {}'
.format(plugin_name, plugin_instance.CUSTOM_LAYOUTS)
)
self.layouts.update_layout_menu_actions()
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""
Actions to be performed only after the main window's `show` method
is triggered.
"""
# Required plugins
projects = self.get_plugin(Plugins.Projects, error=False)
editor = self.get_plugin(Plugins.Editor, error=False)
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
# Call on_mainwindow_visible for all plugins.
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin.on_mainwindow_visible()
QApplication.processEvents()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Load project, if any.
# TODO: Remove this reference to projects once we can send the command
# line options to the plugins.
if self.open_project:
if not running_in_mac_app():
if projects:
projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
if projects:
projects.reopen_last_project()
# If no project is active, load last session
if projects and projects.get_active_project() is None:
if editor:
editor.setup_open_files(close_previous_files=False)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# To avoid regressions: we shouldn't have loaded the modules
# below at this point.
if DEV is not None:
assert 'pandas' not in sys.modules
assert 'matplotlib' not in sys.modules
# Restore undocked plugins
self.restore_undocked_plugins()
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def restore_undocked_plugins(self):
"""Restore plugins that were undocked in the previous session."""
logger.info("Restoring undocked plugins from the previous session")
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, SpyderDockablePlugin):
if plugin.get_conf('undocked_on_window_close', default=False):
plugin.get_widget().create_window()
elif isinstance(plugin, SpyderPluginWidget):
if plugin.get_option('undocked_on_window_close',
default=False):
plugin._create_window()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
# TODO: Remove self.projects reference once there's an API for setting
# window title.
projects = self.get_plugin(Plugins.Projects, error=False)
if projects:
path = projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
shortcuts = self.get_plugin(Plugins.Shortcuts, error=False)
if shortcuts:
shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
editor = self.get_plugin(Plugins.Editor, error=False)
if editor:
editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
if hasattr(self, 'editor'):
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(readwrite_editor
and widget.document().isUndoAvailable())
self.redo_action.setEnabled(readwrite_editor
and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.get_plugin(Plugins.Editor, error=False):
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if hasattr(self, 'layouts'):
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False, close_immediately=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
can_close = PLUGIN_REGISTRY.delete_all_plugins(
excluding={Plugins.Layout}, close_immediately=close_immediately)
if not can_close and not close_immediately:
return False
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window/'
if self.layouts is not None:
self.layouts.save_current_window_settings(prefix)
PLUGIN_REGISTRY.delete_plugin(Plugins.Layout)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
console = self.get_plugin(Plugins.Console, error=False)
if console:
if state:
console.redirect_stds()
else:
console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
editor = self.get_plugin(Plugins.Editor, error=False)
variableexplorer = self.get_plugin(
Plugins.VariableExplorer, error=False)
if encoding.is_text_file(fname):
if editor:
editor.load(fname)
elif variableexplorer is not None and ext in IMPORT_EXT:
variableexplorer.get_widget().import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
with open(self.SPYDER_PATH, 'r', encoding='utf-8') as f:
path = f.read().splitlines()
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
with open(self.SPYDER_NOT_ACTIVE_PATH, 'r',
encoding='utf-8') as f:
not_active_path = f.read().splitlines()
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
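# Note (illustrative, not from the original file): both dicts emitted above
# map a path to its active flag, e.g. OrderedDict([('/some/path', True),
# ('/project/path', True)]), and both include the active project's
# contribution; plugins that cache sys.path should connect to this signal at
# registration time, as noted above.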
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
projects = self.get_plugin(Plugins.Projects, error=False)
read_only_path = ()
if projects:
read_only_path = tuple(projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
projects = self.get_plugin(Plugins.Projects, error=False)
self.project_path = ()
if projects:
self.project_path = tuple(projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except Exception:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while True:
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
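# Hedged sketch (not part of the original file): the client side of this
# single-instance protocol is just a short-lived TCP connection that sends
# the file path as UTF-8 bytes, which the accept loop above receives and
# re-emits through sig_open_external_file. Assuming the port saved above
# under ('main', 'open_files_port'), a second process could do roughly:
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('127.0.0.1', port))  # port read back from the config
#     client.send(fname.encode('utf-8'))   # fname: path the server should open
#     client.close()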
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False, close_immediately=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(
reset=reset, close_immediately=close_immediately)
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
# Note: The +6 pixel on the top makes it look better
# FIXME: Why is this using the toolbars menu? A: To not be on top of
# the toolbars.
# Probably toolbars should be taken into account for this 'delta' only
# when they are visible
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(MainWindow, app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
if get_debug_level() > 0:
delete_debug_log_files()
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(
MainWindow, app, splash, options, args
)
else:
mainwindow = create_window(MainWindow, app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
learner.py
|
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IMPALA learner class."""
import functools
import itertools
import queue
import threading
from typing import Dict, Tuple, Text
import warnings
import dm_env
import haiku as hk
from examples.impala import agent as agent_lib
from examples.impala import util
import jax
from jax.experimental import optimizers
from jax.experimental import optix
import jax.numpy as jnp
import numpy as np
import rlax
# The IMPALA paper sums losses, rather than taking the mean.
# We wrap rlax to do so as well.
def policy_gradient_loss(logits, *args):
"""rlax.policy_gradient_loss, but with sum(loss) and [T, B, ...] inputs."""
mean_per_batch = jax.vmap(rlax.policy_gradient_loss, in_axes=1)(logits, *args)
total_loss_per_batch = mean_per_batch * logits.shape[0]
return jnp.sum(total_loss_per_batch)
def entropy_loss(logits, *args):
"""rlax.entropy_loss, but with sum(loss) and [T, B, ...] inputs."""
mean_per_batch = jax.vmap(rlax.entropy_loss, in_axes=1)(logits, *args)
total_loss_per_batch = mean_per_batch * logits.shape[0]
return jnp.sum(total_loss_per_batch)
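# Minimal shape sketch (illustrative assumption, not part of the original
# file): rlax reduces over the time axis with a mean, so vmapping over the
# batch axis (in_axes=1) gives one mean per batch element; scaling by
# T (= logits.shape[0]) and summing over the batch recovers the summed loss
# used by IMPALA. The toy dimensions below are arbitrary.
def _example_summed_entropy_loss():
    t, b, num_actions = 5, 4, 3  # assumed time, batch and action dims
    logits = jnp.zeros((t, b, num_actions))  # uniform policy logits
    mask = jnp.ones((t, b))  # weight every step equally
    # Scalar equal to the per-step weighted loss summed over time and batch.
    return entropy_loss(logits, mask)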
class Learner:
"""Manages state and performs updates for IMPALA learner."""
def __init__(
self,
agent: agent_lib.Agent,
rng_key,
opt: optix.InitUpdate,
batch_size: int,
discount_factor: float,
frames_per_iter: int,
max_abs_reward: float = 0,
logger=None,
):
if jax.device_count() > 1:
warnings.warn('Note: the impala example will only take advantage of a '
'single accelerator.')
self._agent = agent
self._opt = opt
self._batch_size = batch_size
self._discount_factor = discount_factor
self._frames_per_iter = frames_per_iter
self._max_abs_reward = max_abs_reward
# Data pipeline objects.
self._done = False
self._host_q = queue.Queue(maxsize=self._batch_size)
self._device_q = queue.Queue(maxsize=1)
# Prepare the parameters to be served to actors.
params = agent.initial_params(rng_key)
self._params_for_actor = (0, jax.device_get(params))
# Set up logging.
if logger is None:
logger = util.NullLogger()
self._logger = logger
def _loss(
self,
theta: hk.Params,
trajectories: util.Transition,
) -> Tuple[jnp.ndarray, Dict[Text, jnp.ndarray]]:
"""Compute vtrace-based actor-critic loss."""
initial_state = jax.tree_map(lambda t: t[0], trajectories.agent_state)
learner_outputs = self._agent.unroll(theta, trajectories.timestep,
initial_state)
v_t = learner_outputs.values[1:]
# Remove bootstrap timestep from non-timesteps.
_, actor_out, _ = jax.tree_map(lambda t: t[:-1], trajectories)
learner_outputs = jax.tree_map(lambda t: t[:-1], learner_outputs)
v_tm1 = learner_outputs.values
# Get the discount, reward, step_type from the *next* timestep.
timestep = jax.tree_map(lambda t: t[1:], trajectories.timestep)
discounts = timestep.discount * self._discount_factor
rewards = timestep.reward
if self._max_abs_reward > 0:
rewards = jnp.clip(rewards, -self._max_abs_reward, self._max_abs_reward)
# The step is uninteresting if we transitioned LAST -> FIRST.
# timestep corresponds to the *next* time step, so we filter for FIRST.
mask = jnp.not_equal(timestep.step_type, int(dm_env.StepType.FIRST))
mask = mask.astype(jnp.float32)
rhos = rlax.categorical_importance_sampling_ratios(
learner_outputs.policy_logits, actor_out.policy_logits,
actor_out.action)
# vmap vtrace_td_error_and_advantage to take/return [T, B, ...].
vtrace_td_error_and_advantage = jax.vmap(
rlax.vtrace_td_error_and_advantage, in_axes=1, out_axes=1)
vtrace_returns = vtrace_td_error_and_advantage(
v_tm1, v_t, rewards, discounts, rhos)
pg_advs = vtrace_returns.pg_advantage
pg_loss = policy_gradient_loss(learner_outputs.policy_logits,
actor_out.action, pg_advs, mask)
baseline_loss = 0.5 * jnp.sum(jnp.square(vtrace_returns.errors) * mask)
ent_loss = entropy_loss(learner_outputs.policy_logits, mask)
total_loss = pg_loss
total_loss += 0.5 * baseline_loss
total_loss += 0.01 * ent_loss
logs = {}
logs['PG_loss'] = pg_loss
logs['baseline_loss'] = baseline_loss
logs['entropy_loss'] = ent_loss
logs['total_loss'] = total_loss
return total_loss, logs
@functools.partial(jax.jit, static_argnums=0)
def update(self, params, opt_state, batch: util.Transition):
"""The actual update function."""
(_, logs), grads = jax.value_and_grad(
self._loss, has_aux=True)(params, batch)
grad_norm_unclipped = optimizers.l2_norm(grads)
updates, updated_opt_state = self._opt.update(grads, opt_state)
params = optix.apply_updates(params, updates)
weight_norm = optimizers.l2_norm(params)
logs.update({
'grad_norm_unclipped': grad_norm_unclipped,
'weight_norm': weight_norm,
})
return params, updated_opt_state, logs
def enqueue_traj(self, traj: util.Transition):
"""Enqueue trajectory."""
self._host_q.put(traj)
def params_for_actor(self) -> Tuple[int, hk.Params]:
return self._params_for_actor
def host_to_device_worker(self):
"""Elementary data pipeline."""
batch = []
while not self._done:
# Try to get a batch. Skip the iteration if we couldn't.
try:
for _ in range(len(batch), self._batch_size):
# As long as possible while keeping learner_test time reasonable.
batch.append(self._host_q.get(timeout=10))
except queue.Empty:
continue
assert len(batch) == self._batch_size
# Prepare for consumption, then put batch onto device.
stacked_batch = jax.tree_multimap(lambda *xs: np.stack(xs, axis=1),
*batch)
self._device_q.put(jax.device_put(stacked_batch))
# Clean out the built-up batch.
batch = []
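# Note (illustrative, not from the original file): each trajectory in `batch`
# has leaves of shape [T, ...], so stacking the self._batch_size trajectories
# along axis=1 yields leaves of shape [T, B, ...], which is the convention
# that _loss and the vmapped rlax calls above expect.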
def run(self, max_iterations: int = -1):
"""Runs the learner for max_iterations updates."""
# Start host-to-device transfer worker.
transfer_thread = threading.Thread(target=self.host_to_device_worker)
transfer_thread.start()
(num_frames, params) = self._params_for_actor
opt_state = self._opt.init(params)
steps = range(max_iterations) if max_iterations != -1 else itertools.count()
for _ in steps:
batch = self._device_q.get()
params, opt_state, logs = self.update(params, opt_state, batch)
num_frames += self._frames_per_iter
# Collect parameters to distribute to downstream actors.
self._params_for_actor = (num_frames, jax.device_get(params))
# Collect and write logs out.
logs = jax.device_get(logs)
logs.update({
'num_frames': num_frames,
})
self._logger.write(logs)
# Shut down.
self._done = True
self._logger.close()
transfer_thread.join()
|
host_state.py
|
"""
Global shared state about the host.
"""
import sys
import threading
import time
import utils
CLIENT_VERSION = '1.0.3'
class HostState(object):
def __init__(self, fiat_auth, predictor):
self.host_ip = None
self.host_mac = None
self.gateway_ip = None
self.packet_processor = None
self.user_key = None
self.secret_salt = None
self.client_version = CLIENT_VERSION
self.persistent_mode = True # Always persistent to remove local Flask
self.raspberry_pi_mode = False # If true, app does not auto-quit upon UI inactivity
# The following objects might be modified concurrently.
self.lock = threading.Lock()
#self.ip_mac_dict = {} # IP -> MAC
self.ip_mac_dict = {'192.168.5.1': 'b8:27:eb:8e:74:ef', '192.168.5.6': '18:69:d8:5b:be:7c', '192.168.5.14': '30:fd:38:7b:62:51', '192.168.5.15': '2c:aa:8e:15:da:5b', '192.168.5.19': '6a:6f:ad:75:45:d9'}
self.pending_dhcp_dict = {} # device_id -> hostname
self.pending_resolver_dict = {} # device_id -> resolver_ip
self.pending_dns_dict = {} # (device_id, domain) -> ip_set
self.pending_flow_dict = {} # flow_key -> flow_stats
self.pending_ua_dict = {} # device_id -> ua_set
self.pending_tls_dict_list = [] # List of tls_dict
self.pending_netdisco_dict = {} # device_id -> device_info_list
self.pending_syn_scan_dict = {} # device_id -> port_list
self.status_text = None
#self.device_whitelist = ['s3df95f7a87', 'sb48959b20c', 's4dbce800d0', 's3425f51919', 's30dac03a76']
self.device_whitelist = ['s3df95f7a87', 'sb48959b20c']
#self.device_whitelist = []
self.has_consent = False
self.byte_count = 0
self.is_inspecting_traffic = True
self.fast_arp_scan = True # Persists for first 5 mins
self.last_ui_contact_ts = time.time() # ts of /is_inspecting_traffic
self.quit = False
self.spoof_arp = True
# FIAT
self.fiat_auth = fiat_auth
self.predictor = predictor
# Constantly checks for IP changes on this host
thread = threading.Thread(target=self.update_ip_thread)
thread.daemon = True
thread.start()
def set_ip_mac_mapping(self, ip, mac):
with self.lock:
self.ip_mac_dict[ip] = mac
def get_ip_mac_dict_copy(self):
with self.lock:
return dict(self.ip_mac_dict)
def is_inspecting(self):
with self.lock:
return self.is_inspecting_traffic
def update_ip_thread(self):
prev_gateway_ip = None
prev_host_ip = None
while True:
try:
self.gateway_ip, _, self.host_ip = utils.get_default_route()
except Exception:
pass
# Upon network changes, clear ARP cache.
if self.gateway_ip != prev_gateway_ip or \
self.host_ip != prev_host_ip:
with self.lock:
self.ip_mac_dict = {}
self.ip_mac_dict = {'192.168.5.1': 'b8:27:eb:8e:74:ef', '192.168.5.6': '18:69:d8:5b:be:7c', '192.168.5.14': '30:fd:38:7b:62:51', '192.168.5.15': '2c:aa:8e:15:da:5b', '192.168.5.19': '6a:6f:ad:75:45:d9'}
prev_gateway_ip = self.gateway_ip
prev_host_ip = self.host_ip
time.sleep(15)
|
imageNetMemory.py
|
'''
Created on Sep 20, 2021
@author: thomas
'''
from multiprocessing import Process, Queue
import webdataset as wds
from torch.utils.data import DataLoader
class ComChannel():
def __init__(self, qIn, qOut, process):
self.qIn = qIn
self.qOut = qOut
self.process = process
self.retrievedElements = 0
def readImageNet(q1, q2, imageNetUrl, batchsize, classes = ["__key__","jpg;png","cls"]):
'''
Read data from an imageNet file and put batches of a defined size into a queue to be extracted elsewhere.
It puts triplets of keys / items / classes into the queue. Once all data is read, it adds `False` to the queue.
Batches should have at most batchsize elements, but can have fewer, depending on the number of workers.
Parameters
q1 : a multiprocessing queue to put batches of images in (multiprocessing.Queue)
q2 : a multiprocessing queue to obtain signals when batches are taken out (multiprocessing.Queue)
imageNetUrl : a FileName/URL of an imageNet file (String)
batchsize : the batchsize of the dataloader (int)
'''
print('Starting to read file')
queuesize = 0
if len(classes) == 0:
dataset = wds.WebDataset(imageNetUrl).shuffle(1000)
else:
dataset = wds.WebDataset(imageNetUrl).shuffle(1000).to_tuple(*classes)
for sample in dataset:
if queuesize > batchsize:
# this is ugly, and I would prefer something better...
retrievedEntries = q2.get(block=True)
queuesize -= retrievedEntries
q1.put(sample)
queuesize += 1
#Finally, if we can't read anything any more, we send a signal to close the Process and to close the queue.
q1.put(False)
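# Hedged usage sketch (not part of the original module): wiring readImageNet
# to a pair of queues from a consumer. The shard path below is a placeholder
# assumption.
def _example_consume_shard(shard_path='imagenet-shard-000000.tar', batchsize=100):
    q_data, q_ack = Queue(), Queue()
    reader = Process(target=readImageNet, args=(q_data, q_ack, shard_path, batchsize))
    reader.start()
    consumed = 0
    while True:
        sample = q_data.get()
        if sample is False:  # the reader signals completion with False
            break
        consumed += 1
        if consumed % batchsize == 0:
            q_ack.put(batchsize)  # tell the reader a full batch was drained
    reader.join()
    return consumed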
def readImageNetBasic(q1, imageNetUrl):
'''
Read data from an imageNet file and put each element into the given queue.
q1 : a multiprocessing queue to put images in (multiprocessing.Queue)
imageNetUrl : a FileName/URL of an imageNet file (String)
'''
print('Starting to read file')
print(imageNetUrl)
dataset = wds.WebDataset(imageNetUrl)
dataloader = DataLoader(dataset)
for element in dataloader:
q1.put(element)
#Finally, if we can't read anything any more, we send a signal to close the Process and to close the queue.
q1.put(False)
class imageNetMemory(object):
'''
classdocs
'''
def __init__(self, pushQueue, waitQueue, imageNetFiles, batchSize=1000, classes = ["__key__","jpg;png","cls"]):
'''
Constructor
'''
self.readerCount = 0
self.resultQueue = pushQueue
self.batchSize = batchSize
self.classes = classes
self.waitQueue = waitQueue
self.pushedCount = 0
self.comChannels = []
if isinstance(imageNetFiles, str):
imageNetFiles = [imageNetFiles]
self.imageNetFiles = imageNetFiles
for file in self.imageNetFiles:
q_get = Queue()
q_push = Queue()
p = Process(target=readImageNet, args=(q_get, q_push, file, self.batchSize, self.classes))
p.start()
self.comChannels.append(ComChannel(q_get, q_push, p))
self.readerCount += 1
def __del__(self):
for comChannel in self.comChannels:
#kill all spawned processes, if this object is killed
comChannel.process.kill()
def start(self):
while self.readerCount > 0:
chansToDel = []
for comChannel in self.comChannels:
if not comChannel.qIn.empty():
pushElement = comChannel.qIn.get()
if pushElement:
self.resultQueue.put(pushElement)
comChannel.retrievedElements += 1
else:
self.readerCount -= 1
chansToDel.append(comChannel)
#clean up remaining processes
for comChannel in chansToDel:
# End the finished reader process
comChannel.process.join()
self.comChannels.remove(comChannel)
if not self.waitQueue.empty():
#clear the element
self.waitQueue.get()
#If we have processed data, clear the processed data from all reading processes
removed = self.batchSize
for comChannel in self.comChannels:
# free Com channels
c_removed = comChannel.retrievedElements
if (removed - c_removed) < 0:
toRemove = removed
else:
toRemove = c_removed
removed = removed - c_removed
comChannel.qOut.put(toRemove)
comChannel.retrievedElements = c_removed - toRemove
#Send the termination signal
self.resultQueue.put(False)
|
serve.py
|
#!/usr/bin/python
import os
import threading
from time import sleep, time
import datetime
import sys
from ClientSettings import settings
from bin import communication, monitor
Communication = communication.Communication()
Monitor = monitor.Monitor()
BASE_DIR = os.getcwd()
FILE_DIR = (BASE_DIR + '/static/')
FILE = (BASE_DIR + '/static/data.json')
def fileCheck():
if os.path.isfile(FILE):
return True
else:
return False
def start():
while True:
print datetime.datetime.now()
if not Monitor.setData(FILE):
print 'We hit rock bottom'
return False
sleep(settings.INTERVAL)
def runServer():
print 'serving at http://127.0.0.1:' + str(settings.PORT)
try:
Communication.run_server(FILE_DIR)
except ValueError:
sys.exit("Socket not available.")
if __name__ == "__main__":
if fileCheck():
# Spawning a thread both of them.
timerThread = threading.Thread(target=start)
timerThread.start()
serverThread = threading.Thread(target=runServer)
serverThread.start()
else:
print 'Sorry, there is no file to serve.'
|
fixtures.py
|
import threading
import pytest
from contextlib import contextmanager
import os
import logging
import sys
from robotframework_ls.options import USE_TIMEOUTS, NO_TIMEOUT
__file__ = os.path.abspath(__file__) # @ReservedAssignment
log = logging.getLogger(__name__)
TIMEOUT = int(os.getenv("PYTEST_TIMEOUT", 7))
if not USE_TIMEOUTS:
TIMEOUT = NO_TIMEOUT # i.e.: None
LIBSPEC_1 = """<?xml version="1.0" encoding="UTF-8"?>
<keywordspec name="case1_library" type="library" format="ROBOT" generated="20200316 10:45:35">
<version></version>
<scope>global</scope>
<namedargs>yes</namedargs>
<doc>Documentation for library ``case1_library``.</doc>
<kw name="new Verify Another Model">
<arguments>
<arg>new model=10</arg>
</arguments>
<doc></doc>
<tags>
</tags>
</kw>
<kw name="New Verify Model">
<arguments>
<arg>new model</arg>
</arguments>
<doc>:type new_model: int</doc>
<tags>
</tags>
</kw>
</keywordspec>
"""
LIBSPEC_2 = """<?xml version="1.0" encoding="UTF-8"?>
<keywordspec name="case2_library" type="library" format="ROBOT" generated="20200316 10:45:35">
<version></version>
<scope>global</scope>
<namedargs>yes</namedargs>
<doc>Documentation for library ``case2_library``.</doc>
<kw name="Case 2 Verify Another Model">
<arguments>
<arg>new model=10</arg>
</arguments>
<doc></doc>
<tags>
</tags>
</kw>
<kw name="Case 2 Verify Model">
<arguments>
<arg>new model</arg>
</arguments>
<doc>:type new_model: int</doc>
<tags>
</tags>
</kw>
</keywordspec>
"""
LIBSPEC_2_A = """<?xml version="1.0" encoding="UTF-8"?>
<keywordspec name="case2_library" type="library" format="ROBOT" generated="20200316 10:45:35">
<version></version>
<scope>global</scope>
<namedargs>yes</namedargs>
<doc>Documentation for library ``case2_library``.</doc>
<kw name="Case 2 A Verify Another Model">
<arguments>
<arg>new model=10</arg>
</arguments>
<doc></doc>
<tags>
</tags>
</kw>
<kw name="Case 2 A Verify Model">
<arguments>
<arg>new model</arg>
</arguments>
<doc>:type new_model: int</doc>
<tags>
</tags>
</kw>
</keywordspec>
"""
def wait_for_condition(condition, msg=None, timeout=TIMEOUT, sleep=1 / 20.0):
from robotframework_ls._utils import wait_for_condition as w
return w(condition, msg=msg, timeout=timeout, sleep=sleep)
@pytest.fixture
def ws_root_path(tmpdir):
return str(tmpdir.join("root"))
@contextmanager
def _communicate_lang_server(
write_to, read_from, language_server_client_class=None, kwargs={}
):
if language_server_client_class is None:
from robotframework_ls_tests.language_server_client import _LanguageServerClient
language_server_client_class = _LanguageServerClient
from robotframework_ls.jsonrpc.streams import (
JsonRpcStreamReader,
JsonRpcStreamWriter,
)
w = JsonRpcStreamWriter(write_to, sort_keys=True)
r = JsonRpcStreamReader(read_from)
language_server = language_server_client_class(w, r, **kwargs)
yield language_server
if language_server.require_exit_messages:
language_server.shutdown()
language_server.exit()
@pytest.fixture
def language_server_tcp(log_file):
"""
Starts a language server in the same process and communicates through tcp.
"""
from robotframework_ls.__main__ import main
import socket
from robotframework_ls_tests.monitor_fixtures import dump_threads
class _LanguageServerConfig(object):
address = None
config = _LanguageServerConfig()
start_event = threading.Event()
finish_event = threading.Event()
language_server_instance_final = []
def after_bind(server):
address = server.socket.getsockname()
config.address = address
start_event.set()
def start_language_server():
def language_server_class(*args, **kwargs):
from robotframework_ls.robotframework_ls_impl import (
RobotFrameworkLanguageServer,
)
language_server_instance = RobotFrameworkLanguageServer(*args, **kwargs)
language_server_instance_final.append(language_server_instance)
return language_server_instance
main(
[
"--tcp",
"--host=127.0.0.1",
"--port=0",
"-vv",
"--log-file=%s" % log_file,
],
after_bind=after_bind,
language_server_class=language_server_class,
)
finish_event.set()
t = threading.Thread(target=start_language_server, name="Language Server", args=())
t.start()
assert start_event.wait(TIMEOUT)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(config.address)
write_to = s.makefile("wb")
read_from = s.makefile("rb")
with _communicate_lang_server(write_to, read_from) as lang_server_client:
wait_for_condition(lambda: len(language_server_instance_final) == 1)
lang_server_client.language_server_instance = language_server_instance_final[0]
yield lang_server_client
if not finish_event.wait(TIMEOUT):
dump_threads()
raise AssertionError(
"Language server thread did not exit in the available timeout."
)
@pytest.fixture
def log_file(tmpdir):
logs_dir = tmpdir.join("logs")
logs_dir.mkdir()
filename = str(logs_dir.join("robotframework_ls_tests.log"))
sys.stderr.write("Logging subprocess to: %s" % (filename,))
yield filename
for name in os.listdir(str(logs_dir)):
print("\n--- %s contents:" % (name,))
with open(str(logs_dir.join(name)), "r") as stream:
print(stream.read())
@pytest.fixture(autouse=True)
def config_logger(tmpdir):
from robotframework_ls.robotframework_log import configure_logger
configure_logger("test", 2)
@pytest.fixture
def language_server_process(log_file):
from robotframework_ls import __main__
from robotframework_ls._utils import kill_process_and_subprocesses
import subprocess
language_server_process = subprocess.Popen(
[sys.executable, "-u", __main__.__file__, "-vv", "--log-file=%s" % log_file],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
returncode = language_server_process.poll()
assert returncode is None
yield language_server_process
returncode = language_server_process.poll()
if returncode is None:
kill_process_and_subprocesses(language_server_process.pid)
@pytest.fixture(autouse=True, scope="session")
def sync_builtins(tmpdir_factory, cases):
"""
Pre-generate the builtins.
"""
from robotframework_ls.impl.libspec_manager import LibspecManager
import shutil
user_home = str(tmpdir_factory.mktemp("ls_user_home"))
os.environ["ROBOTFRAMEWORK_LS_USER_HOME"] = user_home
internal_libspec_dir = LibspecManager.get_internal_builtins_libspec_dir()
try:
os.makedirs(internal_libspec_dir)
except OSError:
# Ignore exception if it's already created.
pass
builtin_libs = cases.get_path("builtin_libs")
# Uncomment the line to regenerate the libspec files for the builtin libraries.
# LibspecManager(builtin_libspec_dir=builtin_libs)
# Note: use private copy instead of re-creating because it's one of the
# slowest things when starting test cases.
# Locally it's the difference between the test suite taking 15 or 25 seconds
# (with tests running on 12 cpus in parallel).
for name in os.listdir(builtin_libs):
shutil.copyfile(
os.path.join(builtin_libs, name), os.path.join(internal_libspec_dir, name)
)
@pytest.fixture
def libspec_manager(tmpdir):
from robotframework_ls.impl.libspec_manager import LibspecManager
libspec_manager = LibspecManager(user_libspec_dir=str(tmpdir.join("user_libspec")))
yield libspec_manager
libspec_manager.dispose()
@pytest.fixture
def language_server_io(language_server_process):
"""
Starts a language server in a new process and communicates through stdin/stdout streams.
"""
write_to = language_server_process.stdin
read_from = language_server_process.stdout
with _communicate_lang_server(write_to, read_from) as lang_server_client:
yield lang_server_client
@pytest.fixture(params=["io", "tcp"])
def language_server(request):
if request.param == "io":
return request.getfixturevalue("language_server_io")
else:
return request.getfixturevalue("language_server_tcp")
class _CasesFixture(object):
def __init__(self):
self.resources_dir = os.path.join(os.path.dirname(__file__), "_resources")
assert os.path.exists(self.resources_dir)
def get_path(self, resources_relative_path, must_exist=True):
path = os.path.join(self.resources_dir, resources_relative_path)
if must_exist:
assert os.path.exists(path), "%s does not exist." % (path,)
return path
def copy_to(self, case, dest_dir):
import shutil
shutil.copytree(self.get_path(case, must_exist=True), dest_dir)
@pytest.fixture(scope="session")
def cases():
return _CasesFixture()
class _WorkspaceFixture(object):
def __init__(self, cases):
self._cases = cases
self._ws = None
@property
def ws(self):
if self._ws is None:
raise AssertionError(
"set_root must be called prior to using the workspace."
)
return self._ws
def set_root(self, relative_path, **kwargs):
from robotframework_ls import uris
from robotframework_ls.impl.robot_workspace import RobotWorkspace
path = self._cases.get_path(relative_path)
self._ws = RobotWorkspace(uris.from_fs_path(path), **kwargs)
def get_doc(self, root_relative_path, create=True):
from robotframework_ls import uris
path = os.path.join(self._ws.root_path, root_relative_path)
uri = uris.from_fs_path(path)
return self.ws.get_document(uri, create=create)
@pytest.fixture
def workspace(cases):
return _WorkspaceFixture(cases)
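# Hedged usage sketch (not part of the original file): how a test could
# combine the fixtures above. The case name and file are placeholder
# assumptions, and RobotWorkspace is assumed to accept `libspec_manager`.
def _example_test_doc_lookup(workspace, libspec_manager):
    workspace.set_root("case1", libspec_manager=libspec_manager)
    doc = workspace.get_doc("case1.robot")
    assert doc is not None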
|
monitor.py
|
# -*- coding: utf-8 -*-
# File: monitor.py
import json
import numpy as np
import operator
import os
import re
import shutil
import time
from collections import defaultdict
from datetime import datetime
import six
import threading
from ..compat import tfv1 as tf
from ..libinfo import __git_version__
from ..tfutils.summary import create_image_summary, create_scalar_summary
from ..utils import fs, logger
from ..utils.develop import HIDE_DOC
from .base import Callback
__all__ = ['MonitorBase', 'Monitors',
'TFEventWriter', 'JSONWriter',
'ScalarPrinter', 'SendMonitorData',
'CometMLMonitor']
def image_to_nhwc(arr):
if arr.ndim == 4:
pass
elif arr.ndim == 3:
if arr.shape[-1] in [1, 3, 4]:
arr = arr[np.newaxis, :]
else:
arr = arr[:, :, :, np.newaxis]
elif arr.ndim == 2:
arr = arr[np.newaxis, :, :, np.newaxis]
else:
raise ValueError("Array of shape {} is not an image!".format(arr.shape))
return arr
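# Summary of the conversions above (illustrative note, not from the original
# file):
#   (H, W)       -> (1, H, W, 1)   single grayscale image
#   (H, W, 3)    -> (1, H, W, 3)   single color image, channels last
#   (N, H, W)    -> (N, H, W, 1)   batch of grayscale images
#   (N, H, W, C) -> (N, H, W, C)   already NHWC, returned unchanged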
class MonitorBase(Callback):
"""
Base class for monitors which monitor a training progress, by processing different types of
summary/statistics from trainer.
.. document private functions
.. automethod:: _setup_graph
"""
_chief_only = False
def setup_graph(self, trainer):
# Set attributes following Callback.setup_graph
self.trainer = trainer
self.graph = tf.get_default_graph()
self._setup_graph()
def _setup_graph(self):
""" Override this method to setup the monitor."""
pass
def process_summary(self, summary):
"""
Process a tf.Summary.
"""
pass
def process(self, name, val):
"""
Process a key-value pair.
"""
pass
def process_scalar(self, name, val):
"""
Args:
val: a scalar
"""
pass
def process_image(self, name, val):
"""
Args:
val (np.ndarray): 4D (NHWC) numpy array of images in range [0,255].
If channel is 3, assumed to be RGB.
"""
pass
def process_event(self, evt):
"""
Args:
evt (tf.Event): the most basic format acceptable by tensorboard.
It could include Summary, RunMetadata, LogMessage, and more.
"""
pass
# TODO process other types
class NoOpMonitor(MonitorBase):
def __init__(self, name=None):
self._name = name
def __str__(self):
if self._name is None:
return "NoOpMonitor"
return "NoOpMonitor({})".format(self._name)
class Monitors(Callback):
"""
Merge monitors together for trainer to use.
In training, each trainer will create a :class:`Monitors` instance,
and you can access it through ``trainer.monitors``.
You should use ``trainer.monitors`` for logging and it will dispatch your
logs to each sub-monitor.
"""
_chief_only = False
def __init__(self, monitors):
self._scalar_history = ScalarHistory()
self._monitors = monitors + [self._scalar_history]
for m in self._monitors:
assert isinstance(m, MonitorBase), m
def _setup_graph(self):
# scalar_history's other methods were not called.
# but they are not useful for now
self._scalar_history.setup_graph(self.trainer)
def _dispatch(self, func):
for m in self._monitors:
func(m)
def put_summary(self, summary):
"""
Put a `tf.Summary`.
"""
if isinstance(summary, six.binary_type):
summary = tf.Summary.FromString(summary)
assert isinstance(summary, tf.Summary), type(summary)
# TODO other types
for val in summary.value:
if val.WhichOneof('value') == 'simple_value':
val.tag = re.sub('tower[0-9]+/', '', val.tag) # TODO move to subclasses
# TODO This hack is still needed; it seems to disappear only when
# compiled from source.
suffix = '-summary' # tensorflow#6150, tensorboard#59
if val.tag.endswith(suffix):
val.tag = val.tag[:-len(suffix)]
self._dispatch(lambda m: m.process_scalar(val.tag, val.simple_value))
self._dispatch(lambda m: m.process_summary(summary))
def put_scalar(self, name, val):
"""
Put a scalar.
"""
if isinstance(val, np.floating):
val = float(val)
if isinstance(val, np.integer):
val = int(val)
self._dispatch(lambda m: m.process_scalar(name, val))
s = create_scalar_summary(name, val)
self._dispatch(lambda m: m.process_summary(s))
def put_image(self, name, val):
"""
Put an image.
Args:
name (str):
val (np.ndarray): 2D, 3D (HWC) or 4D (NHWC) numpy array of images
in range [0,255]. If channel is 3, assumed to be RGB.
"""
assert isinstance(val, np.ndarray)
arr = image_to_nhwc(val)
self._dispatch(lambda m: m.process_image(name, arr))
s = create_image_summary(name, arr)
self._dispatch(lambda m: m.process_summary(s))
def put_event(self, evt):
"""
Put an :class:`tf.Event`.
`step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.
Args:
evt (tf.Event):
"""
evt.step = self.global_step
evt.wall_time = time.time()
self._dispatch(lambda m: m.process_event(evt))
def get_latest(self, name):
"""
Get latest scalar value of some data.
If you run multiprocess training, keep in mind that
the data is perhaps only available on chief process.
Returns:
scalar
"""
return self._scalar_history.get_latest(name)[1]
def get_history(self, name):
"""
Get a history of the scalar value of some data.
If you run multiprocess training, keep in mind that
the data is perhaps only available on chief process.
Returns:
a list of (global_step, value) pairs: history data for this scalar
"""
return self._scalar_history.get_history(name)
class TFEventWriter(MonitorBase):
"""
Write summaries to TensorFlow event file.
"""
def __init__(self, logdir=None, max_queue=10, flush_secs=120, split_files=False):
"""
Args:
logdir: ``logger.get_logger_dir()`` by default.
max_queue, flush_secs: Same as in :class:`tf.summary.FileWriter`.
split_files: if True, split events to multiple files rather than
append to a single file. Useful on certain filesystems where append is expensive.
"""
if logdir is None:
logdir = logger.get_logger_dir()
assert tf.gfile.IsDirectory(logdir), logdir
self._logdir = fs.normpath(logdir)
self._max_queue = max_queue
self._flush_secs = flush_secs
self._split_files = split_files
def __new__(cls, logdir=None, max_queue=10, flush_secs=120, **kwargs):
if logdir is None:
logdir = logger.get_logger_dir()
if logdir is not None:
return super(TFEventWriter, cls).__new__(cls)
else:
logger.warn("logger directory was not set. Ignore TFEventWriter.")
return NoOpMonitor("TFEventWriter")
def _setup_graph(self):
self._writer = tf.summary.FileWriter(
self._logdir, max_queue=self._max_queue, flush_secs=self._flush_secs)
def _write_graph(self):
self._writer.add_graph(self.graph)
def _before_train(self):
# Writing the graph is expensive (takes ~2min) when the graph is large.
# Therefore use a separate thread. It will then run in the
# background while TF is warming up in the first several iterations.
self._write_graph_thread = threading.Thread(target=self._write_graph)
self._write_graph_thread.daemon = True
self._write_graph_thread.start()
@HIDE_DOC
def process_summary(self, summary):
self._writer.add_summary(summary, self.global_step)
@HIDE_DOC
def process_event(self, evt):
self._writer.add_event(evt)
def _trigger(self): # flush every epoch
self._writer.flush()
if self._split_files:
self._writer.close()
self._writer.reopen() # open new file
def _after_train(self):
self._writer.close()
class JSONWriter(MonitorBase):
"""
Write all scalar data to a json file under ``logger.get_logger_dir()``, grouped by their global step.
If found an earlier json history file, will append to it.
"""
FILENAME = 'stats.json'
"""
The name of the json file. Do not change it.
"""
def __new__(cls):
if logger.get_logger_dir():
return super(JSONWriter, cls).__new__(cls)
else:
logger.warn("logger directory was not set. Ignore JSONWriter.")
return NoOpMonitor("JSONWriter")
@staticmethod
def load_existing_json(dir=None):
"""
Look for an existing json under dir (defaults to
:meth:`logger.get_logger_dir()`) named "stats.json",
and return the loaded list of statistics if found. Returns None otherwise.
"""
if dir is None:
dir = logger.get_logger_dir()
fname = os.path.join(dir, JSONWriter.FILENAME)
if tf.gfile.Exists(fname):
with open(fname) as f:
stats = json.load(f)
assert isinstance(stats, list), type(stats)
return stats
return None
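# Illustrative note (assumption, not from the original file): the returned
# list holds one dict per write, e.g.
# [{'epoch_num': 1, 'global_step': 100, 'some_scalar': 0.25}], where
# 'some_scalar' stands for whatever scalars were put during training.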
@staticmethod
def load_existing_epoch_number(dir=None):
"""
Try to load the latest epoch number from an existing json stats file (if any).
Returns None if not found.
"""
stats = JSONWriter.load_existing_json(dir)
try:
return int(stats[-1]['epoch_num'])
except Exception:
return None
# initialize the stats here, because before_train from other callbacks may use it
def _setup_graph(self):
self._stats = []
self._stat_now = {}
self._last_gs = -1
def _before_train(self):
stats = JSONWriter.load_existing_json()
self._fname = os.path.join(logger.get_logger_dir(), JSONWriter.FILENAME)
if stats is not None:
try:
epoch = stats[-1]['epoch_num'] + 1
except Exception:
epoch = None
# check against the current training settings
# therefore this logic needs to be in before_train stage
starting_epoch = self.trainer.loop.starting_epoch
if epoch is None or epoch == starting_epoch:
logger.info("Found existing JSON inside {}, will append to it.".format(logger.get_logger_dir()))
self._stats = stats
else:
logger.warn(
"History epoch={} from JSON is not the predecessor of the current starting_epoch={}".format(
epoch - 1, starting_epoch))
logger.warn("If you want to resume old training, either use `AutoResumeTrainConfig` "
"or correctly set the new starting_epoch yourself to avoid inconsistency. ")
backup_fname = JSONWriter.FILENAME + '.' + datetime.now().strftime('%m%d-%H%M%S')
backup_fname = os.path.join(logger.get_logger_dir(), backup_fname)
logger.warn("Now, we will train with starting_epoch={} and backup old json to {}".format(
self.trainer.loop.starting_epoch, backup_fname))
shutil.move(self._fname, backup_fname)
# in case we have something to log here.
self._trigger()
def _trigger_step(self):
# will do this in trigger_epoch
if self.local_step != self.trainer.steps_per_epoch - 1:
self._trigger()
def _trigger_epoch(self):
self._trigger()
@HIDE_DOC
def process_scalar(self, name, val):
self._stat_now[name] = val
def _trigger(self):
"""
Add stats to json and dump to disk.
Note that this method is idempotent.
"""
if len(self._stat_now):
self._stat_now['epoch_num'] = self.epoch_num
self._stat_now['global_step'] = self.global_step
self._stats.append(self._stat_now)
self._stat_now = {}
self._write_stat()
def _write_stat(self):
tmp_filename = self._fname + '.tmp'
try:
with open(tmp_filename, 'w') as f:
json.dump(self._stats, f)
shutil.move(tmp_filename, self._fname)
except IOError: # disk error sometimes..
logger.exception("Exception in JSONWriter._write_stat()!")
class ScalarPrinter(MonitorBase):
"""
Print scalar data into terminal.
"""
def __init__(self, enable_step=False, enable_epoch=True,
whitelist=None, blacklist=None):
"""
Args:
enable_step, enable_epoch (bool): whether to print the
monitor data (if any) between steps or between epochs.
whitelist (list[str] or None): A list of regex. Only names
matching some regex will be allowed for printing.
Defaults to match all names.
blacklist (list[str] or None): A list of regex. Names matching
any regex will not be printed. Defaults to match no names.
"""
def compile_regex(rs):
if rs is None:
return None
rs = {re.compile(r) for r in rs}
return rs
self._whitelist = compile_regex(whitelist)
if blacklist is None:
blacklist = []
self._blacklist = compile_regex(blacklist)
self._enable_step = enable_step
self._enable_epoch = enable_epoch
self._dic = {}
# in case we have something to log here.
def _before_train(self):
self._trigger()
def _trigger_step(self):
if self._enable_step:
if self.local_step != self.trainer.steps_per_epoch - 1:
# not the last step
self._trigger()
else:
if not self._enable_epoch:
self._trigger()
# otherwise, will print them together
def _trigger_epoch(self):
if self._enable_epoch:
self._trigger()
@HIDE_DOC
def process_scalar(self, name, val):
self._dic[name] = float(val)
def _trigger(self):
# Print stats here
def match_regex_list(regexs, name):
for r in regexs:
if r.search(name) is not None:
return True
return False
for k, v in sorted(self._dic.items(), key=operator.itemgetter(0)):
if self._whitelist is None or \
match_regex_list(self._whitelist, k):
if not match_regex_list(self._blacklist, k):
logger.info('{}: {:.5g}'.format(k, v))
self._dic = {}
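# ScalarPrinter usage sketch (the regexes are illustrative, not from this module):
#   ScalarPrinter(whitelist=[r'^val'], blacklist=[r'histogram$'])
# prints only scalars whose names start with "val" and skips any name ending in "histogram".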
class ScalarHistory(MonitorBase):
"""
Only internally used by monitors.
"""
def __init__(self):
self._dic = defaultdict(list)
@HIDE_DOC
def process_scalar(self, name, val):
self._dic[name].append((self.global_step, float(val)))
def get_latest(self, name):
hist = self._dic[name]
if len(hist) == 0:
raise KeyError("No available data for the key: {}".format(name))
else:
return hist[-1]
def get_history(self, name):
return self._dic[name]
class SendMonitorData(MonitorBase):
"""
Execute a command with some specific scalar monitor data.
    This is useful for, e.g., building a custom statistics monitor.
    It will try to send the data once all the requested stats have been received.
"""
def __init__(self, command, names):
"""
Args:
command(str): a command to execute. Use format string with stat
names as keys.
names(list or str): data name(s) to use.
Example:
Send the stats to your phone through pushbullet:
.. code-block:: python
SendMonitorData('curl -u your_id: https://api.pushbullet.com/v2/pushes \\
-d type=note -d title="validation error" \\
-d body={validation_error} > /dev/null 2>&1',
'validation_error')
"""
self.command = command
if not isinstance(names, list):
names = [names]
self.names = names
self.dic = {}
@HIDE_DOC
def process_scalar(self, name, val):
if name in self.names:
self.dic[name] = val
def _trigger_step(self):
self._trigger()
def _trigger(self):
try:
v = {k: self.dic[k] for k in self.names}
except KeyError:
return
cmd = self.command.format(**v)
ret = os.system(cmd)
if ret != 0:
logger.error("Command '{}' failed with ret={}!".format(cmd, ret))
self.dic = {}
class CometMLMonitor(MonitorBase):
"""
Send scalar data and the graph to https://www.comet.ml.
Note:
1. comet_ml requires you to `import comet_ml` before importing tensorflow or tensorpack.
2. The "automatic output logging" feature of comet_ml will make the training progress bar appear to freeze.
Therefore the feature is disabled by default.
"""
def __init__(self, experiment=None, tags=None, **kwargs):
"""
Args:
            experiment (comet_ml.Experiment): if provided, all other arguments must be left unset
tags (list[str]): experiment tags
kwargs: arguments used to initialize :class:`comet_ml.Experiment`,
such as project name, API key, etc.
Refer to its documentation for details.
"""
if experiment is not None:
self._exp = experiment
assert tags is None and len(kwargs) == 0
else:
from comet_ml import Experiment
            kwargs.setdefault('log_code', True)  # code logging itself does not work here, but git patch logging requires this flag
kwargs.setdefault('auto_output_logging', None)
self._exp = Experiment(**kwargs)
if tags is not None:
self._exp.add_tags(tags)
self._exp.set_code("Code logging is impossible ...")
self._exp.log_dependency('tensorpack', __git_version__)
@property
def experiment(self):
"""
The :class:`comet_ml.Experiment` instance.
"""
return self._exp
def _before_train(self):
self._exp.set_model_graph(tf.get_default_graph())
@HIDE_DOC
def process_scalar(self, name, val):
self._exp.log_metric(name, val, step=self.global_step)
@HIDE_DOC
def process_image(self, name, val):
self._exp.set_step(self.global_step)
for idx, v in enumerate(val):
log_name = "{}_step{}{}".format(
name,
self.global_step,
"_" + str(idx) if len(val) > 1 else "")
self._exp.log_image(v, image_format="jpeg", name=log_name, image_minmax=(0, 255))
def _after_train(self):
self._exp.end()
def _after_epoch(self):
self._exp.log_epoch_end(self.epoch_num)
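# Usage sketch (not from the original module): monitors like the ones above are
# usually handed to the trainer through a TrainConfig, e.g.
#
#   from tensorpack import TrainConfig
#   config = TrainConfig(
#       model=model, data=data,   # model/data are placeholders for your own objects
#       monitors=[JSONWriter(), ScalarPrinter(), CometMLMonitor(project_name="demo")],
#   )
#
# `project_name="demo"` is an illustrative comet_ml keyword argument.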
|
multiprocessing_module.py
|
import multiprocessing
from level_2.module.gesture_control import main_avm
queue_shared = multiprocessing.Queue()
process_object = multiprocessing.Process(target = main_avm, args = (queue_shared,))
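# A minimal launch sketch (assumption: main_avm loops over items it reads from the
# queue). The __main__ guard matters when the "spawn" start method is used.
if __name__ == "__main__":
    process_object.start()
    queue_shared.put("example-command")  # hypothetical message; depends on what main_avm expects
    process_object.join()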
|
updatedb.py
|
# -*- coding:utf-8 -*-
import sys
import time
import os
from time import perf_counter as clock  # time.clock() was removed in Python 3.8
from datetime import datetime,date,timedelta
import logging
from multiprocessing import Process
import threading, multiprocessing
from nsnqtlib.servers import serverlist
from nsnqtlib.db import mongodb
from nsnqtlib.utils import WindQuote
from nsnqtlib.utils.basequote import *
from nsnqtlib.config import DB_SERVER,DB_PORT,USER,PWD,AUTHDBNAME
from nsnqtlib.servers.serverlist import LOCAL_SERVER_IP,MONGODB_PORT_DEFAULT
# 399001.SZ Shenzhen Component Index, listed 1995-1-23, but can be back-calculated to 1991-4-3
# 399005.SZ SME Board Index, listed 2006-1-24, but can be back-calculated to 2005-6-7
# 399006.SZ ChiNext Index, listed 2010-6-1
# 000001.SH Shanghai Composite Index, listed 1991-7-15, but can be back-calculated to 1990-12-19
# 000300.SH/399300.SZ CSI 300, listed 2005-4-8, but can be back-calculated to 2002-1-4
index_list = ["399001.SZ","399005.SZ","399006.SZ","000001.SH","000300.SH"]
dblogger = logging.getLogger()
wq = None
def init_log():
dblogger.setLevel(logging.DEBUG)
fh = logging.FileHandler('log/dbupdate.log')
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s] %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
dblogger.addHandler(fh)
dblogger.addHandler(ch)
# execute update in concurrent processes to improve performance
def update_proc(update_list, db, start_day=None, end_day=None):
    # resolve "today" at call time rather than at import time
    start_day = start_day or datetime.today()
    end_day = end_day or datetime.today()
    init_log()
dblogger.info('[%s]Child update process started...' % (os.getpid()))
wqp = WindQuote.WndQuery()
quote_string,quote_list = get_quote_string(db)
print("db=%s,str=%s,num=%d" %(db,quote_string,len(update_list)))
update_size = len(update_list)
local_db = mongodb.MongoDB()
ali_db = mongodb.MongoDB(DB_SERVER,DB_PORT,USER,PWD,AUTHDBNAME)
failure_count,success_count= 0,0
start = clock()
for j in range(update_size):
stock_name = update_list[j]
stock_data = wqp.get_history_data(stock_name,quote_string,start_day,end_day)
if stock_data.ErrorCode == 0:
local_db.save_data(db,stock_name,quote_list,stock_data)
ali_db.save_data(db,stock_name,quote_list,stock_data)
success_count = success_count + 1
end = clock()
dblogger.info("[%s]%s in %s update succeeded, loop=%d[%d/%d],used %ds" %(os.getpid(),stock_name,db,j,success_count,update_size,end-start))
else:
failure_count = failure_count +1
dblogger.error("[%s]%s in %s get history data failed, errcode:%s" %(os.getpid(),stock_name, db, stock_data.ErrorCode))
end = clock()
dblogger.info("\n[%d]updated %d/%d stocks, used %ds" %(os.getpid(),success_count,update_size,end-start) )
def get_stock_list():
update_list = wq.wset("listedsecuritygeneralview","sectorid=a001010100000000")
if update_list.ErrorCode != 0:
dblogger.error("get_update_list() failed, errcode:%d" %(update_list.ErrorCode))
else:
dblogger.error("%d stock names returned" %(len(update_list.Data[0])))
return update_list.Data[0]+index_list
'''
w.wset("sectorconstituent","sectorid=1000009163000000") # SSE ETFs
w.wset("sectorconstituent","sectorid=1000009164000000") # SZSE ETFs
w.wset("sectorconstituent","sectorid=1000023348000000") # parent codes listed on both A and B markets
w.wset("sectorconstituent","sectorid=1000023349000000") # A-share codes listed on both A and B markets
w.wset("sectorconstituent","sectorid=1000023350000000") # B-share codes listed on both A and B markets
w.wset("sectorconstituent","sectorid=a101020600000000") # convertible bonds
w.wset("sectorconstituent","sectorid=a001010100000000") # all listed A shares
w.wset("sectorconstituent","sectorid=1000022276000000") # all listed US stocks
w.wset("sectorconstituent","sectorid=a005010100000000") # NASDAQ-listed stocks
w.wset("sectorconstituent","sectorid=a005010200000000") # NYSE-listed stocks
w.wset("sectorconstituent","sectorid=1000006535000000") # US mutual funds
w.wset("sectorconstituent","sectorid=a599010101000000") # all CFFEX products
w.wset("sectorconstituent","sectorid=a599010201000000") # all SHFE futures products
w.wset("sectorconstituent","sectorid=a599010301000000") # all DCE products
w.wset("sectorconstituent","sectorid=a599010401000000") # all CZCE products
w.wset("sectorconstituent","sectorid=1000014797000000") # SSE pledge-eligible repo bonds
'''
def get_etf_list():
update_list = wq.wset("sectorconstituent","sectorid=1000009163000000").Data[1] # 上证ETF
update_list+= wq.wset("sectorconstituent","sectorid=1000009164000000").Data[1] # 深圳ETF
return update_list
def get_bond_list():
update_list = wq.wset("sectorconstituent","sectorid=a101020600000000").Data[1] # 可转债
return update_list
def get_ab_etf_list():
update_list = wq.wset("sectorconstituent","sectorid=1000023348000000").Data[1] # A,B均上市 母代码
return update_list
def get_ab_list():
update_list = wq.wset("sectorconstituent","sectorid=1000023349000000").Data[1] # A,B均上市 A代码
update_list += wq.wset("sectorconstituent","sectorid=1000023350000000").Data[1] # A,B均上市 B代码
return update_list
def mp_update(update_list, process_num,db, start_day, end_day):
    if len(update_list) < 1:
        dblogger.warning("No records[%d] for %s update" % (len(update_list), db))
        return
p = list()
for i in range(process_num):
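        # update_list[i::process_num] deals the symbols out round-robin, so each
        # child process receives an interleaved, roughly equal share of the work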
p.append( Process(target=update_proc, args=(update_list[i::process_num],db,start_day,end_day)) )
p[i].start()
dblogger.info("%d processes launched to update %d %s(s)" %(process_num, len(update_list),db))
for j in range(len(p)):
p[j].join()
dblogger.info("============%s update done=============" %(db))
def mp_createdb(update_list, process_num, db, start_day=None, end_day=None):
    if len(update_list) < 1:
        dblogger.warning("No records[%d] for %s creation" % (len(update_list), db))
        return
    p = list()
    for i in range(process_num):
        # start_day/end_day default to "today" inside update_proc when not given
        p.append(Process(target=update_proc, args=(update_list[i::process_num], db, start_day, end_day)))
p[i].start()
dblogger.info("%d processes launched to update %d %s(s)" %(process_num, len(update_list),db))
for j in range(len(p)):
p[j].join()
dblogger.info("============%s creation done=============" %(db))
def trigger_server_job():
dblogger.info("============starting server job:update_indicators============")
output = os.popen('java -jar jenkins-cli.jar -s http://114.55.53.68:65050/ build update_indicators')
dblogger.info(output.read()) # will print nothing in success case
dblogger.info("============Trigger server job done=============")
return
# main update entry
def update_execution():
#update stocks in multi-process
#wind can't support more than 12 connections
process_max = multiprocessing.cpu_count()*2
process_min = multiprocessing.cpu_count()
t1 = datetime.today() #(datetime.today()+timedelta(-14)).strftime('%Y-%m-%d')
t2 = datetime.today()
mp_update(get_stock_list(),process_max,"ml_security_table",t1,t2)
#t1,t2 = "2017-1-23","2017-2-8"
mp_update(get_etf_list() , process_min,"etf" ,t1,t2)
mp_update(get_ab_list() , process_max,"ab" ,t1,t2)
mp_update(get_ab_etf_list() , process_max,"ab_etf",t1,t2)
mp_update(get_bond_list(), 1 ,"cbond",t1,t2)
print("============All update done=============")
if __name__ == '__main__':
init_log()
wq = WindQuote.WndQuery()
print("main thread started")
update_execution()
# trigger JOB in server side
trigger_server_job()
#
|
presenter.py
|
import sys
import threading
import time
from datetime import datetime, timedelta
from PyQt5 import QtWidgets, QtCore, QtGui
import view
import timer
class Presenter:
def __init__(self, view_: view.View):
self.view = view_
self.update_time = None
self.timer = 0
self.t = None
def set_default_style(self):
"""Set button back to 'Start' after timer is finished"""
self.view.timer_button.active = False
def timer_button_clicked(self, active):
if active is False: # Start
self.start_timer()
else: # Cancel
self.t.stop()
def start_timer(self):
if timer.isActive is False:
timer.isActive = True
self.view.dial.setDisabled(True)
self.t = timer.MyTimer(self.view.hours, self.view.minutes, self.view.seconds, self.view.dial, self.set_default_style)
self.t.start()
def dial_moved(self, value):
self.timer = value
if self.timer < 60:
self.view.minutes.setText(str(self.timer))
self.view.hours.setText("0")
        elif self.timer < 60 * 8:  # cap at 8 hours; it's a study/work timer, so longer runs aren't needed
hours = int(self.timer / 60)
minutes = int(self.timer % 60)
self.view.hours.setText(str(hours))
self.view.minutes.setText(str(minutes))
else:
self.timer = 0
def set_current_time(self):
now = datetime.now()
current_time = now.strftime("%H:%M")
week_day = datetime.today().strftime('%A')
self.view.time_label.setText(current_time)
self.view.day_label.setText(week_day)
def set_timer(self):
"""Wait till another minute and start repeated timer with 60 seconds interval"""
time.sleep(60.0 - (time.time() % 60))
self.set_current_time() # set new time and...
self.update_time = RepeatedTimer(timedelta(seconds=60), self.set_current_time) # set repeated timer with 60s interval
self.update_time.start()
def show(self):
app = QtWidgets.QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon('Graphic/timer_icon.png'))
MainWindow = QtWidgets.QMainWindow()
self.view.setup(MainWindow)
self.set_current_time()
t = threading.Thread(target=self.set_timer)
t.daemon = True # kill this thread when main thread is killed
t.start()
MainWindow.show()
sys.exit(app.exec_())
class RepeatedTimer(threading.Thread):
def __init__(self, interval, execute, *args, **kwargs):
threading.Thread.__init__(self)
self.daemon = True # True: if the main thread is killed this thread will be killed too
self.stopped = threading.Event()
self.interval = interval
self.execute = execute
self.args = args
self.kwargs = kwargs
def stop(self):
self.stopped.set()
self.join()
def run(self):
while not self.stopped.wait(self.interval.total_seconds()):
try:
self.execute(*self.args, **self.kwargs)
except RuntimeError:
"""This exception is rised when progrem is closed"""
self.stopped.set()
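if __name__ == "__main__":
    # Standalone sketch (not part of the original app): print "tick" every
    # 2 seconds for roughly 5 seconds, then stop the repeated timer cleanly.
    demo = RepeatedTimer(timedelta(seconds=2), print, "tick")
    demo.start()
    time.sleep(5)
    demo.stop()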
|
fabfile.py
|
# Auto-scale Rackspace Cloud servers with Fabric and Celery
#
# Copyright 2012 Nicholas Kuechler
# http://www.nicholaskuechler.com/
"""
Fabric fabfile to build workers using Rackspace Cloud api
and then configure them with the code base.
"""
import cloudservers
import urllib2
import uuid
from time import sleep
from subprocess import call
from threading import Thread
from fabric.api import *
from conf import settings
env.colors = True
env.user = 'root'
env.connection_attempts = 3
env.timeout = 30
env.keepalive = 900
def _get_server_image(cs, image_name):
i = cs.images.find(name=image_name)
return i
def _get_flavor(cs, ram_size):
return cs.flavors.find(ram=ram_size)
def _get_file_contents(file_name):
contents = open(file_name, 'r').read()
return contents
def _reboot_server(cs, s):
""" reboot a cloud server """
s.reboot()
sleep(90)
return True
def _create_server(cs, image_name, ram_size):
get_uuid = uuid.uuid4()
server_name = 'worker-%s' % (get_uuid)
print 'Creating server %s' % server_name
image = _get_server_image(cs, image_name)
if not image:
raise Exception('Could not get server image "%s"' % image_name)
flavor = _get_flavor(cs, ram_size)
if not flavor:
raise Exception('Could not get flavor with %s ram' % ram_size)
server = cs.servers.create(
server_name,
image,
flavor,
)
return server
def _wait_for_server(cs, s, with_url_ping=False):
while s.status != 'ACTIVE':
sleep(60)
s = cs.servers.get(s.id)
print '%s: %s (%s%%)' % (s.id, s.status, s.progress)
if with_url_ping:
# Wait for a response
url = 'http://%s/index.html' % s.public_ip
tries = 0
while True:
try:
print 'Attempting to connect to %s' % url
urllib2.urlopen(url)
print 'ping success, returning'
break
except urllib2.HTTPError, e:
print e
if e.code == 401:
print '401 not authorized'
elif e.code == 404:
print '404 not found... waiting...'
elif e.code == 503:
print '503 service unavailable'
else:
print 'unknown error: %s' % e
sleep(30)
except urllib2.URLError, u:
print u
print 'Connection refused for now...'
sleep(30)
tries += 1
if tries > 20: # 10 minutes
raise Exception('URL ping timed out')
def _install_nginx():
runcmd('aptitude -y install nginx')
def _restart_nginx():
runcmd('/etc/init.d/nginx restart')
def _make_web_directory():
runcmd('mkdir -p /var/www/')
runcmd('echo "<html><body>hello</body></html>" > /var/www/index.html')
def _update_host():
runcmd('aptitude -y update && aptitude -y safe-upgrade')
def _install_server_tools():
# commonly used packages
runcmd('aptitude -y install screen tcpdump dnsutils')
# git / subversion packages
runcmd('aptitude -y install git git-core subversion')
def start_celery_worker():
_start_celery_worker()
def _start_celery_worker():
""" start remote celery worker """
runcmd('/etc/init.d/celeryd start')
def stop_celery_worker():
_stop_celery_worker()
def _stop_celery_worker():
""" stop remote celery worker """
runcmd('/etc/init.d/celeryd stop')
def stop_celery_worker_test():
""" stop remote celery worker """
run('/etc/init.d/celeryd stop', pty=False)
def _set_up_iptables_locally(cs, s):
# set up iptables
# should probably be using local()
call("/sbin/iptables -I INPUT 9 -s %s/32 -p tcp -m state --state NEW -m tcp --dport 3306 -j ACCEPT" % (s.private_ip), shell=True)
call("/sbin/iptables -I INPUT 9 -s %s/32 -p tcp -m state --state NEW -m tcp --dport 5672 -j ACCEPT" % (s.private_ip), shell=True)
def _rsync_codebase_to_worker(cs, s):
""" rsync code base to new worker """
call("/usr/bin/rsync -e \"ssh -o StrictHostKeyChecking=no\" --quiet -av --delete /opt/codebase/ root@%s:/opt/codebase/" % (s.private_ip), shell=True)
@parallel
def create_multiple_workers():
""" deploy new workers in parallel with python threads """
for i in range(settings.NUMBER_OF_WORKERS):
t = Thread(target=deploy_worker, args=(i,))
t.start()
@parallel
def deploy_worker(thread_num):
cs = cloudservers.CloudServers(settings.CLOUDSERVERS_USERNAME,
settings.CLOUDSERVERS_API_KEY)
s = _create_server(cs, settings.CLOUDSERVERS_IMAGE_TEMPLATE, 256)
_wait_for_server(cs, s, with_url_ping=False)
print '%d: %s: Server IP is %s (private: %s)' % (thread_num,
s.id,
s.public_ip,
s.private_ip)
# small delay to allow the server to fully boot up
sleep(60)
# set fabric's env.host_string to the IP address of the new server
env.host_string = s.private_ip
# add iptables rules on master server so workers can connect to it
_set_up_iptables_locally(cs, s)
# rsync the code base to the new worker node
_rsync_codebase_to_worker(cs, s)
# reboot the newly created worker node
print '%d: Rebooting: %s (id: %s)' % (thread_num, s.private_ip, s.id)
_reboot_server(cs, s)
sleep(90)
# start the celery daemon on the new worker node
print '%d: Starting celery worker: %s (id: %s)' % (thread_num,
s.private_ip,
s.id)
env.host_string = s.private_ip
_start_celery_worker()
sleep(30)
# Helpers
def runcmd(arg):
if env.user != "root":
sudo("%s" % arg, pty=True)
else:
run("%s" % arg, pty=True)
|
test_io.py
|
import sys
import gc
import gzip
import os
import threading
import time
import warnings
import io
import re
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
import numpy_demo as np
import numpy_demo.ma as ma
from numpy_demo.lib._iotools import ConverterError, ConversionWarning
from numpy_demo.compat import asbytes, bytes
from numpy_demo.ma.testutils import assert_equal
from numpy_demo.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings
)
from numpy_demo.testing._private.utils import requires_memory
class TextIO(BytesIO):
"""Helper IO class.
    write() encodes strings to bytes if needed; reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
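# e.g. TextIO('1 2\n3 4') behaves like a small binary file object whose contents
# are b'1 2\n3 4', which is what the loadtxt/genfromtxt tests below feed it.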
IS_64BIT = sys.maxsize > 2**32
try:
import bz2
HAS_BZ2 = True
except ImportError:
HAS_BZ2 = False
try:
import lzma
HAS_LZMA = True
except ImportError:
HAS_LZMA = False
def strptime(s, fmt=None):
"""
    Thin wrapper around time.strptime that builds a datetime; datetime.strptime
    itself is only available in the datetime module from Python >= 2.5.
"""
if type(s) == bytes:
s = s.decode("latin1")
return datetime(*time.strptime(s, fmt)[:3])
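# e.g. strptime("2001-01-01", "%Y-%m-%d") -> datetime(2001, 1, 1, 0, 0)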
class RoundtripTest:
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy_demo.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
target_file = NamedTemporaryFile(delete=False)
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
try:
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
finally:
if not isinstance(target_file, BytesIO):
target_file.close()
# holds an open file descriptor so it can't be deleted on win
if 'arr_reloaded' in locals():
if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
os.remove(target_file.name)
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
@pytest.mark.slow
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', UserWarning)
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
finally:
# delete tempfile, must be done here on windows
if self.arr_reloaded.fid:
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
@pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
@pytest.mark.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
with temppath(prefix="numpy_demo_test_big_arrays_", suffix=".npz") as tmp:
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a'] # Should succeed
npfile.close()
del a # Avoid pyflakes unused variable warning.
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
assert_equal(a, l.f.file_a)
assert_equal(b, l.f.file_b)
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
with temppath(suffix='.npz') as tmp:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify could seek on 'loaded' file
with temppath(suffix='.npz') as tmp:
with open(tmp, 'wb') as fp:
np.savez(fp, data='LOVELY LOAD')
with open(tmp, 'rb', 10000) as fp:
fp.seek(0)
assert_(not fp.closed)
np.load(fp)['data']
# fp must not get closed by .load
assert_(not fp.closed)
fp.seek(0)
assert_(not fp.closed)
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy_demo/ticket/1517#comment:2
with temppath(suffix='.npz') as tmp:
np.savez(tmp, data='LOVELY LOAD')
# We need to check if the garbage collector can properly close
# numpy_demo npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings.
with suppress_warnings() as sup:
sup.filter(ResourceWarning) # TODO: specify exact message
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
if IS_PYPY:
gc.collect()
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it. This needs to
        # pass a file name to load for the test. On windows, a failure will
        # cause a second error to be raised when the attempt to remove
# the open file is made.
prefix = 'numpy_demo_test_closing_zipfile_after_load_'
with temppath(suffix='.npz', prefix=prefix) as tmp:
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt:
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_0D_3D(self):
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
def test_structured(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_structured_padded(self):
# gh-13297
a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
])
c = BytesIO()
np.savetxt(c, a[['foo', 'baz']], fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
def test_multifield_view(self):
a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
v = a[['x', 'z']]
with temppath(suffix='.npy') as path:
path = Path(path)
np.save(path, v)
data = np.load(path)
assert_array_equal(data, v)
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
# Test the functionality of the header and footer keyword argument.
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
with temppath() as name:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_complex_negative_exponent(self):
        # Prior to 1.15, some formats generated x+-yj, see gh-7895
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
with tempdir() as tmpdir:
            # set the encoding explicitly; on windows the default may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
encoding='UTF-8')
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
        # our gz wrapper supports encoding
suffixes = ['', '.gz']
if HAS_BZ2:
suffixes.append('.bz2')
if HAS_LZMA:
suffixes.extend(['.xz', '.lzma'])
with tempdir() as tmpdir:
for suffix in suffixes:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
encoding='UTF-16-LE', dtype=np.unicode_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read(), utf8 + '\n')
@pytest.mark.parametrize("fmt", [u"%f", b"%f"])
@pytest.mark.parametrize("iotype", [StringIO, BytesIO])
def test_unicode_and_bytes_fmt(self, fmt, iotype):
# string type of fmt should not matter, see also gh-4053
a = np.array([1.])
s = iotype()
np.savetxt(s, a, fmt=fmt)
s.seek(0)
if iotype is StringIO:
assert_equal(s.read(), u"%f\n" % 1.)
else:
assert_equal(s.read(), b"%f\n" % 1.)
@pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work")
@pytest.mark.skipif(IS_PYPY,
reason="GC problems after test, gc.collect does not help. see gh-15775")
@pytest.mark.slow
@requires_memory(free_bytes=7e9)
def test_large_zip(self):
# The test takes at least 6GB of memory, writes a file larger than 4GB
test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
for i in range(800000)], dtype=object)
with tempdir() as tmpdir:
np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
class LoadTxtBase:
def check_compressed(self, fopen, suffixes):
# Test that we can load data from a compressed file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
for suffix in suffixes:
with temppath(suffix=suffix) as name:
with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
f.write(data)
res = self.loadfunc(name, encoding='UTF-32-LE')
assert_array_equal(res, wanted)
with fopen(name, "rt", encoding='UTF-32-LE') as f:
res = self.loadfunc(f)
assert_array_equal(res, wanted)
def test_compressed_gzip(self):
self.check_compressed(gzip.open, ('.gz',))
@pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
def test_compressed_bz2(self):
self.check_compressed(bz2.open, ('.bz2',))
@pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
def test_compressed_lzma(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
def test_encoding(self):
with temppath() as path:
with open(path, "wb") as f:
f.write('0.\n1.\n2.'.encode("UTF-16"))
x = self.loadfunc(path, encoding="UTF-16")
assert_array_equal(x, [0., 1., 2.])
def test_stringload(self):
        # umlauts (non-ASCII characters)
nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
# test converters that decode strings
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
x = self.loadfunc(c, dtype=np.unicode_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
def test_converters_nodecode(self):
# test native string converters enabled by setting an encoding
utf8 = b'\xcf\x96'.decode('UTF-8')
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
x = self.loadfunc(path, dtype=np.unicode_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
assert_array_equal(x, a)
class TestLoadTxt(LoadTxtBase):
loadfunc = staticmethod(np.loadtxt)
def setup(self):
# lower chunksize for testing
self.orig_chunk = np.lib.npyio._loadtxt_chunksize
np.lib.npyio._loadtxt_chunksize = 1
def teardown(self):
np.lib.npyio._loadtxt_chunksize = self.orig_chunk
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments_unicode(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=u'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_byte(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=b'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_multiple(self):
c = TextIO()
c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=['#', '@', '//'])
a = np.array([[1, 2, 3], [4, 5, 6]], int)
assert_array_equal(x, a)
def test_comments_multi_chars(self):
c = TextIO()
c.write('/* comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='/*')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
# Check that '/*' is not transformed to ['/', '*']
c = TextIO()
c.write('*/ comment\n1,2,3,5\n')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
comments='/*')
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Testing with an integer instead of a sequence
for int_type in [int, np.int8, np.int16,
np.int32, np.int64, np.uint8, np.uint16,
np.uint32, np.uint64]:
to_read = int_type(1)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=to_read)
assert_array_equal(x, a[:, 1])
# Testing with some crazy custom integer type
class CrazyInt:
def __index__(self):
return 1
crazy_int = CrazyInt()
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=crazy_int)
assert_array_equal(x, a[:, 1])
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
assert_array_equal(x, a[:, 1])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
# Testing non-ints in usecols
c.seek(0)
bogus_idx = 1.5
assert_raises_regex(
TypeError,
'^usecols must be.*%s' % type(bogus_idx),
np.loadtxt, c, usecols=bogus_idx
)
assert_raises_regex(
TypeError,
'^usecols must be.*%s' % type(bogus_idx),
np.loadtxt, c, usecols=[0, bogus_idx, 0]
)
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_str_dtype(self):
# see gh-8033
c = ["str1", "str2"]
for dt in (str, np.bytes_):
a = np.array(["str1", "str2"], dtype=dt)
x = np.loadtxt(c, dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with suppress_warnings() as sup:
sup.filter(message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_from_float_hex(self):
# IEEE doubles and floats only, otherwise the float32
# conversion may fail.
tgt = np.logspace(-10, 10, 5).astype(np.float32)
tgt = np.hstack((tgt, -tgt)).astype(float)
inp = '\n'.join(map(float.hex, tgt))
c = TextIO()
c.write(inp)
for dt in [float, np.float32]:
c.seek(0)
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
def test_from_complex(self):
tgt = (complex(1, 1), complex(1, -1))
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
def test_complex_misformatted(self):
# test for backward compatibility
# some complex formats used to generate x+-yj
a = np.zeros((2, 2), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.16e')
c.seek(0)
txt = c.read()
c.seek(0)
# misformat the sign on the imaginary part, gh 7895
txt_bad = txt.replace(b'e+00-', b'e00+-')
assert_(txt_bad != txt)
c.write(txt_bad)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, a)
def test_universal_newline(self):
with temppath() as name:
with open(name, 'w') as f:
f.write('1 21\r3 42\r')
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with suppress_warnings() as sup:
sup.filter(message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
def test_none_as_string(self):
# gh-5155, None should work as string when format demands it
c = TextIO()
c.write('100,foo,200\n300,None,400')
c.seek(0)
dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
@pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
reason="Wrong preferred encoding")
def test_binary_load(self):
butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
b"20,2,3,\xc3\x95scar\n\r"
sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
with temppath() as path:
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
assert_array_equal(x, sutf8)
# test broken latin1 conversion people now rely on
with open(path, "rb") as f:
x = np.loadtxt(f, encoding="UTF-8", dtype="S")
x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
assert_array_equal(x, np.array(x, dtype="S"))
def test_max_rows(self):
c = TextIO()
c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
max_rows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_max_rows_with_skiprows(self):
c = TextIO()
c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=2)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
assert_array_equal(x, a)
def test_max_rows_with_read_continuation(self):
c = TextIO()
c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
max_rows=2)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
assert_array_equal(x, a)
# test continuation
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([2,1,4,5], int)
assert_array_equal(x, a)
def test_max_rows_larger(self):
        # test max_rows larger than the number of rows
c = TextIO()
c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=6)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
assert_array_equal(x, a)
class Testfromregex:
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
def test_record_unicode(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
dt = [('num', np.float64), ('val', 'U4')]
x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
(4.444, 'qux')], dtype=dt)
assert_array_equal(x, a)
regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
x = np.fromregex(path, regexp, dt, encoding='UTF-8')
assert_array_equal(x, a)
def test_compiled_bytes(self):
regexp = re.compile(b'(\\d)')
c = BytesIO(b'123')
dt = [('num', np.float64)]
a = np.array([1, 2, 3], dtype=dt)
x = np.fromregex(c, regexp, dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(LoadTxtBase):
loadfunc = staticmethod(np.genfromtxt)
def test_record(self):
# Test w/ explicit dtype
data = TextIO('1 2\n3 4')
test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.genfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
# Test outputting a standard ndarray
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
# Test squeezing to 1D
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.genfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
# Test the stripping of comments
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
# Test row skipping
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.genfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
# Test retrieving a header
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, dtype=None, names=True)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
# Test the automatic definition of the output dtype
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
# Tests whether the output dtype can be uniformized
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.genfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
# Test overwriting the names of the dtype
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.genfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
# Check that names can be retrieved even if the line is commented out.
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test, ctrl)
def test_names_and_comments_none(self):
# Tests case when names is true but comments is None (gh-10780)
data = TextIO('col1 col2\n 1 2\n 3 4')
test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
assert_equal(test, control)
def test_file_is_closed_on_error(self):
# gh-13200
with tempdir() as tmpdir:
fpath = os.path.join(tmpdir, "test.csv")
with open(fpath, "wb") as f:
f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8'))
# ResourceWarnings are emitted from a destructor, so won't be
# detected by regular propagation to errors.
with assert_no_warnings():
with pytest.raises(UnicodeDecodeError):
np.genfromtxt(fpath, encoding="ascii")
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
# Test the combination user-defined converters and usecol
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None,
converters={'C': lambda s: 2 * int(s)})
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
# Test the conversion to datetime.
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
# Test the conversion to datetime64.
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
# Test whether unused converters are forgotten
data = TextIO("1 21\n 3 42\n")
test = np.genfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.genfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
# Test some corner cases
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_converters_and_usecols(self):
dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
ndtype = [('nest', [('idx', int), ('code', object)])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
# nested but empty fields also aren't supported
ndtype = [('idx', int), ('code', object), ('nest', [])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_utf8_userconverters_with_explicit_dtype(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: np.compat.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.genfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
# Test using an integer for delimiter
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
# Test w/ a delimiter tab
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
# Test the selection of columns
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
# Test giving usecols with a comma-separated string
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
# Test usecols with an explicit structured dtype
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.genfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
# Test usecols with an integer
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
# Test usecols with named columns
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
# Test that an empty file raises the proper warning.
with suppress_warnings() as sup:
sup.filter(message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
# when skip_header > 0
test = np.genfromtxt(data, skip_header=1)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.genfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.genfromtxt(data, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.genfromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
usemask=True,
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
# Test with missing and filling values
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.genfromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True, usemask=True)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
# Test masked column
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
# Test masked column
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
# Test invalid raise
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the
# callable in assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.genfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.genfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
# Test invalid_raise with usecols
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the
# callable in assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
# Test inconsistent dtype
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x.decode()}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
# Test default format
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
# Test single dtype w/o names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
# Test single dtype w explicit names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
# Test single dtype w implicit names
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
# Test easy structured dtype
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
# Test autostrip
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
# Test the 'replace_space' option
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_replace_space_known_dtype(self):
# Test the 'replace_space' (and related) options when dtype != None
txt = "A.A, B (B), C:C\n1, 2, 3"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
# Test w/ incomplete names
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_names_auto_completion(self):
# Make sure that names are properly completed
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
# Make sure we pick up the right names w/ usecols
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
# Test fix-width w/ names
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
# Test missing values
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b'testNonetherestofthedata')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b' testNonetherestofthedata')
def test_latin1(self):
latin1 = b'\xf6\xfc\xf6'
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + latin1 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1, 0], b"test1")
assert_equal(test[1, 1], b"testNonethe" + latin1)
assert_equal(test[1, 2], b"test3")
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',',
encoding='latin1')
assert_equal(test[1, 0], u"test1")
assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
assert_equal(test[1, 2], u"test3")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test['f0'], 0)
assert_equal(test['f1'], b"testNonethe" + latin1)
def test_binary_decode_autodtype(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_utf8_byte_encoding(self):
utf8 = b"\xcf\x96"
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + utf8 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
[b'norm1', b'norm2', b'norm3'],
[b'test1', b'testNonethe' + utf8, b'test3'],
[b'norm1', b'norm2', b'norm3']])
assert_array_equal(test, ctl)
def test_utf8_file(self):
utf8 = b"\xcf\x96"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
dtype=np.unicode_)
assert_array_equal(test, ctl)
# test a mixed dtype
with open(path, "wb") as f:
f.write(b"0,testNonethe" + utf8)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = u'\u03d6'
latin1 = u'\xf6\xfc\xf6'
# skip test if cannot encode utf8 test string with preferred
# encoding. The preferred encoding is assumed to be the default
# encoding of io.open. Will need to change this for PyTest, maybe
# using pytest.mark.xfail(raises=***).
try:
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
f.write(u"norm1,norm2,norm3\n")
f.write(u"norm1," + latin1 + u",norm3\n")
f.write(u"test1,testNonethe" + utf8 + u",test3\n")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '',
np.VisibleDeprecationWarning)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',')
# Check for warning when encoding not specified.
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
dtype=np.unicode_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', int), ('b', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', int), ('b', float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#gh-10394
data = TextIO('color\n"red"\n"blue"')
test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
def test_max_rows(self):
# Test the `max_rows` keyword argument.
data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
txt = TextIO(data)
a1 = np.genfromtxt(txt, max_rows=3)
a2 = np.genfromtxt(txt)
assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
assert_equal(a2, [[7, 8], [9, 10]])
# max_rows must be at least 1.
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
# An input with several invalid rows.
data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
test = np.genfromtxt(TextIO(data), max_rows=2)
control = np.array([[1., 1.], [2., 2.]])
assert_equal(test, control)
# Test keywords conflict
assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
max_rows=4)
# Test with invalid value
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
# Test with invalid not raise
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
# Structured array with field names.
data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
# Test with header, names and comments
txt = TextIO(data)
test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
# To continue reading the same "file", don't use skip_header or
# names, and use the previously determined dtype.
test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
control = np.array([(4.0, 4.0), (5.0, 5.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
with temppath() as name:
with open(name, 'w') as f:
f.write(data)
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
def test_gft_from_gzip(self):
# Test that we can load data from a gzipped file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
s = BytesIO()
with gzip.GzipFile(fileobj=s, mode='w') as g:
g.write(asbytes(data))
with temppath(suffix='.gz2') as name:
with open(name, 'w') as f:
f.write(data)
assert_array_equal(np.genfromtxt(name), wanted)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_auto_dtype_largeint(self):
# Regression test for numpy_demo/numpy_demo#5635 whereby large integers could
# cause OverflowErrors.
# Test the automatic definition of the output dtype
#
# 2**66 = 73786976294838206464 => should convert to float
# 2**34 = 17179869184 => should convert to int64
# 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
# int64 on 64-bit systems)
data = TextIO('73786976294838206464 17179869184 1024')
test = np.genfromtxt(data, dtype=None)
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
assert_(test.dtype['f2'] == np.int_)
assert_allclose(test['f0'], 73786976294838206464.)
assert_equal(test['f1'], 17179869184)
assert_equal(test['f2'], 1024)
class TestPathUsage:
# Test that pathlib.Path can be used
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
a = np.array([[1.1, 2], [3, 4]])
np.savetxt(path, a)
x = np.loadtxt(path)
assert_array_equal(x, a)
def test_save_load(self):
# Test that pathlib.Path instances can be used with save.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
data = np.load(path)
assert_array_equal(data, a)
def test_save_load_memmap(self):
# Test that pathlib.Path instances can be loaded mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
data = np.load(path, mmap_mode='r')
assert_array_equal(data, a)
# close the mem-mapped file
del data
def test_save_load_memmap_readwrite(self):
# Test that pathlib.Path instances can be written mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
b = np.load(path, mmap_mode='r+')
a[0][0] = 5
b[0][0] = 5
del b # closes the file
data = np.load(path)
assert_array_equal(data, a)
def test_savez_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
path = Path(path)
np.savez(path, lab='place holder')
with np.load(path) as data:
assert_array_equal(data['lab'], 'place holder')
def test_savez_compressed_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
path = Path(path)
np.savez_compressed(path, lab='place holder')
data = np.load(path)
assert_array_equal(data['lab'], 'place holder')
data.close()
def test_genfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
a = np.array([(1, 2), (3, 4)])
np.savetxt(path, a)
data = np.genfromtxt(path)
assert_array_equal(a, data)
def test_ndfromtxt(self):
# Test outputting a standard ndarray
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.genfromtxt(path, dtype=int)
assert_array_equal(test, control)
def test_mafromtxt(self):
# From `test_fancy_dtype_alt` above
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'1,2,3.0\n4,5,6.0\n')
test = np.genfromtxt(path, delimiter=',', usemask=True)
control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
assert_equal(test, control)
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(path, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_recfromcsv(self):
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(path, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
# These next two classes encode the minimal API needed to save()/load() arrays.
# The test_ducktyping test below ensures they work correctly.
class JustWriter:
def __init__(self, base):
self.base = base
def write(self, s):
return self.base.write(s)
def flush(self):
return self.base.flush()
class JustReader:
def __init__(self, base):
self.base = base
def read(self, n):
return self.base.read(n)
def seek(self, off, whence=0):
return self.base.seek(off, whence)
def test_ducktyping():
a = np.random.random((5, 5))
s = BytesIO()
f = JustWriter(s)
np.save(f, a)
f.flush()
s.seek(0)
f = JustReader(s)
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
# Because of Windows file-locking behaviour we can't use
# NamedTemporaryFile: a file created by it cannot be reopened by
# another open call. So we build the gzipped bytes of the reference
# array in memory, write them to a securely opened temporary file,
# and then read that file back with loadtxt.
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
with temppath(suffix='.gz') as name:
with open(name, 'wb') as f:
f.write(s.read())
res = np.loadtxt(name)
s.close()
assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
with assert_no_gc_cycles():
np.load(f)
f.seek(0)
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
with assert_no_gc_cycles():
x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
service_streamer.py
# coding=utf-8
# Created by Meteorix at 2019/7/13
import logging
import multiprocessing
import os
import threading
import time
import uuid
import weakref
import pickle
from queue import Queue, Empty
from typing import List
from redis import Redis
from .managed_model import ManagedModel
TIMEOUT = 1
TIME_SLEEP = 0.001
WORKER_TIMEOUT = 20
logger = logging.getLogger(__name__)
logger.setLevel("INFO")
mp = multiprocessing.get_context("spawn")
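# Overview of the pipeline below: a client-side streamer assigns each
# submitted batch a task_id, splits it into (task_id, request_id, item)
# messages and sends them to a worker over a queue or redis; the worker
# re-batches items (up to batch_size, or until max_latency elapses) and runs
# the model once per batch; a Future collects the per-item results and
# reassembles them in request_id order, so callers get results back in the
# order they submitted them.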
class Future(object):
def __init__(self, task_id, task_size, future_cache_ref):
self._id = task_id
self._size = task_size
self._future_cache_ref = future_cache_ref
self._outputs = []
self._finish_event = threading.Event()
def result(self, timeout=None):
if self._size == 0:
self._finish_event.set()
return []
finished = self._finish_event.wait(timeout)
if not finished:
raise TimeoutError("Task: %d Timeout" % self._id)
# remove from future_cache
future_cache = self._future_cache_ref()
if future_cache is not None:
del future_cache[self._id]
# [(request_id, output), ...] sorted by request_id
self._outputs.sort(key=lambda i: i[0])
# restore batch result from outputs
batch_result = [i[1] for i in self._outputs]
return batch_result
def done(self):
return self._finish_event.is_set()
def _append_result(self, it_id, it_output):
self._outputs.append((it_id, it_output))
if len(self._outputs) >= self._size:
self._finish_event.set()
class _FutureCache(dict):
"Dict for weakref only"
pass
class _BaseStreamer(object):
def __init__(self, *args, **kwargs):
super().__init__()
self._client_id = str(uuid.uuid4())
self._task_id = 0
self._future_cache = _FutureCache() # {task_id: future}
self.back_thread = threading.Thread(target=self._loop_collect_result, name="thread_collect_result")
self.back_thread.daemon = True
self.lock = threading.Lock()
def _delay_setup(self):
self.back_thread.start()
def _send_request(self, task_id, request_id, model_input):
raise NotImplementedError
def _recv_response(self, timeout=TIMEOUT):
raise NotImplementedError
def _input(self, batch: List) -> int:
"""
input a batch, distribute each item to mq, return task_id
"""
# task id in one client
self.lock.acquire()
task_id = self._task_id
self._task_id += 1
self.lock.release()
# request id in one task
request_id = 0
future = Future(task_id, len(batch), weakref.ref(self._future_cache))
self._future_cache[task_id] = future
for model_input in batch:
self._send_request(task_id, request_id, model_input)
request_id += 1
return task_id
def _loop_collect_result(self):
logger.info("start _loop_collect_result")
while True:
message = self._recv_response(timeout=TIMEOUT)
if message:
(task_id, request_id, item) = message
future = self._future_cache[task_id]
future._append_result(request_id, item)
else:
# todo
time.sleep(TIME_SLEEP)
def _output(self, task_id: int) -> List:
future = self._future_cache[task_id]
batch_result = future.result(WORKER_TIMEOUT)
return batch_result
def submit(self, batch):
task_id = self._input(batch)
future = self._future_cache[task_id]
return future
def predict(self, batch):
task_id = self._input(batch)
ret = self._output(task_id)
assert len(batch) == len(ret), "input batch size {} and output batch size {} must be equal.".format(len(batch), len(ret))
return ret
def destroy_workers(self):
raise NotImplementedError
class _BaseStreamWorker(object):
def __init__(self, predict_function, batch_size, max_latency, *args, **kwargs):
super().__init__()
assert callable(predict_function)
self._pid = os.getpid()
self._predict = predict_function
self._batch_size = batch_size
self._max_latency = max_latency
self._destroy_event = kwargs.get("destroy_event", None)
def run_forever(self, *args, **kwargs):
self._pid = os.getpid() # overwrite the pid
logger.info("[gpu worker %d] %s start working" % (self._pid, self))
while True:
handled = self._run_once()
if self._destroy_event and self._destroy_event.is_set():
break
if not handled:
# sleep if no data handled last time
time.sleep(TIME_SLEEP)
logger.info("[gpu worker %d] %s shutdown" % (self._pid, self))
def model_predict(self, batch_input):
batch_result = self._predict(batch_input)
assert len(batch_input) == len(batch_result), "input batch size {} and output batch size {} must be equal.".format(len(batch_input), len(batch_result))
return batch_result
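# _run_once implements the dynamic batching: it drains up to batch_size items
# from the request queue, but stops early once max_latency seconds have
# passed since the first item arrived, trading a little extra latency for
# larger (more GPU-efficient) batches.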
def _run_once(self):
batch = []
start_time = time.time()
for i in range(self._batch_size):
try:
item = self._recv_request(timeout=self._max_latency)
except TimeoutError:
# waiting for the next item exceeded max_latency; stop collecting
break
else:
batch.append(item)
if (time.time() - start_time) > self._max_latency:
# total batch time exceeds the max latency
break
if not batch:
return 0
model_inputs = [i[3] for i in batch]
model_outputs = self.model_predict(model_inputs)
# send each result back over the transport (queue or redis pub/sub)
for i, item in enumerate(batch):
client_id, task_id, request_id, _ = item
self._send_response(client_id, task_id, request_id, model_outputs[i])
batch_size = len(batch)
logger.info("[gpu worker %d] run_once batch_size: %d start_at: %s spend: %s" % (
self._pid, batch_size, start_time, time.time() - start_time))
return batch_size
def _recv_request(self, timeout=TIMEOUT):
raise NotImplementedError
def _send_response(self, client_id, task_id, request_id, model_input):
raise NotImplementedError
class ThreadedStreamer(_BaseStreamer):
def __init__(self, predict_function, batch_size, max_latency=0.1):
super().__init__()
self._input_queue = Queue()
self._output_queue = Queue()
self._worker_destroy_event = threading.Event()
self._worker = ThreadedWorker(predict_function, batch_size, max_latency,
self._input_queue, self._output_queue,
destroy_event=self._worker_destroy_event)
self._worker_thread = threading.Thread(target=self._worker.run_forever, name="thread_worker")
self._worker_thread.daemon = True
self._worker_thread.start()
self._delay_setup()
def _send_request(self, task_id, request_id, model_input):
self._input_queue.put((0, task_id, request_id, model_input))
def _recv_response(self, timeout=TIMEOUT):
try:
message = self._output_queue.get(timeout=timeout)
except Empty:
message = None
return message
def destroy_workers(self):
self._worker_destroy_event.set()
self._worker_thread.join(timeout=WORKER_TIMEOUT)
if self._worker_thread.is_alive():
raise TimeoutError("worker_thread destroy timeout")
logger.info("workers destroyed")
class ThreadedWorker(_BaseStreamWorker):
def __init__(self, predict_function, batch_size, max_latency, request_queue, response_queue, *args, **kwargs):
super().__init__(predict_function, batch_size, max_latency, *args, **kwargs)
self._request_queue = request_queue
self._response_queue = response_queue
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._request_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._response_queue.put((task_id, request_id, model_output))
class Streamer(_BaseStreamer):
def __init__(self, predict_function_or_model, batch_size, max_latency=0.1, worker_num=1,
cuda_devices=None, model_init_args=None, model_init_kwargs=None, wait_for_worker_ready=False):
super().__init__()
self.worker_num = worker_num
self.cuda_devices = cuda_devices
self._input_queue = mp.Queue()
self._output_queue = mp.Queue()
self._worker = StreamWorker(predict_function_or_model, batch_size, max_latency,
self._input_queue, self._output_queue,
model_init_args, model_init_kwargs)
self._worker_ps = []
self._worker_ready_events = []
self._worker_destroy_events = []
self._setup_gpu_worker()
if wait_for_worker_ready:
self._wait_for_worker_ready()
self._delay_setup()
def _setup_gpu_worker(self):
for i in range(self.worker_num):
ready_event = mp.Event()
destroy_event = mp.Event()
if self.cuda_devices is not None:
gpu_id = self.cuda_devices[i % len(self.cuda_devices)]
args = (gpu_id, ready_event, destroy_event)
else:
args = (None, ready_event, destroy_event)
p = mp.Process(target=self._worker.run_forever, args=args, name="stream_worker", daemon=True)
p.start()
self._worker_ps.append(p)
self._worker_ready_events.append(ready_event)
self._worker_destroy_events.append(destroy_event)
def _wait_for_worker_ready(self, timeout=WORKER_TIMEOUT):
# wait for every worker to finish initializing
for (i, e) in enumerate(self._worker_ready_events):
# todo: select all events with timeout
is_ready = e.wait(timeout)
logger.info("gpu worker:%d ready state: %s" % (i, is_ready))
def _send_request(self, task_id, request_id, model_input):
self._input_queue.put((0, task_id, request_id, model_input))
def _recv_response(self, timeout=TIMEOUT):
try:
message = self._output_queue.get(timeout=timeout)
except Empty:
message = None
return message
def destroy_workers(self):
for e in self._worker_destroy_events:
e.set()
for p in self._worker_ps:
p.join(timeout=WORKER_TIMEOUT)
if p.is_alive():
raise TimeoutError("worker_process destroy timeout")
logger.info("workers destroyed")
class StreamWorker(_BaseStreamWorker):
def __init__(self, predict_function_or_model, batch_size, max_latency, request_queue, response_queue,
model_init_args, model_init_kwargs, *args, **kwargs):
super().__init__(predict_function_or_model, batch_size, max_latency, *args, **kwargs)
self._request_queue = request_queue
self._response_queue = response_queue
self._model_init_args = model_init_args or []
self._model_init_kwargs = model_init_kwargs or {}
def run_forever(self, gpu_id=None, ready_event=None, destroy_event=None):
# if given a ManagedModel subclass, lazily initialize the model after the worker process has started so the GPU assignment (e.g. CUDA device selection) takes effect in the child process
if isinstance(self._predict, type) and issubclass(self._predict, ManagedModel):
model_class = self._predict
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
self._model = model_class(gpu_id)
self._model.init_model(*self._model_init_args, **self._model_init_kwargs)
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
self._predict = self._model.predict
if ready_event:
ready_event.set()  # tell the parent process that init is finished
if destroy_event:
self._destroy_event = destroy_event
super().run_forever()
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._request_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._response_queue.put((task_id, request_id, model_output))
class RedisStreamer(_BaseStreamer):
"""
1. accept an input batch as a single task
2. distribute every item in the batch to redis
3. collect results in a background loop
4. return the batch result for a task once every item has come back
"""
def __init__(self, redis_broker="localhost:6379", prefix=''):
super().__init__()
self.prefix = prefix
self._redis_broker = redis_broker
self._redis = _RedisClient(self._client_id, self._redis_broker, self.prefix)
self._delay_setup()
def _send_request(self, task_id, request_id, model_input):
self._redis.send_request(task_id, request_id, model_input)
def _recv_response(self, timeout=TIMEOUT):
return self._redis.recv_response(timeout)
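# Redis transport used by RedisStreamer/RedisWorker (see the _RedisClient and
# _RedisServer helpers below): every client LPUSHes pickled
# (client_id, task_id, request_id, item) messages onto one shared request
# list, workers pop them with BLPOP, and each result is published on a
# per-client pub/sub channel named "response_pb_" + prefix + client_id, which
# the originating client subscribes to.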
class RedisWorker(_BaseStreamWorker):
def __init__(self, model_class, batch_size, max_latency=0.1,
redis_broker="localhost:6379", prefix='',
model_init_args=None, model_init_kwargs=None, *args, **kwargs):
# assert issubclass(model_class, ManagedModel)
super().__init__(model_class, batch_size, max_latency, *args, **kwargs)
self.prefix = prefix
self._model_init_args = model_init_args or []
self._model_init_kwargs = model_init_kwargs or {}
self._redis_broker = redis_broker
self._redis = _RedisServer(0, self._redis_broker, self.prefix)
self._requests_queue = Queue()
self.back_thread = threading.Thread(target=self._loop_recv_request, name="thread_recv_request")
self.back_thread.daemon = True
self.back_thread.start()
def run_forever(self, gpu_id=None):
logger.info("[gpu worker %d] init model on gpu:%s" % (os.getpid(), gpu_id))
model_class = self._predict
self._model = model_class(gpu_id)
self._model.init_model(*self._model_init_args, **self._model_init_kwargs)
self._predict = self._model.predict
super().run_forever()
def _loop_recv_request(self):
logger.info("[gpu worker %d] start loop_recv_request" % (os.getpid()))
while True:
message = self._redis.recv_request(timeout=TIMEOUT)
if message:
(client_id, task_id, request_id, request_item) = pickle.loads(message)
self._requests_queue.put((client_id, task_id, request_id, request_item))
else:
# sleep if recv timeout
time.sleep(TIME_SLEEP)
def _recv_request(self, timeout=TIMEOUT):
try:
item = self._requests_queue.get(timeout=timeout)
except Empty:
raise TimeoutError
else:
return item
def _send_response(self, client_id, task_id, request_id, model_output):
self._redis.send_response(client_id, task_id, request_id, model_output)
def _setup_redis_worker_and_runforever(model_class, batch_size, max_latency, gpu_id, redis_broker, prefix=''):
redis_worker = RedisWorker(model_class, batch_size, max_latency, redis_broker=redis_broker, prefix=prefix)
redis_worker.run_forever(gpu_id)
def run_redis_workers_forever(model_class, batch_size, max_latency=0.1,
worker_num=1, cuda_devices=None, redis_broker="localhost:6379",
prefix='', model_init_args=None, model_init_kwargs=None):
procs = []
for i in range(worker_num):
if cuda_devices is not None:
gpu_id = cuda_devices[i % len(cuda_devices)]
else:
gpu_id = None
args = [model_class, batch_size, max_latency, gpu_id, redis_broker, prefix]
p = mp.Process(target=_setup_redis_worker_and_runforever, args=args, name="stream_worker", daemon=True)
p.start()
procs.append(p)
for p in procs:
p.join()
class _RedisAgent(object):
def __init__(self, redis_id, redis_broker='localhost:6379', prefix=''):
self._redis_id = redis_id
self._redis_host = redis_broker.split(":")[0]
self._redis_port = int(redis_broker.split(":")[1])
self._redis_request_queue_name = "request_queue" + prefix
self._redis_response_pb_prefix = "response_pb_" + prefix
self._redis = Redis(host=self._redis_host, port=self._redis_port)
self._response_pb = self._redis.pubsub(ignore_subscribe_messages=True)
self._setup()
def _setup(self):
raise NotImplementedError
def _response_pb_name(self, redis_id):
return self._redis_response_pb_prefix + redis_id
class _RedisClient(_RedisAgent):
def _setup(self):
self._response_pb.subscribe(self._response_pb_name(self._redis_id))
def send_request(self, task_id, request_id, model_input):
message = (self._redis_id, task_id, request_id, model_input)
self._redis.lpush(self._redis_request_queue_name, pickle.dumps(message))
def recv_response(self, timeout):
message = self._response_pb.get_message(timeout=timeout)
if message:
return pickle.loads(message["data"])
class _RedisServer(_RedisAgent):
def _setup(self):
# the server pattern-subscribes to every client's response channel
self._response_pb.psubscribe(self._redis_response_pb_prefix + "*")
def recv_request(self, timeout):
message = self._redis.blpop(self._redis_request_queue_name, timeout=timeout)
# (queue_name, data)
if message:
return message[1]
def send_response(self, client_id, task_id, request_id, model_output):
message = (task_id, request_id, model_output)
channel_name = self._response_pb_name(client_id)
self._redis.publish(channel_name, pickle.dumps(message))
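# End-to-end usage sketch for the redis-based setup (illustrative only;
# MyManagedModel is a hypothetical ManagedModel subclass, and the redis
# address is an example):
#
#     # in a dedicated GPU worker process / script:
#     run_redis_workers_forever(MyManagedModel, batch_size=32, max_latency=0.1,
#                               worker_num=2, cuda_devices=(0, 1),
#                               redis_broker="localhost:6379")
#
#     # in each web / client process:
#     streamer = RedisStreamer(redis_broker="localhost:6379")
#     outputs = streamer.predict(inputs)   # inputs is a list of model inputs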
autoreload.py
import functools
import itertools
import logging
import os
import signal
import subprocess
import sys
import threading
import time
import traceback
import weakref
from collections import defaultdict
from pathlib import Path
from types import ModuleType
from zipimport import zipimporter
from django.apps import apps
from django.core.signals import request_finished
from django.dispatch import Signal
from django.utils.functional import cached_property
from django.utils.version import get_version_tuple
autoreload_started = Signal()
file_changed = Signal()
DJANGO_AUTORELOAD_ENV = 'RUN_MAIN'
logger = logging.getLogger('django.utils.autoreload')
# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
_exception = None
try:
import termios
except ImportError:
termios = None
try:
import pywatchman
except ImportError:
pywatchman = None
def check_errors(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
global _exception
try:
fn(*args, **kwargs)
except Exception:
_exception = sys.exc_info()
et, ev, tb = _exception
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def raise_last_exception():
global _exception
if _exception is not None:
raise _exception[1]
def ensure_echo_on():
"""
Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after reload.
"""
if not termios or not sys.stdin.isatty():
return
attr_list = termios.tcgetattr(sys.stdin)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def iter_all_python_module_files():
# This is a hot path during reloading. Create a stable sorted list of
# modules based on the module name and pass it to iter_modules_and_files().
# This ensures cached results are returned in the usual case that modules
# aren't loaded on the fly.
keys = sorted(sys.modules)
modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes))
return iter_modules_and_files(modules, frozenset(_error_files))
@functools.lru_cache(maxsize=1)
def iter_modules_and_files(modules, extra_files):
"""Iterate through all modules needed to be watched."""
sys_file_paths = []
for module in modules:
# During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
# are added to sys.modules; however, they are types, not modules, and so
# cause issues here.
if not isinstance(module, ModuleType):
continue
if module.__name__ == '__main__':
# __main__ (usually manage.py) doesn't always have a __spec__ set.
# Handle this by falling back to using __file__, resolved below.
# See https://docs.python.org/reference/import.html#main-spec
# __file__ may not exist, e.g. when running the ipdb debugger.
if hasattr(module, '__file__'):
sys_file_paths.append(module.__file__)
continue
if getattr(module, '__spec__', None) is None:
continue
spec = module.__spec__
# Modules could be loaded from places without a concrete location. If
# this is the case, skip them.
if spec.has_location:
origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin
sys_file_paths.append(origin)
results = set()
for filename in itertools.chain(sys_file_paths, extra_files):
if not filename:
continue
path = Path(filename)
try:
resolved_path = path.resolve(strict=True).absolute()
except FileNotFoundError:
# The module could have been removed; don't fail loudly if this
# is the case.
continue
except ValueError as e:
# Network filesystems may return null bytes in file paths.
logger.debug('"%s" raised when resolving path: "%s"' % (str(e), path))
continue
results.add(resolved_path)
return frozenset(results)
@functools.lru_cache(maxsize=1)
def common_roots(paths):
"""
Return a tuple of common roots that are shared between the given paths.
File system watchers operate on directories and aren't cheap to create.
Try to find the minimum set of directories to watch that encompass all of
the files that need to be watched.
"""
# Inspired by Werkzeug:
# https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
# Create a sorted list of the path components, longest first.
path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
tree = {}
for chunks in path_parts:
node = tree
# Add each part of the path to the tree.
for chunk in chunks:
node = node.setdefault(chunk, {})
# Clear the last leaf in the tree.
node.clear()
# Turn the tree into a list of Path instances.
def _walk(node, path):
for prefix, child in node.items():
yield from _walk(child, path + (prefix,))
if not node:
yield Path(*path)
return tuple(_walk(tree, ()))
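# Illustrative example (hypothetical POSIX paths; the argument must be hashable
# because of the lru_cache, hence the frozenset):
#
#   >>> common_roots(frozenset({Path('/app/pkg'), Path('/app/pkg/sub'), Path('/lib')}))
#   (PosixPath('/app/pkg'), PosixPath('/lib'))
#
# Nested directories collapse into their shallowest ancestors, so each returned
# root can be handed to a single filesystem watcher.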
def sys_path_directories():
"""
Yield absolute directories from sys.path, ignoring entries that don't
exist.
"""
for path in sys.path:
path = Path(path)
try:
resolved_path = path.resolve(strict=True).absolute()
except FileNotFoundError:
continue
# If the path is a file (like a zip file), watch the parent directory.
if resolved_path.is_file():
yield resolved_path.parent
else:
yield resolved_path
def get_child_arguments():
"""
Return the arguments used to re-execute the current process. This contains a
workaround for Windows when the executable is reported without its .exe
extension, which can cause bugs on reloading.
"""
import django.__main__
django_main_path = Path(django.__main__.__file__)
py_script = Path(sys.argv[0])
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions]
if py_script == django_main_path:
# The server was started with `python -m django runserver`.
args += ['-m', 'django']
args += sys.argv[1:]
elif not py_script.exists():
# sys.argv[0] may not exist for several reasons on Windows.
# It may exist with a .exe extension or have a -script.py suffix.
exe_entrypoint = py_script.with_suffix('.exe')
if exe_entrypoint.exists():
# Should be executed directly, ignoring sys.executable.
# TODO: Remove str() when dropping support for PY37.
# args parameter accepts path-like on Windows from Python 3.8.
return [str(exe_entrypoint), *sys.argv[1:]]
script_entrypoint = py_script.with_name('%s-script.py' % py_script.name)
if script_entrypoint.exists():
# Should be executed as usual.
# TODO: Remove str() when dropping support for PY37.
# args parameter accepts path-like on Windows from Python 3.8.
return [*args, str(script_entrypoint), *sys.argv[1:]]
raise RuntimeError('Script %s does not exist.' % py_script)
else:
args += sys.argv
return args
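# Illustrative results (hypothetical invocations):
#   python manage.py runserver   -> [sys.executable, 'manage.py', 'runserver']
#   python -m django runserver   -> [sys.executable, '-m', 'django', 'runserver']
# with a '-W<opt>' entry inserted after sys.executable for each warning option
# passed to the parent interpreter.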
def trigger_reload(filename):
logger.info('%s changed, reloading.', filename)
sys.exit(3)
def restart_with_reloader():
new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'}
args = get_child_arguments()
while True:
p = subprocess.run(args, env=new_environ, close_fds=False)
if p.returncode != 3:
return p.returncode
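# Exit code 3 is the "please restart me" signal emitted by trigger_reload();
# any other return code ends the loop and becomes the parent's exit code.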
class BaseReloader:
def __init__(self):
self.extra_files = set()
self.directory_globs = defaultdict(set)
self._stop_condition = threading.Event()
def watch_dir(self, path, glob):
path = Path(path)
try:
path = path.absolute()
except FileNotFoundError:
logger.debug(
'Unable to watch directory %s as it cannot be resolved.',
path,
exc_info=True,
)
return
logger.debug('Watching dir %s with glob %s.', path, glob)
self.directory_globs[path].add(glob)
def watched_files(self, include_globs=True):
"""
Yield all files that need to be watched, including module files and
files within globs.
"""
yield from iter_all_python_module_files()
yield from self.extra_files
if include_globs:
for directory, patterns in self.directory_globs.items():
for pattern in patterns:
yield from directory.glob(pattern)
def wait_for_apps_ready(self, app_reg, django_main_thread):
"""
Wait until Django reports that the apps have been loaded. If the given
thread has terminated before the apps are ready, then a SyntaxError or
other non-recoverable error has been raised. In that case, stop waiting
for the apps_ready event and continue processing.
Return True if the thread is alive and the ready event has been
triggered, or False if the thread is terminated while waiting for the
event.
"""
while django_main_thread.is_alive():
if app_reg.ready_event.wait(timeout=0.1):
return True
else:
logger.debug('Main Django thread has terminated before apps are ready.')
return False
def run(self, django_main_thread):
logger.debug('Waiting for apps ready_event.')
self.wait_for_apps_ready(apps, django_main_thread)
from django.urls import get_resolver
# Prevent a race condition where URL modules aren't loaded when the
# reloader starts by accessing the urlconf_module property.
try:
get_resolver().urlconf_module
except Exception:
# Loading the urlconf can result in errors during development.
# If this occurs then swallow the error and continue.
pass
logger.debug('Apps ready_event triggered. Sending autoreload_started signal.')
autoreload_started.send(sender=self)
self.run_loop()
def run_loop(self):
ticker = self.tick()
while not self.should_stop:
try:
next(ticker)
except StopIteration:
break
self.stop()
def tick(self):
"""
This generator is called in a loop from run_loop. It's important that
the method takes care of pausing or otherwise waiting for a period of
time. This split between run_loop() and tick() is to improve the
testability of the reloader implementations by decoupling the work they
do from the loop.
"""
raise NotImplementedError('subclasses must implement tick().')
@classmethod
def check_availability(cls):
raise NotImplementedError('subclasses must implement check_availability().')
def notify_file_changed(self, path):
results = file_changed.send(sender=self, file_path=path)
logger.debug('%s notified as changed. Signal results: %s.', path, results)
if not any(res[1] for res in results):
trigger_reload(path)
# These are primarily used for testing.
@property
def should_stop(self):
return self._stop_condition.is_set()
def stop(self):
self._stop_condition.set()
class StatReloader(BaseReloader):
SLEEP_TIME = 1 # Check for changes once per second.
def tick(self):
mtimes = {}
while True:
for filepath, mtime in self.snapshot_files():
old_time = mtimes.get(filepath)
mtimes[filepath] = mtime
if old_time is None:
logger.debug('File %s first seen with mtime %s', filepath, mtime)
continue
elif mtime > old_time:
logger.debug('File %s previous mtime: %s, current mtime: %s', filepath, old_time, mtime)
self.notify_file_changed(filepath)
time.sleep(self.SLEEP_TIME)
yield
def snapshot_files(self):
# watched_files may produce duplicate paths if globs overlap.
seen_files = set()
for file in self.watched_files():
if file in seen_files:
continue
try:
mtime = file.stat().st_mtime
except OSError:
# This is thrown when the file does not exist.
continue
seen_files.add(file)
yield file, mtime
@classmethod
def check_availability(cls):
return True
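# How a BaseReloader subclass is driven, in short: run_loop() repeatedly
# advances the tick() generator; StatReloader.tick() sleeps, re-snapshots
# mtimes, and calls notify_file_changed() for anything newer than before,
# which fires the file_changed signal and, if no receiver handles the change,
# calls trigger_reload() to exit the child process with code 3.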
class WatchmanUnavailable(RuntimeError):
pass
class WatchmanReloader(BaseReloader):
def __init__(self):
self.roots = defaultdict(set)
self.processed_request = threading.Event()
self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5))
super().__init__()
@cached_property
def client(self):
return pywatchman.client(timeout=self.client_timeout)
def _watch_root(self, root):
# In practice this shouldn't occur, however, it's possible that a
# directory that doesn't exist yet is being watched. If it's outside of
# sys.path then this will end up as a new root. How to handle this isn't
# clear: not adding the root will likely break when subscribing to the
# changes, however, as this is currently an internal API, no files
# will be watched outside of sys.path. Fixing this by checking inside
# watch_glob() and watch_dir() is expensive; instead this could fall
# back to the StatReloader if this case is detected. For now, watching
# the parent, if possible, is sufficient.
if not root.exists():
if not root.parent.exists():
logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root)
return
root = root.parent
result = self.client.query('watch-project', str(root.absolute()))
if 'warning' in result:
logger.warning('Watchman warning: %s', result['warning'])
logger.debug('Watchman watch-project result: %s', result)
return result['watch'], result.get('relative_path')
@functools.lru_cache()
def _get_clock(self, root):
return self.client.query('clock', root)['clock']
def _subscribe(self, directory, name, expression):
root, rel_path = self._watch_root(directory)
query = {
'expression': expression,
'fields': ['name'],
'since': self._get_clock(root),
'dedup_results': True,
}
if rel_path:
query['relative_root'] = rel_path
logger.debug('Issuing watchman subscription %s, for root %s. Query: %s', name, root, query)
self.client.query('subscribe', root, name, query)
def _subscribe_dir(self, directory, filenames):
if not directory.exists():
if not directory.parent.exists():
logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'files-parent-%s' % directory.name
filenames = ['%s/%s' % (directory.name, filename) for filename in filenames]
directory = directory.parent
expression = ['name', filenames, 'wholename']
else:
prefix = 'files'
expression = ['name', filenames]
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def _watch_glob(self, directory, patterns):
"""
Watch a directory with a specific glob. If the directory doesn't yet
exist, attempt to watch the parent directory and amend the patterns to
include this. It's important this method isn't called more than once per
directory when updating all subscriptions. Subsequent calls will
overwrite the named subscription, so it must include all possible glob
expressions.
"""
prefix = 'glob'
if not directory.exists():
if not directory.parent.exists():
logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory)
return
prefix = 'glob-parent-%s' % directory.name
patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns]
directory = directory.parent
expression = ['anyof']
for pattern in patterns:
expression.append(['match', pattern, 'wholename'])
self._subscribe(directory, '%s:%s' % (prefix, directory), expression)
def watched_roots(self, watched_files):
extra_directories = self.directory_globs.keys()
watched_file_dirs = [f.parent for f in watched_files]
sys_paths = list(sys_path_directories())
return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
def _update_watches(self):
watched_files = list(self.watched_files(include_globs=False))
found_roots = common_roots(self.watched_roots(watched_files))
logger.debug('Watching %s files', len(watched_files))
logger.debug('Found common roots: %s', found_roots)
# Set up initial roots for performance, shortest roots first.
for root in sorted(found_roots):
self._watch_root(root)
for directory, patterns in self.directory_globs.items():
self._watch_glob(directory, patterns)
# Group sorted watched_files by their parent directory.
sorted_files = sorted(watched_files, key=lambda p: p.parent)
for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
# These paths need to be relative to the parent directory.
self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group])
def update_watches(self):
try:
self._update_watches()
except Exception as ex:
# If the service is still available, raise the original exception.
if self.check_server_status(ex):
raise
def _check_subscription(self, sub):
subscription = self.client.getSubscription(sub)
if not subscription:
return
logger.debug('Watchman subscription %s has results.', sub)
for result in subscription:
# When using watch-project, it's not simple to get the relative
# directory without storing some specific state. Store the full
# path to the directory in the subscription name, prefixed by its
# type (glob, files).
root_directory = Path(result['subscription'].split(':', 1)[1])
logger.debug('Found root directory %s', root_directory)
for file in result.get('files', []):
self.notify_file_changed(root_directory / file)
def request_processed(self, **kwargs):
logger.debug('Request processed. Setting update_watches event.')
self.processed_request.set()
def tick(self):
request_finished.connect(self.request_processed)
self.update_watches()
while True:
if self.processed_request.is_set():
self.update_watches()
self.processed_request.clear()
try:
self.client.receive()
except pywatchman.SocketTimeout:
pass
except pywatchman.WatchmanError as ex:
logger.debug('Watchman error: %s, checking server status.', ex)
self.check_server_status(ex)
else:
for sub in list(self.client.subs.keys()):
self._check_subscription(sub)
yield
def stop(self):
self.client.close()
super().stop()
def check_server_status(self, inner_ex=None):
"""Return True if the server is available."""
try:
self.client.query('version')
except Exception:
raise WatchmanUnavailable(str(inner_ex)) from inner_ex
return True
@classmethod
def check_availability(cls):
if not pywatchman:
raise WatchmanUnavailable('pywatchman not installed.')
client = pywatchman.client(timeout=0.1)
try:
result = client.capabilityCheck()
except Exception:
# The service is down?
raise WatchmanUnavailable('Cannot connect to the watchman service.')
version = get_version_tuple(result['version'])
# Watchman 4.9 includes multiple improvements to watching project
# directories as well as case insensitive filesystems.
logger.debug('Watchman version %s', version)
if version < (4, 9):
raise WatchmanUnavailable('Watchman 4.9 or later is required.')
def get_reloader():
"""Return the most suitable reloader for this environment."""
try:
WatchmanReloader.check_availability()
except WatchmanUnavailable:
return StatReloader()
return WatchmanReloader()
def start_django(reloader, main_func, *args, **kwargs):
ensure_echo_on()
main_func = check_errors(main_func)
django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs, name='django-main-thread')
django_main_thread.daemon = True
django_main_thread.start()
while not reloader.should_stop:
try:
reloader.run(django_main_thread)
except WatchmanUnavailable as ex:
# It's possible that the watchman service shuts down or otherwise
# becomes unavailable. In that case, use the StatReloader.
reloader = StatReloader()
logger.error('Error connecting to Watchman: %s', ex)
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
def run_with_reloader(main_func, *args, **kwargs):
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true':
reloader = get_reloader()
logger.info('Watching for file changes with %s', reloader.__class__.__name__)
start_django(reloader, main_func, *args, **kwargs)
else:
exit_code = restart_with_reloader()
sys.exit(exit_code)
except KeyboardInterrupt:
pass
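# Usage sketch (hypothetical inner_run function; this mirrors how a management
# command would drive the module):
#
#   def inner_run():
#       ...  # start the development server in the child process
#
#   run_with_reloader(inner_run)
#
# The original invocation becomes the parent and loops in
# restart_with_reloader(); the re-executed child sees
# DJANGO_AUTORELOAD_ENV == 'true' and runs start_django() with the reloader
# chosen by get_reloader().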
|
input_server.py
|
# First, start the Pyro4 name server: python -m Pyro4.naming
import Pyro4
from threading import Thread
import time
import numpy as np
from railrl.launchers import config
# HOSTNAME = "192.168.0.102"
Pyro4.config.SERIALIZERS_ACCEPTED = {'pickle', 'json', 'marshal', 'serpent'}
Pyro4.config.SERIALIZER = 'pickle'
device_state = None
@Pyro4.expose
class DeviceState(object):
state = None
def get_state(self):
return device_state
# return self.state
def set_state(self, state):
# print("set", state)
# self.state = state
global device_state
device_state = state
class SpaceMouseExpert:
def __init__(self, xyz_dims=3, xyz_remap=[0, 1, 2], xyz_scale=[1, 1, 1]):
"""TODO: fill in other params"""
self.xyz_dims = xyz_dims
self.xyz_remap = np.array(xyz_remap)
self.xyz_scale = np.array(xyz_scale)
self.thread = Thread(target=start_server)
self.thread.daemon = True
self.thread.start()
self.device_state = DeviceState()
def get_action(self, obs):
"""Must return (action, valid, reset, accept)"""
state = self.device_state.get_state()
print(state)
if state is None:
return None, False, False, False
dpos, rotation, accept, reset = (
state["dpos"],
state["rotation"],
state["left_click"],
state["right_click"],
)
xyz = dpos[self.xyz_remap] * self.xyz_scale
a = xyz[:self.xyz_dims]
valid = not np.all(np.isclose(a, 0))
return (a, valid, reset, accept)
def start_server():
daemon = Pyro4.Daemon(config.SPACEMOUSE_HOSTNAME) # make a Pyro daemon
ns = Pyro4.locateNS() # find the name server
uri = daemon.register(DeviceState)  # register the DeviceState class as a Pyro object
ns.register("example.greeting", uri) # register the object with a name in the name server
print("Server ready.")
daemon.requestLoop() # start the event loop of the server to wait for calls
if __name__ == "__main__":
expert = SpaceMouseExpert()
for i in range(100):
time.sleep(1)
print(expert.get_action(None))
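# Sketch of the remote device process (hypothetical client script; Pyro4.Proxy
# and the PYRONAME URI scheme are standard Pyro4 APIs, and the client has to
# select the pickle serializer accepted above):
#
#   import numpy as np
#   import Pyro4
#   Pyro4.config.SERIALIZER = 'pickle'
#   proxy = Pyro4.Proxy("PYRONAME:example.greeting")
#   proxy.set_state({"dpos": np.zeros(3), "rotation": np.eye(3),
#                    "left_click": False, "right_click": False})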
|
pickletester.py
|
import collections
import copyreg
import dbm
import io
import functools
import os
import math
import pickle
import pickletools
import shutil
import struct
import sys
import threading
import unittest
import weakref
from textwrap import dedent
from http.cookies import SimpleCookie
try:
import _testbuffer
except ImportError:
_testbuffer = None
try:
import numpy as np
except ImportError:
np = None
from test import support
from test.support import (
TestFailed, TESTFN, run_with_locale, no_tracing,
_2G, _4G, bigmemtest, reap_threads, forget,
)
from pickle import bytes_types
requires_32b = unittest.skipUnless(sys.maxsize < 2**32,
"test is only meaningful on 32-bit builds")
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
# kind of outer loop.
protocols = range(pickle.HIGHEST_PROTOCOL + 1)
# Return True if opcode code appears in the pickle, else False.
def opcode_in_pickle(code, pickle):
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
return True
return False
# Return the number of times opcode code appears in pickle.
def count_opcode(code, pickle):
n = 0
for op, dummy, dummy in pickletools.genops(pickle):
if op.code == code.decode("latin-1"):
n += 1
return n
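# Quick sanity check of the two helpers above (standard-library calls only):
#
#   >>> opcode_in_pickle(pickle.PUT, pickle.dumps([1, 2], 0))
#   True
#   >>> count_opcode(pickle.PROTO, pickle.dumps([1, 2], 2))
#   1
#
# Protocol 0 memoizes the list with a text PUT opcode, and a protocol 2 pickle
# produced by dumps() starts with exactly one PROTO opcode.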
class UnseekableIO(io.BytesIO):
def peek(self, *args):
raise NotImplementedError
def seekable(self):
return False
def seek(self, *args):
raise io.UnsupportedOperation
def tell(self):
raise io.UnsupportedOperation
# We can't very well test the extension registry without putting known stuff
# in it, but we have to be careful to restore its original state. Code
# should do this:
#
# e = ExtensionSaver(extension_code)
# try:
# fiddle w/ the extension registry's stuff for extension_code
# finally:
# e.restore()
class ExtensionSaver:
# Remember current registration for code (if any), and remove it (if
# there is one).
def __init__(self, code):
self.code = code
if code in copyreg._inverted_registry:
self.pair = copyreg._inverted_registry[code]
copyreg.remove_extension(self.pair[0], self.pair[1], code)
else:
self.pair = None
# Restore previous registration for code.
def restore(self):
code = self.code
curpair = copyreg._inverted_registry.get(code)
if curpair is not None:
copyreg.remove_extension(curpair[0], curpair[1], code)
pair = self.pair
if pair is not None:
copyreg.add_extension(pair[0], pair[1], code)
class C:
def __eq__(self, other):
return self.__dict__ == other.__dict__
class D(C):
def __init__(self, arg):
pass
class E(C):
def __getinitargs__(self):
return ()
class H(object):
pass
# Hashable mutable key
class K(object):
def __init__(self, value):
self.value = value
def __reduce__(self):
# Shouldn't support the recursion itself
return K, (self.value,)
import __main__
__main__.C = C
C.__module__ = "__main__"
__main__.D = D
D.__module__ = "__main__"
__main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
class myint(int):
def __init__(self, x):
self.str = str(x)
class initarg(C):
def __init__(self, a, b):
self.a = a
self.b = b
def __getinitargs__(self):
return self.a, self.b
class metaclass(type):
pass
class use_metaclass(object, metaclass=metaclass):
pass
class pickling_metaclass(type):
def __eq__(self, other):
return (type(self) == type(other) and
self.reduce_args == other.reduce_args)
def __reduce__(self):
return (create_dynamic_class, self.reduce_args)
def create_dynamic_class(name, bases):
result = pickling_metaclass(name, bases, dict())
result.reduce_args = (name, bases)
return result
class ZeroCopyBytes(bytes):
readonly = True
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
class ZeroCopyBytearray(bytearray):
readonly = False
c_contiguous = True
f_contiguous = True
zero_copy_reconstruct = True
def __reduce_ex__(self, protocol):
if protocol >= 5:
return type(self)._reconstruct, (pickle.PickleBuffer(self),), None
else:
return type(self)._reconstruct, (bytes(self),)
def __repr__(self):
return "{}({!r})".format(self.__class__.__name__, bytes(self))
__str__ = __repr__
@classmethod
def _reconstruct(cls, obj):
with memoryview(obj) as m:
obj = m.obj
if type(obj) is cls:
# Zero-copy
return obj
else:
return cls(obj)
if _testbuffer is not None:
class PicklableNDArray:
# A not-really-zero-copy picklable ndarray, as the ndarray()
# constructor doesn't allow for it
zero_copy_reconstruct = False
def __init__(self, *args, **kwargs):
self.array = _testbuffer.ndarray(*args, **kwargs)
def __getitem__(self, idx):
cls = type(self)
new = cls.__new__(cls)
new.array = self.array[idx]
return new
@property
def readonly(self):
return self.array.readonly
@property
def c_contiguous(self):
return self.array.c_contiguous
@property
def f_contiguous(self):
return self.array.f_contiguous
def __eq__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return (other.array.format == self.array.format and
other.array.shape == self.array.shape and
other.array.strides == self.array.strides and
other.array.readonly == self.array.readonly and
other.array.tobytes() == self.array.tobytes())
def __ne__(self, other):
if not isinstance(other, PicklableNDArray):
return NotImplemented
return not (self == other)
def __repr__(self):
return (f"{type(self)}(shape={self.array.shape},"
f"strides={self.array.strides}, "
f"bytes={self.array.tobytes()})")
def __reduce_ex__(self, protocol):
if not self.array.contiguous:
raise NotImplementedError("Reconstructing a non-contiguous "
"ndarray does not seem possible")
ndarray_kwargs = {"shape": self.array.shape,
"strides": self.array.strides,
"format": self.array.format,
"flags": (0 if self.readonly
else _testbuffer.ND_WRITABLE)}
pb = pickle.PickleBuffer(self.array)
if protocol >= 5:
return (type(self)._reconstruct,
(pb, ndarray_kwargs))
else:
# Need to serialize the bytes in physical order
with pb.raw() as m:
return (type(self)._reconstruct,
(m.tobytes(), ndarray_kwargs))
@classmethod
def _reconstruct(cls, obj, kwargs):
with memoryview(obj) as m:
# For some reason, ndarray() wants a list of integers...
# XXX This only works if format == 'B'
items = list(m.tobytes())
return cls(items, **kwargs)
# DATA0 .. DATA4 are the pickles we expect under the various protocols, for
# the object returned by create_data().
DATA0 = (
b'(lp0\nL0L\naL1L\naF2.0\n'
b'ac__builtin__\ncomple'
b'x\np1\n(F3.0\nF0.0\ntp2\n'
b'Rp3\naL1L\naL-1L\naL255'
b'L\naL-255L\naL-256L\naL'
b'65535L\naL-65535L\naL-'
b'65536L\naL2147483647L'
b'\naL-2147483647L\naL-2'
b'147483648L\na(Vabc\np4'
b'\ng4\nccopy_reg\n_recon'
b'structor\np5\n(c__main'
b'__\nC\np6\nc__builtin__'
b'\nobject\np7\nNtp8\nRp9\n'
b'(dp10\nVfoo\np11\nL1L\ns'
b'Vbar\np12\nL2L\nsbg9\ntp'
b'13\nag13\naL5L\na.'
)
# Disassembly of DATA0
DATA0_DIS = """\
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 0
9: a APPEND
10: L LONG 1
14: a APPEND
15: F FLOAT 2.0
20: a APPEND
21: c GLOBAL '__builtin__ complex'
42: p PUT 1
45: ( MARK
46: F FLOAT 3.0
51: F FLOAT 0.0
56: t TUPLE (MARK at 45)
57: p PUT 2
60: R REDUCE
61: p PUT 3
64: a APPEND
65: L LONG 1
69: a APPEND
70: L LONG -1
75: a APPEND
76: L LONG 255
82: a APPEND
83: L LONG -255
90: a APPEND
91: L LONG -256
98: a APPEND
99: L LONG 65535
107: a APPEND
108: L LONG -65535
117: a APPEND
118: L LONG -65536
127: a APPEND
128: L LONG 2147483647
141: a APPEND
142: L LONG -2147483647
156: a APPEND
157: L LONG -2147483648
171: a APPEND
172: ( MARK
173: V UNICODE 'abc'
178: p PUT 4
181: g GET 4
184: c GLOBAL 'copy_reg _reconstructor'
209: p PUT 5
212: ( MARK
213: c GLOBAL '__main__ C'
225: p PUT 6
228: c GLOBAL '__builtin__ object'
248: p PUT 7
251: N NONE
252: t TUPLE (MARK at 212)
253: p PUT 8
256: R REDUCE
257: p PUT 9
260: ( MARK
261: d DICT (MARK at 260)
262: p PUT 10
266: V UNICODE 'foo'
271: p PUT 11
275: L LONG 1
279: s SETITEM
280: V UNICODE 'bar'
285: p PUT 12
289: L LONG 2
293: s SETITEM
294: b BUILD
295: g GET 9
298: t TUPLE (MARK at 172)
299: p PUT 13
303: a APPEND
304: g GET 13
308: a APPEND
309: L LONG 5
313: a APPEND
314: . STOP
highest protocol among opcodes = 0
"""
DATA1 = (
b']q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c__'
b'builtin__\ncomplex\nq\x01'
b'(G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00t'
b'q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ'
b'\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff'
b'\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00ab'
b'cq\x04h\x04ccopy_reg\n_reco'
b'nstructor\nq\x05(c__main'
b'__\nC\nq\x06c__builtin__\n'
b'object\nq\x07Ntq\x08Rq\t}q\n('
b'X\x03\x00\x00\x00fooq\x0bK\x01X\x03\x00\x00\x00bar'
b'q\x0cK\x02ubh\ttq\rh\rK\x05e.'
)
# Disassembly of DATA1
DATA1_DIS = """\
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 0
6: K BININT1 1
8: G BINFLOAT 2.0
17: c GLOBAL '__builtin__ complex'
38: q BINPUT 1
40: ( MARK
41: G BINFLOAT 3.0
50: G BINFLOAT 0.0
59: t TUPLE (MARK at 40)
60: q BINPUT 2
62: R REDUCE
63: q BINPUT 3
65: K BININT1 1
67: J BININT -1
72: K BININT1 255
74: J BININT -255
79: J BININT -256
84: M BININT2 65535
87: J BININT -65535
92: J BININT -65536
97: J BININT 2147483647
102: J BININT -2147483647
107: J BININT -2147483648
112: ( MARK
113: X BINUNICODE 'abc'
121: q BINPUT 4
123: h BINGET 4
125: c GLOBAL 'copy_reg _reconstructor'
150: q BINPUT 5
152: ( MARK
153: c GLOBAL '__main__ C'
165: q BINPUT 6
167: c GLOBAL '__builtin__ object'
187: q BINPUT 7
189: N NONE
190: t TUPLE (MARK at 152)
191: q BINPUT 8
193: R REDUCE
194: q BINPUT 9
196: } EMPTY_DICT
197: q BINPUT 10
199: ( MARK
200: X BINUNICODE 'foo'
208: q BINPUT 11
210: K BININT1 1
212: X BINUNICODE 'bar'
220: q BINPUT 12
222: K BININT1 2
224: u SETITEMS (MARK at 199)
225: b BUILD
226: h BINGET 9
228: t TUPLE (MARK at 112)
229: q BINPUT 13
231: h BINGET 13
233: K BININT1 5
235: e APPENDS (MARK at 3)
236: . STOP
highest protocol among opcodes = 1
"""
DATA2 = (
b'\x80\x02]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'__builtin__\ncomplex\n'
b'q\x01G@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x86q\x02Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xff'
b'J\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff'
b'\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00a'
b'bcq\x04h\x04c__main__\nC\nq\x05'
b')\x81q\x06}q\x07(X\x03\x00\x00\x00fooq\x08K\x01'
b'X\x03\x00\x00\x00barq\tK\x02ubh\x06tq\nh'
b'\nK\x05e.'
)
# Disassembly of DATA2
DATA2_DIS = """\
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL '__builtin__ complex'
40: q BINPUT 1
42: G BINFLOAT 3.0
51: G BINFLOAT 0.0
60: \x86 TUPLE2
61: q BINPUT 2
63: R REDUCE
64: q BINPUT 3
66: K BININT1 1
68: J BININT -1
73: K BININT1 255
75: J BININT -255
80: J BININT -256
85: M BININT2 65535
88: J BININT -65535
93: J BININT -65536
98: J BININT 2147483647
103: J BININT -2147483647
108: J BININT -2147483648
113: ( MARK
114: X BINUNICODE 'abc'
122: q BINPUT 4
124: h BINGET 4
126: c GLOBAL '__main__ C'
138: q BINPUT 5
140: ) EMPTY_TUPLE
141: \x81 NEWOBJ
142: q BINPUT 6
144: } EMPTY_DICT
145: q BINPUT 7
147: ( MARK
148: X BINUNICODE 'foo'
156: q BINPUT 8
158: K BININT1 1
160: X BINUNICODE 'bar'
168: q BINPUT 9
170: K BININT1 2
172: u SETITEMS (MARK at 147)
173: b BUILD
174: h BINGET 6
176: t TUPLE (MARK at 113)
177: q BINPUT 10
179: h BINGET 10
181: K BININT1 5
183: e APPENDS (MARK at 5)
184: . STOP
highest protocol among opcodes = 2
"""
DATA3 = (
b'\x80\x03]q\x00(K\x00K\x01G@\x00\x00\x00\x00\x00\x00\x00c'
b'builtins\ncomplex\nq\x01G'
b'@\x08\x00\x00\x00\x00\x00\x00G\x00\x00\x00\x00\x00\x00\x00\x00\x86q\x02'
b'Rq\x03K\x01J\xff\xff\xff\xffK\xffJ\x01\xff\xff\xffJ\x00\xff'
b'\xff\xffM\xff\xffJ\x01\x00\xff\xffJ\x00\x00\xff\xffJ\xff\xff\xff\x7f'
b'J\x01\x00\x00\x80J\x00\x00\x00\x80(X\x03\x00\x00\x00abcq'
b'\x04h\x04c__main__\nC\nq\x05)\x81q'
b'\x06}q\x07(X\x03\x00\x00\x00barq\x08K\x02X\x03\x00'
b'\x00\x00fooq\tK\x01ubh\x06tq\nh\nK\x05'
b'e.'
)
# Disassembly of DATA3
DATA3_DIS = """\
0: \x80 PROTO 3
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 0
8: K BININT1 1
10: G BINFLOAT 2.0
19: c GLOBAL 'builtins complex'
37: q BINPUT 1
39: G BINFLOAT 3.0
48: G BINFLOAT 0.0
57: \x86 TUPLE2
58: q BINPUT 2
60: R REDUCE
61: q BINPUT 3
63: K BININT1 1
65: J BININT -1
70: K BININT1 255
72: J BININT -255
77: J BININT -256
82: M BININT2 65535
85: J BININT -65535
90: J BININT -65536
95: J BININT 2147483647
100: J BININT -2147483647
105: J BININT -2147483648
110: ( MARK
111: X BINUNICODE 'abc'
119: q BINPUT 4
121: h BINGET 4
123: c GLOBAL '__main__ C'
135: q BINPUT 5
137: ) EMPTY_TUPLE
138: \x81 NEWOBJ
139: q BINPUT 6
141: } EMPTY_DICT
142: q BINPUT 7
144: ( MARK
145: X BINUNICODE 'bar'
153: q BINPUT 8
155: K BININT1 2
157: X BINUNICODE 'foo'
165: q BINPUT 9
167: K BININT1 1
169: u SETITEMS (MARK at 144)
170: b BUILD
171: h BINGET 6
173: t TUPLE (MARK at 110)
174: q BINPUT 10
176: h BINGET 10
178: K BININT1 5
180: e APPENDS (MARK at 5)
181: . STOP
highest protocol among opcodes = 2
"""
DATA4 = (
b'\x80\x04\x95\xa8\x00\x00\x00\x00\x00\x00\x00]\x94(K\x00K\x01G@'
b'\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\x07'
b'complex\x94\x93\x94G@\x08\x00\x00\x00\x00\x00\x00G'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x86\x94R\x94K\x01J\xff\xff\xff\xffK'
b'\xffJ\x01\xff\xff\xffJ\x00\xff\xff\xffM\xff\xffJ\x01\x00\xff\xffJ'
b'\x00\x00\xff\xffJ\xff\xff\xff\x7fJ\x01\x00\x00\x80J\x00\x00\x00\x80('
b'\x8c\x03abc\x94h\x06\x8c\x08__main__\x94\x8c'
b'\x01C\x94\x93\x94)\x81\x94}\x94(\x8c\x03bar\x94K\x02\x8c'
b'\x03foo\x94K\x01ubh\nt\x94h\x0eK\x05e.'
)
# Disassembly of DATA4
DATA4_DIS = """\
0: \x80 PROTO 4
2: \x95 FRAME 168
11: ] EMPTY_LIST
12: \x94 MEMOIZE
13: ( MARK
14: K BININT1 0
16: K BININT1 1
18: G BINFLOAT 2.0
27: \x8c SHORT_BINUNICODE 'builtins'
37: \x94 MEMOIZE
38: \x8c SHORT_BINUNICODE 'complex'
47: \x94 MEMOIZE
48: \x93 STACK_GLOBAL
49: \x94 MEMOIZE
50: G BINFLOAT 3.0
59: G BINFLOAT 0.0
68: \x86 TUPLE2
69: \x94 MEMOIZE
70: R REDUCE
71: \x94 MEMOIZE
72: K BININT1 1
74: J BININT -1
79: K BININT1 255
81: J BININT -255
86: J BININT -256
91: M BININT2 65535
94: J BININT -65535
99: J BININT -65536
104: J BININT 2147483647
109: J BININT -2147483647
114: J BININT -2147483648
119: ( MARK
120: \x8c SHORT_BINUNICODE 'abc'
125: \x94 MEMOIZE
126: h BINGET 6
128: \x8c SHORT_BINUNICODE '__main__'
138: \x94 MEMOIZE
139: \x8c SHORT_BINUNICODE 'C'
142: \x94 MEMOIZE
143: \x93 STACK_GLOBAL
144: \x94 MEMOIZE
145: ) EMPTY_TUPLE
146: \x81 NEWOBJ
147: \x94 MEMOIZE
148: } EMPTY_DICT
149: \x94 MEMOIZE
150: ( MARK
151: \x8c SHORT_BINUNICODE 'bar'
156: \x94 MEMOIZE
157: K BININT1 2
159: \x8c SHORT_BINUNICODE 'foo'
164: \x94 MEMOIZE
165: K BININT1 1
167: u SETITEMS (MARK at 150)
168: b BUILD
169: h BINGET 10
171: t TUPLE (MARK at 119)
172: \x94 MEMOIZE
173: h BINGET 14
175: K BININT1 5
177: e APPENDS (MARK at 13)
178: . STOP
highest protocol among opcodes = 4
"""
# set([1,2]) pickled from 2.x with protocol 2
DATA_SET = b'\x80\x02c__builtin__\nset\nq\x00]q\x01(K\x01K\x02e\x85q\x02Rq\x03.'
# xrange(5) pickled from 2.x with protocol 2
DATA_XRANGE = b'\x80\x02c__builtin__\nxrange\nq\x00K\x00K\x05K\x01\x87q\x01Rq\x02.'
# a SimpleCookie() object pickled from 2.x with protocol 2
DATA_COOKIE = (b'\x80\x02cCookie\nSimpleCookie\nq\x00)\x81q\x01U\x03key'
b'q\x02cCookie\nMorsel\nq\x03)\x81q\x04(U\x07commentq\x05U'
b'\x00q\x06U\x06domainq\x07h\x06U\x06secureq\x08h\x06U\x07'
b'expiresq\th\x06U\x07max-ageq\nh\x06U\x07versionq\x0bh\x06U'
b'\x04pathq\x0ch\x06U\x08httponlyq\rh\x06u}q\x0e(U\x0b'
b'coded_valueq\x0fU\x05valueq\x10h\x10h\x10h\x02h\x02ubs}q\x11b.')
# set([3]) pickled from 2.x with protocol 2
DATA_SET2 = b'\x80\x02c__builtin__\nset\nq\x00]q\x01K\x03a\x85q\x02Rq\x03.'
python2_exceptions_without_args = (
ArithmeticError,
AssertionError,
AttributeError,
BaseException,
BufferError,
BytesWarning,
DeprecationWarning,
EOFError,
EnvironmentError,
Exception,
FloatingPointError,
FutureWarning,
GeneratorExit,
IOError,
ImportError,
ImportWarning,
IndentationError,
IndexError,
KeyError,
KeyboardInterrupt,
LookupError,
MemoryError,
NameError,
NotImplementedError,
OSError,
OverflowError,
PendingDeprecationWarning,
ReferenceError,
RuntimeError,
RuntimeWarning,
# StandardError is gone in Python 3; we map it to Exception
StopIteration,
SyntaxError,
SyntaxWarning,
SystemError,
SystemExit,
TabError,
TypeError,
UnboundLocalError,
UnicodeError,
UnicodeWarning,
UserWarning,
ValueError,
Warning,
ZeroDivisionError,
)
exception_pickle = b'\x80\x02cexceptions\n?\nq\x00)Rq\x01.'
# UnicodeEncodeError object pickled from 2.x with protocol 2
DATA_UEERR = (b'\x80\x02cexceptions\nUnicodeEncodeError\n'
b'q\x00(U\x05asciiq\x01X\x03\x00\x00\x00fooq\x02K\x00K\x01'
b'U\x03badq\x03tq\x04Rq\x05.')
def create_data():
c = C()
c.foo = 1
c.bar = 2
x = [0, 1, 2.0, 3.0+0j]
# Append some integer test cases at cPickle.c's internal size
# cutoffs.
uint1max = 0xff
uint2max = 0xffff
int4max = 0x7fffffff
x.extend([1, -1,
uint1max, -uint1max, -uint1max-1,
uint2max, -uint2max, -uint2max-1,
int4max, -int4max, -int4max-1])
y = ('abc', 'abc', c, c)
x.append(y)
x.append(y)
x.append(5)
return x
class AbstractUnpickleTests(unittest.TestCase):
# Subclass must define self.loads.
_testdata = create_data()
def assert_is_copy(self, obj, objcopy, msg=None):
"""Utility method to verify if two objects are copies of each others.
"""
if msg is None:
msg = "{!r} is not a copy of {!r}".format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
if hasattr(obj, '__slots__'):
self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
for slot in obj.__slots__:
self.assertEqual(
hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
self.assertEqual(getattr(obj, slot, None),
getattr(objcopy, slot, None), msg=msg)
def check_unpickling_error(self, errors, data):
with self.subTest(data=data), \
self.assertRaises(errors):
try:
self.loads(data)
except BaseException as exc:
if support.verbose > 1:
print('%-32r - %s: %s' %
(data, exc.__class__.__name__, exc))
raise
def test_load_from_data0(self):
self.assert_is_copy(self._testdata, self.loads(DATA0))
def test_load_from_data1(self):
self.assert_is_copy(self._testdata, self.loads(DATA1))
def test_load_from_data2(self):
self.assert_is_copy(self._testdata, self.loads(DATA2))
def test_load_from_data3(self):
self.assert_is_copy(self._testdata, self.loads(DATA3))
def test_load_from_data4(self):
self.assert_is_copy(self._testdata, self.loads(DATA4))
def test_load_classic_instance(self):
# See issue5180. Test loading 2.x pickles that
# contain an instance of an old-style class.
for X, args in [(C, ()), (D, ('x',)), (E, ())]:
xname = X.__name__.encode('ascii')
# Protocol 0 (text mode pickle):
"""
0: ( MARK
1: i INST '__main__ X' (MARK at 0)
13: p PUT 0
16: ( MARK
17: d DICT (MARK at 16)
18: p PUT 1
21: b BUILD
22: . STOP
"""
pickle0 = (b"(i__main__\n"
b"X\n"
b"p0\n"
b"(dp1\nb.").replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle0))
# Protocol 1 (binary mode pickle)
"""
0: ( MARK
1: c GLOBAL '__main__ X'
13: q BINPUT 0
15: o OBJ (MARK at 0)
16: q BINPUT 1
18: } EMPTY_DICT
19: q BINPUT 2
21: b BUILD
22: . STOP
"""
pickle1 = (b'(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle1))
# Protocol 2 (pickle2 = b'\x80\x02' + pickle1)
"""
0: \x80 PROTO 2
2: ( MARK
3: c GLOBAL '__main__ X'
15: q BINPUT 0
17: o OBJ (MARK at 2)
18: q BINPUT 1
20: } EMPTY_DICT
21: q BINPUT 2
23: b BUILD
24: . STOP
"""
pickle2 = (b'\x80\x02(c__main__\n'
b'X\n'
b'q\x00oq\x01}q\x02b.').replace(b'X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
def test_maxint64(self):
maxint64 = (1 << 63) - 1
data = b'I' + str(maxint64).encode("ascii") + b'\n.'
got = self.loads(data)
self.assert_is_copy(maxint64, got)
# Also try with a bogus literal.
data = b'I' + str(maxint64).encode("ascii") + b'JUNK\n.'
self.check_unpickling_error(ValueError, data)
def test_unpickle_from_2x(self):
# Unpickle non-trivial data from Python 2.x.
loaded = self.loads(DATA_SET)
self.assertEqual(loaded, set([1, 2]))
loaded = self.loads(DATA_XRANGE)
self.assertEqual(type(loaded), type(range(0)))
self.assertEqual(list(loaded), list(range(5)))
loaded = self.loads(DATA_COOKIE)
self.assertEqual(type(loaded), SimpleCookie)
self.assertEqual(list(loaded.keys()), ["key"])
self.assertEqual(loaded["key"].value, "value")
# Exception objects without arguments pickled from 2.x with protocol 2
for exc in python2_exceptions_without_args:
data = exception_pickle.replace(b'?', exc.__name__.encode("ascii"))
loaded = self.loads(data)
self.assertIs(type(loaded), exc)
# StandardError is mapped to Exception, test that separately
loaded = self.loads(exception_pickle.replace(b'?', b'StandardError'))
self.assertIs(type(loaded), Exception)
loaded = self.loads(DATA_UEERR)
self.assertIs(type(loaded), UnicodeEncodeError)
self.assertEqual(loaded.object, "foo")
self.assertEqual(loaded.encoding, "ascii")
self.assertEqual(loaded.start, 0)
self.assertEqual(loaded.end, 1)
self.assertEqual(loaded.reason, "bad")
def test_load_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads(b"S'a\\x00\\xa0'\n.",
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=1)
self.assertEqual(self.loads(b'U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
# From Python 2: pickle.dumps('a\x00\xa0', protocol=2)
self.assertEqual(self.loads(b'\x80\x02U\x03a\x00\xa0.',
encoding="bytes"), b'a\x00\xa0')
def test_load_python2_unicode_as_str(self):
# From Python 2: pickle.dumps(u'π', protocol=0)
self.assertEqual(self.loads(b'V\\u03c0\n.',
encoding='bytes'), 'π')
# From Python 2: pickle.dumps(u'π', protocol=1)
self.assertEqual(self.loads(b'X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
# From Python 2: pickle.dumps(u'π', protocol=2)
self.assertEqual(self.loads(b'\x80\x02X\x02\x00\x00\x00\xcf\x80.',
encoding="bytes"), 'π')
def test_load_long_python2_str_as_bytes(self):
# From Python 2: pickle.dumps('x' * 300, protocol=1)
self.assertEqual(self.loads(pickle.BINSTRING +
struct.pack("<I", 300) +
b'x' * 300 + pickle.STOP,
encoding='bytes'), b'x' * 300)
def test_constants(self):
self.assertIsNone(self.loads(b'N.'))
self.assertIs(self.loads(b'\x88.'), True)
self.assertIs(self.loads(b'\x89.'), False)
self.assertIs(self.loads(b'I01\n.'), True)
self.assertIs(self.loads(b'I00\n.'), False)
def test_empty_bytestring(self):
# issue 11286
empty = self.loads(b'\x80\x03U\x00q\x00.', encoding='koi8-r')
self.assertEqual(empty, '')
def test_short_binbytes(self):
dumped = b'\x80\x03C\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binbytes(self):
dumped = b'\x80\x03B\x04\x00\x00\x00\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
@requires_32b
def test_negative_32b_binbytes(self):
# On 32-bit builds, a BINBYTES of 2**31 or more is refused
dumped = b'\x80\x03B\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_negative_32b_binunicode(self):
# On 32-bit builds, a BINUNICODE of 2**31 or more is refused
dumped = b'\x80\x03X\xff\xff\xff\xffxyzq\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_short_binunicode(self):
dumped = b'\x80\x04\x8c\x04\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_misc_get(self):
self.check_unpickling_error(KeyError, b'g0\np0')
self.assert_is_copy([(100,), (100,)],
self.loads(b'((Kdtp0\nh\x00l.))'))
def test_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), b'\xe2\x82\xac\x00')
def test_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\0\0\0\0\xe2\x82\xac\x00.'
self.assertEqual(self.loads(dumped), '\u20ac\x00')
def test_bytearray8(self):
dumped = b'\x80\x05\x96\x03\x00\x00\x00\x00\x00\x00\x00xxx.'
self.assertEqual(self.loads(dumped), bytearray(b'xxx'))
@requires_32b
def test_large_32b_binbytes8(self):
dumped = b'\x80\x04\x8e\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_bytearray8(self):
dumped = b'\x80\x05\x96\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
@requires_32b
def test_large_32b_binunicode8(self):
dumped = b'\x80\x04\x8d\4\0\0\0\1\0\0\0\xe2\x82\xac\x00.'
self.check_unpickling_error((pickle.UnpicklingError, OverflowError),
dumped)
def test_get(self):
pickled = b'((lp100000\ng100000\nt.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_binget(self):
pickled = b'(]q\xffh\xfft.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_long_binget(self):
pickled = b'(]r\x00\x00\x01\x00j\x00\x00\x01\x00t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_dup(self):
pickled = b'((l2t.'
unpickled = self.loads(pickled)
self.assertEqual(unpickled, ([],)*2)
self.assertIs(unpickled[0], unpickled[1])
def test_negative_put(self):
# Issue #12847
dumped = b'Va\np-1\n.'
self.check_unpickling_error(ValueError, dumped)
@requires_32b
def test_negative_32b_binput(self):
# Issue #12847
dumped = b'\x80\x03X\x01\x00\x00\x00ar\xff\xff\xff\xff.'
self.check_unpickling_error(ValueError, dumped)
def test_badly_escaped_string(self):
self.check_unpickling_error(ValueError, b"S'\\'\n.")
def test_badly_quoted_string(self):
# Issue #17710
badpickles = [b"S'\n.",
b'S"\n.',
b'S\' \n.',
b'S" \n.',
b'S\'"\n.',
b'S"\'\n.',
b"S' ' \n.",
b'S" " \n.',
b"S ''\n.",
b'S ""\n.',
b'S \n.',
b'S\n.',
b'S.']
for p in badpickles:
self.check_unpickling_error(pickle.UnpicklingError, p)
def test_correctly_quoted_string(self):
goodpickles = [(b"S''\n.", ''),
(b'S""\n.', ''),
(b'S"\\n"\n.', '\n'),
(b"S'\\n'\n.", '\n')]
for p, expected in goodpickles:
self.assertEqual(self.loads(p), expected)
def test_frame_readline(self):
pickled = b'\x80\x04\x95\x05\x00\x00\x00\x00\x00\x00\x00I42\n.'
# 0: \x80 PROTO 4
# 2: \x95 FRAME 5
# 11: I INT 42
# 15: . STOP
self.assertEqual(self.loads(pickled), 42)
def test_compat_unpickle(self):
# xrange(1, 7)
pickled = b'\x80\x02c__builtin__\nxrange\nK\x01K\x07K\x01\x87R.'
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), range)
self.assertEqual(unpickled, range(1, 7))
self.assertEqual(list(unpickled), [1, 2, 3, 4, 5, 6])
# reduce
pickled = b'\x80\x02c__builtin__\nreduce\n.'
self.assertIs(self.loads(pickled), functools.reduce)
# whichdb.whichdb
pickled = b'\x80\x02cwhichdb\nwhichdb\n.'
self.assertIs(self.loads(pickled), dbm.whichdb)
# Exception(), StandardError()
for name in (b'Exception', b'StandardError'):
pickled = (b'\x80\x02cexceptions\n' + name + b'\nU\x03ugh\x85R.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), Exception)
self.assertEqual(str(unpickled), 'ugh')
# UserDict.UserDict({1: 2}), UserDict.IterableUserDict({1: 2})
for name in (b'UserDict', b'IterableUserDict'):
pickled = (b'\x80\x02(cUserDict\n' + name +
b'\no}U\x04data}K\x01K\x02ssb.')
unpickled = self.loads(pickled)
self.assertIs(type(unpickled), collections.UserDict)
self.assertEqual(unpickled, collections.UserDict({1: 2}))
def test_bad_stack(self):
badpickles = [
b'.', # STOP
b'0', # POP
b'1', # POP_MARK
b'2', # DUP
b'(2',
b'R', # REDUCE
b')R',
b'a', # APPEND
b'Na',
b'b', # BUILD
b'Nb',
b'd', # DICT
b'e', # APPENDS
b'(e',
b'ibuiltins\nlist\n', # INST
b'l', # LIST
b'o', # OBJ
b'(o',
b'p1\n', # PUT
b'q\x00', # BINPUT
b'r\x00\x00\x00\x00', # LONG_BINPUT
b's', # SETITEM
b'Ns',
b'NNs',
b't', # TUPLE
b'u', # SETITEMS
b'(u',
b'}(Nu',
b'\x81', # NEWOBJ
b')\x81',
b'\x85', # TUPLE1
b'\x86', # TUPLE2
b'N\x86',
b'\x87', # TUPLE3
b'N\x87',
b'NN\x87',
b'\x90', # ADDITEMS
b'(\x90',
b'\x91', # FROZENSET
b'\x92', # NEWOBJ_EX
b')}\x92',
b'\x93', # STACK_GLOBAL
b'Vlist\n\x93',
b'\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_bad_mark(self):
badpickles = [
b'N(.', # STOP
b'N(2', # DUP
b'cbuiltins\nlist\n)(R', # REDUCE
b'cbuiltins\nlist\n()R',
b']N(a', # APPEND
# BUILD
b'cbuiltins\nValueError\n)R}(b',
b'cbuiltins\nValueError\n)R(}b',
b'(Nd', # DICT
b'N(p1\n', # PUT
b'N(q\x00', # BINPUT
b'N(r\x00\x00\x00\x00', # LONG_BINPUT
b'}NN(s', # SETITEM
b'}N(Ns',
b'}(NNs',
b'}((u', # SETITEMS
b'cbuiltins\nlist\n)(\x81', # NEWOBJ
b'cbuiltins\nlist\n()\x81',
b'N(\x85', # TUPLE1
b'NN(\x86', # TUPLE2
b'N(N\x86',
b'NNN(\x87', # TUPLE3
b'NN(N\x87',
b'N(NN\x87',
b']((\x90', # ADDITEMS
# NEWOBJ_EX
b'cbuiltins\nlist\n)}(\x92',
b'cbuiltins\nlist\n)(}\x92',
b'cbuiltins\nlist\n()}\x92',
# STACK_GLOBAL
b'Vbuiltins\n(Vlist\n\x93',
b'Vbuiltins\nVlist\n(\x93',
b'N(\x94', # MEMOIZE
]
for p in badpickles:
self.check_unpickling_error(self.bad_stack_errors, p)
def test_truncated_data(self):
self.check_unpickling_error(EOFError, b'')
self.check_unpickling_error(EOFError, b'N')
badpickles = [
b'B', # BINBYTES
b'B\x03\x00\x00',
b'B\x03\x00\x00\x00',
b'B\x03\x00\x00\x00ab',
b'C', # SHORT_BINBYTES
b'C\x03',
b'C\x03ab',
b'F', # FLOAT
b'F0.0',
b'F0.00',
b'G', # BINFLOAT
b'G\x00\x00\x00\x00\x00\x00\x00',
b'I', # INT
b'I0',
b'J', # BININT
b'J\x00\x00\x00',
b'K', # BININT1
b'L', # LONG
b'L0',
b'L10',
b'L0L',
b'L10L',
b'M', # BININT2
b'M\x00',
# b'P', # PERSID
# b'Pabc',
b'S', # STRING
b"S'abc'",
b'T', # BINSTRING
b'T\x03\x00\x00',
b'T\x03\x00\x00\x00',
b'T\x03\x00\x00\x00ab',
b'U', # SHORT_BINSTRING
b'U\x03',
b'U\x03ab',
b'V', # UNICODE
b'Vabc',
b'X', # BINUNICODE
b'X\x03\x00\x00',
b'X\x03\x00\x00\x00',
b'X\x03\x00\x00\x00ab',
b'(c', # GLOBAL
b'(cbuiltins',
b'(cbuiltins\n',
b'(cbuiltins\nlist',
b'Ng', # GET
b'Ng0',
b'(i', # INST
b'(ibuiltins',
b'(ibuiltins\n',
b'(ibuiltins\nlist',
b'Nh', # BINGET
b'Nj', # LONG_BINGET
b'Nj\x00\x00\x00',
b'Np', # PUT
b'Np0',
b'Nq', # BINPUT
b'Nr', # LONG_BINPUT
b'Nr\x00\x00\x00',
b'\x80', # PROTO
b'\x82', # EXT1
b'\x83', # EXT2
b'\x84\x01',
b'\x84', # EXT4
b'\x84\x01\x00\x00',
b'\x8a', # LONG1
b'\x8b', # LONG4
b'\x8b\x00\x00\x00',
b'\x8c', # SHORT_BINUNICODE
b'\x8c\x03',
b'\x8c\x03ab',
b'\x8d', # BINUNICODE8
b'\x8d\x03\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8d\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x8e', # BINBYTES8
b'\x8e\x03\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x8e\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x96', # BYTEARRAY8
b'\x96\x03\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00',
b'\x96\x03\x00\x00\x00\x00\x00\x00\x00ab',
b'\x95', # FRAME
b'\x95\x02\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00',
b'\x95\x02\x00\x00\x00\x00\x00\x00\x00N',
]
for p in badpickles:
self.check_unpickling_error(self.truncated_errors, p)
@reap_threads
def test_unpickle_module_race(self):
# https://bugs.python.org/issue34572
locker_module = dedent("""
import threading
barrier = threading.Barrier(2)
""")
locking_import_module = dedent("""
import locker
locker.barrier.wait()
class ToBeUnpickled(object):
pass
""")
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
sys.path.insert(0, TESTFN)
self.addCleanup(sys.path.remove, TESTFN)
with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
f.write(locker_module.encode('utf-8'))
with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
f.write(locking_import_module.encode('utf-8'))
self.addCleanup(forget, "locker")
self.addCleanup(forget, "locking_import")
import locker
pickle_bytes = (
b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')
# Then try to unpickle two of these simultaneously
# One of them will cause the module import, and we want it to block
# until the other one either:
# - fails (before the patch for this issue)
# - blocks on the import lock for the module, as it should
results = []
barrier = threading.Barrier(3)
def t():
# This ensures the threads have all started
# presumably barrier release is faster than thread startup
barrier.wait()
results.append(pickle.loads(pickle_bytes))
t1 = threading.Thread(target=t)
t2 = threading.Thread(target=t)
t1.start()
t2.start()
barrier.wait()
# could have delay here
locker.barrier.wait()
t1.join()
t2.join()
from locking_import import ToBeUnpickled
self.assertEqual(
[type(x) for x in results],
[ToBeUnpickled] * 2)
class AbstractPickleTests(unittest.TestCase):
# Subclass must define self.dumps, self.loads.
optimized = False
_testdata = AbstractUnpickleTests._testdata
def setUp(self):
pass
assert_is_copy = AbstractUnpickleTests.assert_is_copy
def test_misc(self):
# test various datatypes not tested by testdata
for proto in protocols:
x = myint(4)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = (1, ())
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
x = initarg(1, x)
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
# XXX test __reduce__ protocol?
def test_roundtrip_equality(self):
expected = self._testdata
for proto in protocols:
s = self.dumps(expected, proto)
got = self.loads(s)
self.assert_is_copy(expected, got)
# There are gratuitous differences between pickles produced by
# pickle and cPickle, largely because cPickle starts PUT indices at
# 1 and pickle starts them at 0. See XXX comment in cPickle's put2() --
# there's a comment with an exclamation point there whose meaning
# is a mystery. cPickle also suppresses PUT for objects with a refcount
# of 1.
def dont_test_disassembly(self):
from io import StringIO
from pickletools import dis
for proto, expected in (0, DATA0_DIS), (1, DATA1_DIS):
s = self.dumps(self._testdata, proto)
filelike = StringIO()
dis(s, out=filelike)
got = filelike.getvalue()
self.assertEqual(expected, got)
def test_recursive_list(self):
l = []
l.append(l)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_tuple_and_list(self):
t = ([],)
t[0].append(t)
for proto in protocols:
s = self.dumps(t, proto)
x = self.loads(s)
self.assertIsInstance(x, tuple)
self.assertEqual(len(x), 1)
self.assertIsInstance(x[0], list)
self.assertEqual(len(x[0]), 1)
self.assertIs(x[0][0], x)
def test_recursive_dict(self):
d = {}
d[1] = d
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_key(self):
d = {}
k = K(d)
d[k] = 1
for proto in protocols:
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, dict)
self.assertEqual(len(x.keys()), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_set(self):
y = set()
k = K(y)
y.add(k)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, set)
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], K)
self.assertIs(list(x)[0].value, x)
def test_recursive_list_subclass(self):
y = MyList()
y.append(y)
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, MyList)
self.assertEqual(len(x), 1)
self.assertIs(x[0], x)
def test_recursive_dict_subclass(self):
d = MyDict()
d[1] = d
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(list(x.keys()), [1])
self.assertIs(x[1], x)
def test_recursive_dict_subclass_key(self):
d = MyDict()
k = K(d)
d[k] = 1
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = self.dumps(d, proto)
x = self.loads(s)
self.assertIsInstance(x, MyDict)
self.assertEqual(len(list(x.keys())), 1)
self.assertIsInstance(list(x.keys())[0], K)
self.assertIs(list(x.keys())[0].value, x)
def test_recursive_inst(self):
i = C()
i.attr = i
for proto in protocols:
s = self.dumps(i, proto)
x = self.loads(s)
self.assertIsInstance(x, C)
self.assertEqual(dir(x), dir(i))
self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
d = {1:l}
i = C()
i.attr = d
l.append(i)
for proto in protocols:
s = self.dumps(l, proto)
x = self.loads(s)
self.assertIsInstance(x, list)
self.assertEqual(len(x), 1)
self.assertEqual(dir(x[0]), dir(i))
self.assertEqual(list(x[0].attr.keys()), [1])
self.assertTrue(x[0].attr[1] is x)
def check_recursive_collection_and_inst(self, factory):
h = H()
y = factory([h])
h.attr = y
for proto in protocols:
s = self.dumps(y, proto)
x = self.loads(s)
self.assertIsInstance(x, type(y))
self.assertEqual(len(x), 1)
self.assertIsInstance(list(x)[0], H)
self.assertIs(list(x)[0].attr, x)
def test_recursive_list_and_inst(self):
self.check_recursive_collection_and_inst(list)
def test_recursive_tuple_and_inst(self):
self.check_recursive_collection_and_inst(tuple)
def test_recursive_dict_and_inst(self):
self.check_recursive_collection_and_inst(dict.fromkeys)
def test_recursive_set_and_inst(self):
self.check_recursive_collection_and_inst(set)
def test_recursive_frozenset_and_inst(self):
self.check_recursive_collection_and_inst(frozenset)
def test_recursive_list_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyList)
def test_recursive_tuple_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyTuple)
def test_recursive_dict_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyDict.fromkeys)
def test_recursive_set_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MySet)
def test_recursive_frozenset_subclass_and_inst(self):
self.check_recursive_collection_and_inst(MyFrozenSet)
def test_unicode(self):
endcases = ['', '<\\u>', '<\\\u1234>', '<\n>',
'<\\>', '<\\\U00012345>',
# surrogates
'<\udc80>']
for proto in protocols:
for u in endcases:
p = self.dumps(u, proto)
u2 = self.loads(p)
self.assert_is_copy(u, u2)
def test_unicode_high_plane(self):
t = '\U00012345'
for proto in protocols:
p = self.dumps(t, proto)
t2 = self.loads(p)
self.assert_is_copy(t, t2)
def test_bytes(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
for s in [bytes([i, i]) for i in range(256)]:
p = self.dumps(s, proto)
self.assert_is_copy(s, self.loads(p))
def test_bytearray(self):
for proto in protocols:
for s in b'', b'xyz', b'xyz'*100:
b = bytearray(s)
p = self.dumps(b, proto)
bb = self.loads(p)
self.assertIsNot(bb, b)
self.assert_is_copy(b, bb)
if proto <= 3:
# bytearray is serialized using a global reference
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.GLOBAL, p))
elif proto == 4:
self.assertIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.STACK_GLOBAL, p))
elif proto == 5:
self.assertNotIn(b'bytearray', p)
self.assertTrue(opcode_in_pickle(pickle.BYTEARRAY8, p))
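    # Illustrative note (not part of the original suite): the per-protocol
    # expectations above can be checked by hand with pickletools.dis, which
    # prints a pickle's opcode stream. A hedged sketch:
    #
    #     import pickle, pickletools
    #     pickletools.dis(pickle.dumps(bytearray(b"xyz"), protocol=5))
    #     # the listing should include a BYTEARRAY8 opcode, not a global ref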
def test_ints(self):
for proto in protocols:
n = sys.maxsize
while n:
for expected in (-n, n):
s = self.dumps(expected, proto)
n2 = self.loads(s)
self.assert_is_copy(expected, n2)
n = n >> 1
def test_long(self):
for proto in protocols:
# 256 bytes is where LONG4 begins.
for nbits in 1, 8, 8*254, 8*255, 8*256, 8*257:
nbase = 1 << nbits
for npos in nbase-1, nbase, nbase+1:
for n in npos, -npos:
pickle = self.dumps(n, proto)
got = self.loads(pickle)
self.assert_is_copy(n, got)
# Try a monster. This is quadratic-time in protos 0 & 1, so don't
# bother with those.
nbase = int("deadbeeffeedface", 16)
nbase += nbase << 1000000
for n in nbase, -nbase:
p = self.dumps(n, 2)
got = self.loads(p)
            # assert_is_copy is very expensive here as it precomputes
            # a failure message by computing the repr() of n and got,
            # so we just do the check ourselves.
self.assertIs(type(got), int)
self.assertEqual(n, got)
def test_float(self):
test_values = [0.0, 4.94e-324, 1e-310, 7e-308, 6.626e-34, 0.1, 0.5,
3.14, 263.44582062374053, 6.022e23, 1e30]
test_values = test_values + [-x for x in test_values]
for proto in protocols:
for value in test_values:
pickle = self.dumps(value, proto)
got = self.loads(pickle)
self.assert_is_copy(value, got)
@run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_float_format(self):
        # make sure that floats are formatted locale-independently with proto 0
self.assertEqual(self.dumps(1.2, 0)[0:3], b'F1.')
def test_reduce(self):
for proto in protocols:
inst = AAA()
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, REDUCE_A)
def test_getinitargs(self):
for proto in protocols:
inst = initarg(1, 2)
dumped = self.dumps(inst, proto)
loaded = self.loads(dumped)
self.assert_is_copy(inst, loaded)
def test_metaclass(self):
a = use_metaclass()
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a.__class__, b.__class__)
def test_dynamic_class(self):
a = create_dynamic_class("my_dynamic_class", (object,))
copyreg.pickle(pickling_metaclass, pickling_metaclass.__reduce__)
for proto in protocols:
s = self.dumps(a, proto)
b = self.loads(s)
self.assertEqual(a, b)
self.assertIs(type(a), type(b))
def test_structseq(self):
import time
import os
t = time.localtime()
for proto in protocols:
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
t = os.stat(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
if hasattr(os, "statvfs"):
t = os.statvfs(os.curdir)
s = self.dumps(t, proto)
u = self.loads(s)
self.assert_is_copy(t, u)
def test_ellipsis(self):
for proto in protocols:
s = self.dumps(..., proto)
u = self.loads(s)
self.assertIs(..., u)
def test_notimplemented(self):
for proto in protocols:
s = self.dumps(NotImplemented, proto)
u = self.loads(s)
self.assertIs(NotImplemented, u)
def test_singleton_types(self):
# Issue #6477: Test that types of built-in singletons can be pickled.
singletons = [None, ..., NotImplemented]
for singleton in singletons:
for proto in protocols:
s = self.dumps(type(singleton), proto)
u = self.loads(s)
self.assertIs(type(singleton), u)
# Tests for protocol 2
def test_proto(self):
for proto in protocols:
pickled = self.dumps(None, proto)
if proto >= 2:
proto_header = pickle.PROTO + bytes([proto])
self.assertTrue(pickled.startswith(proto_header))
else:
self.assertEqual(count_opcode(pickle.PROTO, pickled), 0)
oob = protocols[-1] + 1 # a future protocol
build_none = pickle.NONE + pickle.STOP
badpickle = pickle.PROTO + bytes([oob]) + build_none
try:
self.loads(badpickle)
except ValueError as err:
self.assertIn("unsupported pickle protocol", str(err))
else:
self.fail("expected bad protocol number to raise ValueError")
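    # Illustrative note (not from the original suite): for protocol 2 and
    # above, a pickle starts with the two-byte PROTO header -- pickle.PROTO
    # (b'\x80') followed by the protocol number -- which is exactly what the
    # startswith() check above exercises. A hedged sketch:
    #
    #     pickle.dumps(None, 4)[:2] == b'\x80\x04'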
def test_long1(self):
x = 12345678910111213141516178920
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG1, s), proto >= 2)
def test_long4(self):
x = 12345678910111213141516178920 << (256*8)
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
self.assertEqual(opcode_in_pickle(pickle.LONG4, s), proto >= 2)
def test_short_tuples(self):
# Map (proto, len(tuple)) to expected opcode.
expected_opcode = {(0, 0): pickle.TUPLE,
(0, 1): pickle.TUPLE,
(0, 2): pickle.TUPLE,
(0, 3): pickle.TUPLE,
(0, 4): pickle.TUPLE,
(1, 0): pickle.EMPTY_TUPLE,
(1, 1): pickle.TUPLE,
(1, 2): pickle.TUPLE,
(1, 3): pickle.TUPLE,
(1, 4): pickle.TUPLE,
(2, 0): pickle.EMPTY_TUPLE,
(2, 1): pickle.TUPLE1,
(2, 2): pickle.TUPLE2,
(2, 3): pickle.TUPLE3,
(2, 4): pickle.TUPLE,
(3, 0): pickle.EMPTY_TUPLE,
(3, 1): pickle.TUPLE1,
(3, 2): pickle.TUPLE2,
(3, 3): pickle.TUPLE3,
(3, 4): pickle.TUPLE,
}
a = ()
b = (1,)
c = (1, 2)
d = (1, 2, 3)
e = (1, 2, 3, 4)
for proto in protocols:
for x in a, b, c, d, e:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
expected = expected_opcode[min(proto, 3), len(x)]
self.assertTrue(opcode_in_pickle(expected, s))
def test_singletons(self):
# Map (proto, singleton) to expected opcode.
expected_opcode = {(0, None): pickle.NONE,
(1, None): pickle.NONE,
(2, None): pickle.NONE,
(3, None): pickle.NONE,
(0, True): pickle.INT,
(1, True): pickle.INT,
(2, True): pickle.NEWTRUE,
(3, True): pickle.NEWTRUE,
(0, False): pickle.INT,
(1, False): pickle.INT,
(2, False): pickle.NEWFALSE,
(3, False): pickle.NEWFALSE,
}
for proto in protocols:
for x in None, False, True:
s = self.dumps(x, proto)
y = self.loads(s)
self.assertTrue(x is y, (proto, x, s, y))
expected = expected_opcode[min(proto, 3), x]
self.assertTrue(opcode_in_pickle(expected, s))
def test_newobj_tuple(self):
x = MyTuple([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_list(self):
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_newobj_generic(self):
for proto in protocols:
for C in myclasses:
B = C.__base__
x = C(C.sample)
x.foo = 42
s = self.dumps(x, proto)
y = self.loads(s)
detail = (proto, C, B, x, y, type(y))
self.assert_is_copy(x, y) # XXX revisit
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_proxies(self):
# NEWOBJ should use the __class__ rather than the raw type
classes = myclasses[:]
# Cannot create weakproxies to these classes
for c in (MyInt, MyTuple):
classes.remove(c)
for proto in protocols:
for C in classes:
B = C.__base__
x = C(C.sample)
x.foo = 42
p = weakref.proxy(x)
s = self.dumps(p, proto)
y = self.loads(s)
self.assertEqual(type(y), type(x)) # rather than type(p)
detail = (proto, C, B, x, y, type(y))
self.assertEqual(B(x), B(y), detail)
self.assertEqual(x.__dict__, y.__dict__, detail)
def test_newobj_not_class(self):
# Issue 24552
global SimpleNewObj
save = SimpleNewObj
o = SimpleNewObj.__new__(SimpleNewObj)
b = self.dumps(o, 4)
try:
SimpleNewObj = 42
self.assertRaises((TypeError, pickle.UnpicklingError), self.loads, b)
finally:
SimpleNewObj = save
# Register a type with copyreg, with extension code extcode. Pickle
# an object of that type. Check that the resulting pickle uses opcode
# (EXT[124]) under proto 2, and not in proto 1.
def produce_global_ext(self, extcode, opcode):
e = ExtensionSaver(extcode)
try:
copyreg.add_extension(__name__, "MyList", extcode)
x = MyList([1, 2, 3])
x.foo = 42
x.bar = "hello"
# Dump using protocol 1 for comparison.
s1 = self.dumps(x, 1)
self.assertIn(__name__.encode("utf-8"), s1)
self.assertIn(b"MyList", s1)
self.assertFalse(opcode_in_pickle(opcode, s1))
y = self.loads(s1)
self.assert_is_copy(x, y)
# Dump using protocol 2 for test.
s2 = self.dumps(x, 2)
self.assertNotIn(__name__.encode("utf-8"), s2)
self.assertNotIn(b"MyList", s2)
self.assertEqual(opcode_in_pickle(opcode, s2), True, repr(s2))
y = self.loads(s2)
self.assert_is_copy(x, y)
finally:
e.restore()
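    # Hedged usage sketch (not part of the original suite): the copyreg
    # extension registry maps (module, name) pairs to small integer codes so
    # that protocol 2+ can emit a compact EXT1/EXT2/EXT4 opcode instead of
    # the full dotted name. Assuming an unused code, registration looks
    # roughly like:
    #
    #     copyreg.add_extension(__name__, "MyList", 0x123456)  # EXT4 range
    #     data = pickle.dumps(MyList([1]), 2)  # no b"MyList" in the output
    #     copyreg.remove_extension(__name__, "MyList", 0x123456)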
def test_global_ext1(self):
self.produce_global_ext(0x00000001, pickle.EXT1) # smallest EXT1 code
self.produce_global_ext(0x000000ff, pickle.EXT1) # largest EXT1 code
def test_global_ext2(self):
self.produce_global_ext(0x00000100, pickle.EXT2) # smallest EXT2 code
self.produce_global_ext(0x0000ffff, pickle.EXT2) # largest EXT2 code
self.produce_global_ext(0x0000abcd, pickle.EXT2) # check endianness
def test_global_ext4(self):
self.produce_global_ext(0x00010000, pickle.EXT4) # smallest EXT4 code
self.produce_global_ext(0x7fffffff, pickle.EXT4) # largest EXT4 code
self.produce_global_ext(0x12abcdef, pickle.EXT4) # check endianness
def test_list_chunking(self):
n = 10 # too small to chunk
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
self.assertEqual(num_appends, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = list(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_appends = count_opcode(pickle.APPENDS, s)
if proto == 0:
self.assertEqual(num_appends, 0)
else:
self.assertTrue(num_appends >= 2)
def test_dict_chunking(self):
n = 10 # too small to chunk
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
self.assertIsInstance(s, bytes_types)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
self.assertEqual(num_setitems, proto > 0)
n = 2500 # expect at least two chunks when proto > 0
x = dict.fromkeys(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_setitems = count_opcode(pickle.SETITEMS, s)
if proto == 0:
self.assertEqual(num_setitems, 0)
else:
self.assertTrue(num_setitems >= 2)
def test_set_chunking(self):
n = 10 # too small to chunk
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertEqual(num_additems, 1)
n = 2500 # expect at least two chunks when proto >= 4
x = set(range(n))
for proto in protocols:
s = self.dumps(x, proto)
y = self.loads(s)
self.assert_is_copy(x, y)
num_additems = count_opcode(pickle.ADDITEMS, s)
if proto < 4:
self.assertEqual(num_additems, 0)
else:
self.assertGreaterEqual(num_additems, 2)
def test_simple_newobj(self):
x = SimpleNewObj.__new__(SimpleNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
else:
self.assertIn(b'M\xce\xfa', s) # BININT2
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj(self):
x = ComplexNewObj.__new__(ComplexNewObj, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ, s),
2 <= proto)
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ_EX, s))
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_complex_newobj_ex(self):
x = ComplexNewObjEx.__new__(ComplexNewObjEx, 0xface) # avoid __init__
x.abc = 666
for proto in protocols:
with self.subTest(proto=proto):
s = self.dumps(x, proto)
if proto < 1:
self.assertIn(b'\nI64206', s) # INT
elif proto < 2:
self.assertIn(b'M\xce\xfa', s) # BININT2
elif proto < 4:
self.assertIn(b'X\x04\x00\x00\x00FACE', s) # BINUNICODE
else:
self.assertIn(b'\x8c\x04FACE', s) # SHORT_BINUNICODE
self.assertFalse(opcode_in_pickle(pickle.NEWOBJ, s))
self.assertEqual(opcode_in_pickle(pickle.NEWOBJ_EX, s),
4 <= proto)
y = self.loads(s) # will raise TypeError if __init__ called
self.assert_is_copy(x, y)
def test_newobj_list_slots(self):
x = SlotList([1, 2, 3])
x.foo = 42
x.bar = "hello"
s = self.dumps(x, 2)
y = self.loads(s)
self.assert_is_copy(x, y)
def test_reduce_overrides_default_reduce_ex(self):
for proto in protocols:
x = REX_one()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 0)
def test_reduce_ex_called(self):
for proto in protocols:
x = REX_two()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
x = BadGetattr()
for proto in protocols:
self.assertRaises(RuntimeError, self.dumps, x, proto)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Python implementation is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except pickle.PicklingError:
pass
try:
self.dumps(D(), proto)
except pickle.PicklingError:
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in range(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
with self.subTest(proto=proto):
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assert_is_copy(obj, loaded)
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
def test_pickle_to_2x(self):
# Pickle non-trivial data with protocol 2, expecting that it yields
# the same result as Python 2.x did.
# NOTE: this test is a bit too strong since we can produce different
# bytecode that 2.x will still understand.
dumped = self.dumps(range(5), 2)
self.assertEqual(dumped, DATA_XRANGE)
dumped = self.dumps(set([3]), 2)
self.assertEqual(dumped, DATA_SET2)
def test_large_pickles(self):
# Test the correctness of internal buffering routines when handling
# large data.
for proto in protocols:
data = (1, min, b'xy' * (30 * 1024), len)
dumped = self.dumps(data, proto)
loaded = self.loads(dumped)
self.assertEqual(len(loaded), len(data))
self.assertEqual(loaded, data)
def test_int_pickling_efficiency(self):
        # Test compactness of the int representation (see issue #12744)
for proto in protocols:
with self.subTest(proto=proto):
pickles = [self.dumps(2**n, proto) for n in range(70)]
sizes = list(map(len, pickles))
# the size function is monotonic
self.assertEqual(sorted(sizes), sizes)
if proto >= 2:
for p in pickles:
self.assertFalse(opcode_in_pickle(pickle.LONG, p))
def _check_pickling_with_opcode(self, obj, opcode, proto):
pickled = self.dumps(obj, proto)
self.assertTrue(opcode_in_pickle(opcode, pickled))
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
def test_appends_on_non_lists(self):
# Issue #17720
obj = REX_six([1, 2, 3])
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.APPEND, proto)
else:
self._check_pickling_with_opcode(obj, pickle.APPENDS, proto)
def test_setitems_on_non_dicts(self):
obj = REX_seven({1: -1, 2: -2, 3: -3})
for proto in protocols:
if proto == 0:
self._check_pickling_with_opcode(obj, pickle.SETITEM, proto)
else:
self._check_pickling_with_opcode(obj, pickle.SETITEMS, proto)
# Exercise framing (proto >= 4) for significant workloads
FRAME_SIZE_MIN = 4
FRAME_SIZE_TARGET = 64 * 1024
def check_frame_opcodes(self, pickled):
"""
Check the arguments of FRAME opcodes in a protocol 4+ pickle.
Note that binary objects that are larger than FRAME_SIZE_TARGET are not
framed by default and are therefore considered a frame by themselves in
the following consistency check.
"""
frame_end = frameless_start = None
frameless_opcodes = {'BINBYTES', 'BINUNICODE', 'BINBYTES8',
'BINUNICODE8', 'BYTEARRAY8'}
for op, arg, pos in pickletools.genops(pickled):
if frame_end is not None:
self.assertLessEqual(pos, frame_end)
if pos == frame_end:
frame_end = None
if frame_end is not None: # framed
self.assertNotEqual(op.name, 'FRAME')
if op.name in frameless_opcodes:
# Only short bytes and str objects should be written
# in a frame
self.assertLessEqual(len(arg), self.FRAME_SIZE_TARGET)
else: # not framed
if (op.name == 'FRAME' or
(op.name in frameless_opcodes and
len(arg) > self.FRAME_SIZE_TARGET)):
# Frame or large bytes or str object
if frameless_start is not None:
# Only short data should be written outside of a frame
self.assertLess(pos - frameless_start,
self.FRAME_SIZE_MIN)
frameless_start = None
elif frameless_start is None and op.name != 'PROTO':
frameless_start = pos
if op.name == 'FRAME':
self.assertGreaterEqual(arg, self.FRAME_SIZE_MIN)
frame_end = pos + 9 + arg
pos = len(pickled)
if frame_end is not None:
self.assertEqual(frame_end, pos)
elif frameless_start is not None:
self.assertLess(pos - frameless_start, self.FRAME_SIZE_MIN)
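    # Hedged inspection sketch (not from the original suite): the framing
    # checks above rely on pickletools.genops(), which yields
    # (opcode, arg, pos) triples, so the FRAME opcodes and their length
    # arguments can be listed directly:
    #
    #     frames = [(pos, arg) for op, arg, pos in pickletools.genops(pickled)
    #               if op.name == 'FRAME']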
def test_framing_many_objects(self):
obj = list(range(10**5))
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
pickled = self.dumps(obj, proto)
unpickled = self.loads(pickled)
self.assertEqual(obj, unpickled)
bytes_per_frame = (len(pickled) /
count_opcode(pickle.FRAME, pickled))
self.assertGreater(bytes_per_frame,
self.FRAME_SIZE_TARGET / 2)
self.assertLessEqual(bytes_per_frame,
self.FRAME_SIZE_TARGET * 1)
self.check_frame_opcodes(pickled)
def test_framing_large_objects(self):
N = 1024 * 1024
small_items = [[i] for i in range(10)]
obj = [b'x' * N, *small_items, b'y' * N, 'z' * N]
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
for fast in [False, True]:
with self.subTest(proto=proto, fast=fast):
if not fast:
# fast=False by default.
# This covers in-memory pickling with pickle.dumps().
pickled = self.dumps(obj, proto)
else:
# Pickler is required when fast=True.
if not hasattr(self, 'pickler'):
continue
buf = io.BytesIO()
pickler = self.pickler(buf, protocol=proto)
pickler.fast = fast
pickler.dump(obj)
pickled = buf.getvalue()
unpickled = self.loads(pickled)
# More informative error message in case of failure.
self.assertEqual([len(x) for x in obj],
[len(x) for x in unpickled])
# Perform full equality check if the lengths match.
self.assertEqual(obj, unpickled)
n_frames = count_opcode(pickle.FRAME, pickled)
                    # A single frame for the small objects between the
                    # first two large objects.
self.assertEqual(n_frames, 1)
self.check_frame_opcodes(pickled)
def test_optional_frames(self):
if pickle.HIGHEST_PROTOCOL < 4:
return
def remove_frames(pickled, keep_frame=None):
"""Remove frame opcodes from the given pickle."""
frame_starts = []
# 1 byte for the opcode and 8 for the argument
frame_opcode_size = 9
for opcode, _, pos in pickletools.genops(pickled):
if opcode.name == 'FRAME':
frame_starts.append(pos)
newpickle = bytearray()
last_frame_end = 0
for i, pos in enumerate(frame_starts):
if keep_frame and keep_frame(i):
continue
newpickle += pickled[last_frame_end:pos]
last_frame_end = pos + frame_opcode_size
newpickle += pickled[last_frame_end:]
return newpickle
frame_size = self.FRAME_SIZE_TARGET
num_frames = 20
        # Large byte objects (dict values) interleaved with small objects
        # (dict keys)
for bytes_type in (bytes, bytearray):
obj = {i: bytes_type([i]) * frame_size for i in range(num_frames)}
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
pickled = self.dumps(obj, proto)
frameless_pickle = remove_frames(pickled)
self.assertEqual(count_opcode(pickle.FRAME, frameless_pickle), 0)
self.assertEqual(obj, self.loads(frameless_pickle))
some_frames_pickle = remove_frames(pickled, lambda i: i % 2)
self.assertLess(count_opcode(pickle.FRAME, some_frames_pickle),
count_opcode(pickle.FRAME, pickled))
self.assertEqual(obj, self.loads(some_frames_pickle))
def test_framed_write_sizes_with_delayed_writer(self):
class ChunkAccumulator:
"""Accumulate pickler output in a list of raw chunks."""
def __init__(self):
self.chunks = []
def write(self, chunk):
self.chunks.append(chunk)
def concatenate_chunks(self):
return b"".join(self.chunks)
for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
objects = [(str(i).encode('ascii'), i % 42, {'i': str(i)})
for i in range(int(1e4))]
# Add a large unique ASCII string
objects.append('0123456789abcdef' *
(self.FRAME_SIZE_TARGET // 16 + 1))
# Protocol 4 packs groups of small objects into frames and issues
# calls to write only once or twice per frame:
            # The C pickler issues one call to write per frame (header and
            # contents) while the Python pickler issues two calls to write:
            # one for the frame header and one for the frame binary contents.
writer = ChunkAccumulator()
self.pickler(writer, proto).dump(objects)
            # Actually read the binary content of the chunks after the end
            # of the call to dump: any memoryview passed to write should not
            # be released, otherwise this delayed access would not be possible.
pickled = writer.concatenate_chunks()
reconstructed = self.loads(pickled)
self.assertEqual(reconstructed, objects)
self.assertGreater(len(writer.chunks), 1)
# memoryviews should own the memory.
del objects
support.gc_collect()
self.assertEqual(writer.concatenate_chunks(), pickled)
n_frames = (len(pickled) - 1) // self.FRAME_SIZE_TARGET + 1
# There should be at least one call to write per frame
self.assertGreaterEqual(len(writer.chunks), n_frames)
            # but not too many either: there can be one for the proto header,
            # one per-frame header, one per frame for the actual contents,
            # and a couple more for the large string appended above (its
            # header and its payload).
self.assertLessEqual(len(writer.chunks), 2 * n_frames + 3)
chunk_sizes = [len(c) for c in writer.chunks]
large_sizes = [s for s in chunk_sizes
if s >= self.FRAME_SIZE_TARGET]
medium_sizes = [s for s in chunk_sizes
if 9 < s < self.FRAME_SIZE_TARGET]
small_sizes = [s for s in chunk_sizes if s <= 9]
# Large chunks should not be too large:
for chunk_size in large_sizes:
self.assertLess(chunk_size, 2 * self.FRAME_SIZE_TARGET,
chunk_sizes)
            # There shouldn't be too many small chunks: the protocol header,
# the frame headers and the large string headers are written
# in small chunks.
self.assertLessEqual(len(small_sizes),
len(large_sizes) + len(medium_sizes) + 3,
chunk_sizes)
def test_nested_names(self):
global Nested
class Nested:
class A:
class B:
class C:
pass
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for obj in [Nested.A, Nested.A.B, Nested.A.B.C]:
with self.subTest(proto=proto, obj=obj):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIs(obj, unpickled)
def test_recursive_nested_names(self):
global Recursive
class Recursive:
pass
Recursive.mod = sys.modules[Recursive.__module__]
Recursive.__qualname__ = 'Recursive.mod.Recursive'
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
unpickled = self.loads(self.dumps(Recursive, proto))
self.assertIs(unpickled, Recursive)
del Recursive.mod # break reference loop
def test_py_methods(self):
global PyMethodsTest
class PyMethodsTest:
@staticmethod
def cheese():
return "cheese"
@classmethod
def wine(cls):
assert cls is PyMethodsTest
return "wine"
def biscuits(self):
assert isinstance(self, PyMethodsTest)
return "biscuits"
class Nested:
"Nested class"
@staticmethod
def ketchup():
return "ketchup"
@classmethod
def maple(cls):
assert cls is PyMethodsTest.Nested
return "maple"
def pie(self):
assert isinstance(self, PyMethodsTest.Nested)
return "pie"
py_methods = (
PyMethodsTest.cheese,
PyMethodsTest.wine,
PyMethodsTest().biscuits,
PyMethodsTest.Nested.ketchup,
PyMethodsTest.Nested.maple,
PyMethodsTest.Nested().pie
)
py_unbound_methods = (
(PyMethodsTest.biscuits, PyMethodsTest),
(PyMethodsTest.Nested.pie, PyMethodsTest.Nested)
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method in py_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(), unpickled())
for method, cls in py_unbound_methods:
obj = cls()
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(obj), unpickled(obj))
def test_c_methods(self):
global Subclass
class Subclass(tuple):
class Nested(str):
pass
c_methods = (
# bound built-in method
("abcd".index, ("c",)),
# unbound built-in method
(str.index, ("abcd", "c")),
# bound "slot" method
([1, 2, 3].__len__, ()),
# unbound "slot" method
(list.__len__, ([1, 2, 3],)),
# bound "coexist" method
({1, 2}.__contains__, (2,)),
# unbound "coexist" method
(set.__contains__, ({1, 2}, 2)),
# built-in class method
(dict.fromkeys, (("a", 1), ("b", 2))),
# built-in static method
(bytearray.maketrans, (b"abc", b"xyz")),
# subclass methods
(Subclass([1,2,2]).count, (2,)),
(Subclass.count, (Subclass([1,2,2]), 2)),
(Subclass.Nested("sweet").count, ("e",)),
(Subclass.Nested.count, (Subclass.Nested("sweet"), "e")),
)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for method, args in c_methods:
with self.subTest(proto=proto, method=method):
unpickled = self.loads(self.dumps(method, proto))
self.assertEqual(method(*args), unpickled(*args))
def test_compat_pickle(self):
tests = [
(range(1, 7), '__builtin__', 'xrange'),
(map(int, '123'), 'itertools', 'imap'),
(functools.reduce, '__builtin__', 'reduce'),
(dbm.whichdb, 'whichdb', 'whichdb'),
(Exception(), 'exceptions', 'Exception'),
(collections.UserDict(), 'UserDict', 'IterableUserDict'),
(collections.UserList(), 'UserList', 'UserList'),
(collections.defaultdict(), 'collections', 'defaultdict'),
]
for val, mod, name in tests:
for proto in range(3):
with self.subTest(type=type(val), proto=proto):
pickled = self.dumps(val, proto)
self.assertIn(('c%s\n%s' % (mod, name)).encode(), pickled)
self.assertIs(type(self.loads(pickled)), type(val))
def test_local_lookup_error(self):
        # Test that whichmodule() errors out cleanly when the lookup of a
        # supposedly globally-reachable object fails.
def f():
pass
# Since the function is local, lookup will fail
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Same without a __module__ attribute (exercises a different path
# in _pickle.c).
del f.__module__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
# Yet a different path.
f.__name__ = f.__qualname__
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((AttributeError, pickle.PicklingError)):
pickletools.dis(self.dumps(f, proto))
#
# PEP 574 tests below
#
def buffer_like_objects(self):
        # Yield buffer-like objects with the bytestring "abcdefgh" in them
bytestring = b"abcdefgh"
yield ZeroCopyBytes(bytestring)
yield ZeroCopyBytearray(bytestring)
if _testbuffer is not None:
items = list(bytestring)
value = int.from_bytes(bytestring, byteorder='little')
for flags in (0, _testbuffer.ND_WRITABLE):
# 1-D, contiguous
yield PicklableNDArray(items, format='B', shape=(8,),
flags=flags)
# 2-D, C-contiguous
yield PicklableNDArray(items, format='B', shape=(4, 2),
strides=(2, 1), flags=flags)
# 2-D, Fortran-contiguous
yield PicklableNDArray(items, format='B',
shape=(4, 2), strides=(1, 4),
flags=flags)
def test_in_band_buffers(self):
# Test in-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(obj, proto)
if obj.c_contiguous and proto >= 5:
# The raw memory bytes are serialized in physical order
self.assertIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 0)
if proto >= 5:
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data),
1 if obj.readonly else 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data),
0 if obj.readonly else 1)
                    # Returning a true value from buffer_callback should have
                    # the same effect
def buffer_callback(obj):
return True
data2 = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertEqual(data2, data)
new = self.loads(data)
# It's a copy
self.assertIsNot(new, obj)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# XXX Unfortunately cannot test non-contiguous array
# (see comment in PicklableNDArray.__reduce_ex__)
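    # Hedged usage sketch (not part of the original suite): with the
    # module-level API, PEP 574 out-of-band pickling looks roughly like this;
    # the buffers collected at dump time must be handed back, in order, at
    # load time:
    #
    #     buffers = []
    #     data = pickle.dumps(obj, protocol=5, buffer_callback=buffers.append)
    #     obj2 = pickle.loads(data, buffers=buffers)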
def test_oob_buffers(self):
# Test out-of-band buffers (PEP 574)
for obj in self.buffer_like_objects():
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
self.dumps(obj, proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = lambda pb: buffers.append(pb.raw())
data = self.dumps(obj, proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"abcdefgh", data)
self.assertEqual(count_opcode(pickle.SHORT_BINBYTES, data), 0)
self.assertEqual(count_opcode(pickle.BYTEARRAY8, data), 0)
self.assertEqual(count_opcode(pickle.NEXT_BUFFER, data), 1)
self.assertEqual(count_opcode(pickle.READONLY_BUFFER, data),
1 if obj.readonly else 0)
if obj.c_contiguous:
self.assertEqual(bytes(buffers[0]), b"abcdefgh")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
self.loads(data)
new = self.loads(data, buffers=buffers)
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
# Non-sequence buffers accepted too
new = self.loads(data, buffers=iter(buffers))
if obj.zero_copy_reconstruct:
# Zero-copy achieved
self.assertIs(new, obj)
else:
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_oob_buffers_writable_to_readonly(self):
# Test reconstructing readonly object from writable buffer
obj = ZeroCopyBytes(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(obj, proto, buffer_callback=buffer_callback)
buffers = map(bytearray, buffers)
new = self.loads(data, buffers=buffers)
self.assertIs(type(new), type(obj))
self.assertEqual(new, obj)
def test_picklebuffer_error(self):
# PickleBuffer forbidden with protocol < 5
pb = pickle.PickleBuffer(b"foobar")
for proto in range(0, 5):
with self.assertRaises(pickle.PickleError):
self.dumps(pb, proto)
def test_buffer_callback_error(self):
def buffer_callback(buffers):
1/0
pb = pickle.PickleBuffer(b"foobar")
with self.assertRaises(ZeroDivisionError):
self.dumps(pb, 5, buffer_callback=buffer_callback)
def test_buffers_error(self):
pb = pickle.PickleBuffer(b"foobar")
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(pb, proto, buffer_callback=[].append)
            # Non-iterable buffers
with self.assertRaises(TypeError):
self.loads(data, buffers=object())
            # Buffer iterable is exhausted too early
with self.assertRaises(pickle.UnpicklingError):
self.loads(data, buffers=[])
def test_inband_accept_default_buffers_argument(self):
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
data_pickled = self.dumps(1, proto, buffer_callback=None)
data = self.loads(data_pickled, buffers=None)
@unittest.skipIf(np is None, "Test needs Numpy")
def test_buffers_numpy(self):
def check_no_copy(x, y):
np.testing.assert_equal(x, y)
self.assertEqual(x.ctypes.data, y.ctypes.data)
def check_copy(x, y):
np.testing.assert_equal(x, y)
self.assertNotEqual(x.ctypes.data, y.ctypes.data)
def check_array(arr):
# In-band
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
data = self.dumps(arr, proto)
new = self.loads(data)
check_copy(arr, new)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffer_callback = lambda _: True
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data)
check_copy(arr, new)
# Out-of-band
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = self.dumps(arr, proto, buffer_callback=buffer_callback)
new = self.loads(data, buffers=buffers)
if arr.flags.c_contiguous or arr.flags.f_contiguous:
check_no_copy(arr, new)
else:
check_copy(arr, new)
# 1-D
arr = np.arange(6)
check_array(arr)
# 1-D, non-contiguous
check_array(arr[::2])
# 2-D, C-contiguous
arr = np.arange(12).reshape((3, 4))
check_array(arr)
# 2-D, F-contiguous
check_array(arr.T)
# 2-D, non-contiguous
check_array(arr[::2])
class BigmemPickleTests(unittest.TestCase):
# Binary protocols can serialize longs of up to 2 GiB-1
@bigmemtest(size=_2G, memuse=3.6, dry_run=False)
def test_huge_long_32b(self, size):
data = 1 << (8 * size)
try:
for proto in protocols:
if proto < 2:
continue
with self.subTest(proto=proto):
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
finally:
data = None
# Protocol 3 can serialize up to 4 GiB-1 as a bytes object
# (older protocols don't have a dedicated opcode for bytes and are
# too inefficient)
@bigmemtest(size=_2G, memuse=2.5, dry_run=False)
def test_huge_bytes_32b(self, size):
data = b"abcd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES +
struct.pack("<I", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
@bigmemtest(size=_4G, memuse=2.5, dry_run=False)
def test_huge_bytes_64b(self, size):
data = b"acbd" * (size // 4)
try:
for proto in protocols:
if proto < 3:
continue
with self.subTest(proto=proto):
if proto == 3:
# Protocol 3 does not support large bytes objects.
# Verify that we do not crash when processing one.
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINBYTES8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(data)
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
finally:
pickled = None
finally:
data = None
# All protocols use 1-byte per printable ASCII character; we add another
# byte because the encoded form has to be copied into the internal buffer.
@bigmemtest(size=_2G, memuse=8, dry_run=False)
def test_huge_str_32b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE +
struct.pack("<I", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# BINUNICODE (protocols 1, 2 and 3) cannot carry more than 2**32 - 1 bytes
# of utf-8 encoded unicode. BINUNICODE8 (protocol 4) supports these huge
# unicode strings however.
@bigmemtest(size=_4G, memuse=8, dry_run=False)
def test_huge_str_64b(self, size):
data = "abcd" * (size // 4)
try:
for proto in protocols:
if proto == 0:
continue
with self.subTest(proto=proto):
if proto < 4:
with self.assertRaises((ValueError, OverflowError)):
self.dumps(data, protocol=proto)
continue
try:
pickled = self.dumps(data, protocol=proto)
header = (pickle.BINUNICODE8 +
struct.pack("<Q", len(data)))
data_start = pickled.index(b'abcd')
self.assertEqual(
header,
pickled[data_start-len(header):data_start])
self.assertEqual((pickled.rindex(b"abcd") + len(b"abcd") -
pickled.index(b"abcd")), len(data))
finally:
pickled = None
finally:
data = None
# Test classes for reduce_ex
class REX_one(object):
"""No __reduce_ex__ here, but inheriting it from object"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
class REX_two(object):
"""No __reduce__ here, but inheriting it from object"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed("This __reduce__ shouldn't be called")
class REX_four(object):
"""Calling base class method should succeed"""
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
class REX_five(object):
"""This one used to fail with infinite recursion"""
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
class REX_six(object):
"""This class is used to check the 4th argument (list iterator) of
the reduce protocol.
"""
def __init__(self, items=None):
self.items = items if items is not None else []
def __eq__(self, other):
return type(self) is type(other) and self.items == other.items
def append(self, item):
self.items.append(item)
def __reduce__(self):
return type(self), (), None, iter(self.items), None
class REX_seven(object):
"""This class is used to check the 5th argument (dict iterator) of
the reduce protocol.
"""
def __init__(self, table=None):
self.table = table if table is not None else {}
def __eq__(self, other):
return type(self) is type(other) and self.table == other.table
def __setitem__(self, key, value):
self.table[key] = value
def __reduce__(self):
return type(self), (), None, None, iter(self.table.items())
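# Hedged reminder (not part of the original suite): REX_six and REX_seven
# exercise the optional 4th and 5th items of the __reduce__ tuple, whose
# layout is roughly
#
#     (callable, args, state, listitems, dictitems)
#
# where listitems is an iterator of values to append() and dictitems is an
# iterator of (key, value) pairs to __setitem__() onto the reconstructed
# object.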
# Test classes for newobj
class MyInt(int):
sample = 1
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(str):
sample = "hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
class MySet(set):
sample = {"a", "b"}
class MyFrozenSet(frozenset):
sample = frozenset({"a", "b"})
myclasses = [MyInt, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict, MySet, MyFrozenSet]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(int):
def __init__(self, *args, **kwargs):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
def __eq__(self, other):
return int(self) == int(other) and self.__dict__ == other.__dict__
class ComplexNewObj(SimpleNewObj):
def __getnewargs__(self):
return ('%X' % self, 16)
class ComplexNewObjEx(SimpleNewObj):
def __getnewargs_ex__(self):
return ('%X' % self,), {'base': 16}
class BadGetattr:
def __getattr__(self, key):
self.foo
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
self.assertRaises(ValueError, self.dump, 123, f)
finally:
support.unlink(TESTFN)
def test_load_closed_file(self):
f = open(TESTFN, "wb")
try:
f.close()
            self.assertRaises(ValueError, self.load, f)
finally:
support.unlink(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = io.BytesIO()
data = [123, {}, 124]
self.dump(data, stream)
stream.seek(0)
unpickled = self.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(pickle.HIGHEST_PROTOCOL, 5)
def test_callapi(self):
f = io.BytesIO()
# With and without keyword arguments
self.dump(123, f, -1)
self.dump(123, file=f, protocol=-1)
self.dumps(123, -1)
self.dumps(123, protocol=-1)
self.Pickler(f, -1)
self.Pickler(f, protocol=-1)
def test_dump_text_file(self):
f = open(TESTFN, "w")
try:
for proto in protocols:
self.assertRaises(TypeError, self.dump, 123, f, proto)
finally:
f.close()
support.unlink(TESTFN)
def test_incomplete_input(self):
s = io.BytesIO(b"X''.")
self.assertRaises((EOFError, struct.error, pickle.UnpicklingError), self.load, s)
def test_bad_init(self):
# Test issue3664 (pickle can segfault from a badly initialized Pickler).
# Override initialization without calling __init__() of the superclass.
class BadPickler(self.Pickler):
def __init__(self): pass
class BadUnpickler(self.Unpickler):
def __init__(self): pass
self.assertRaises(pickle.PicklingError, BadPickler().dump, 0)
self.assertRaises(pickle.UnpicklingError, BadUnpickler().load)
def check_dumps_loads_oob_buffers(self, dumps, loads):
# No need to do the full gamut of tests here, just enough to
# check that dumps() and loads() redirect their arguments
# to the underlying Pickler and Unpickler, respectively.
obj = ZeroCopyBytes(b"foo")
for proto in range(0, 5):
# Need protocol >= 5 for buffer_callback
with self.assertRaises(ValueError):
dumps(obj, protocol=proto,
buffer_callback=[].append)
for proto in range(5, pickle.HIGHEST_PROTOCOL + 1):
buffers = []
buffer_callback = buffers.append
data = dumps(obj, protocol=proto,
buffer_callback=buffer_callback)
self.assertNotIn(b"foo", data)
self.assertEqual(bytes(buffers[0]), b"foo")
# Need buffers argument to unpickle properly
with self.assertRaises(pickle.UnpicklingError):
loads(data)
new = loads(data, buffers=buffers)
self.assertIs(new, obj)
def test_dumps_loads_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dumps() and loads()
self.check_dumps_loads_oob_buffers(self.dumps, self.loads)
def test_dump_load_oob_buffers(self):
# Test out-of-band buffers (PEP 574) with top-level dump() and load()
def dumps(obj, **kwargs):
f = io.BytesIO()
self.dump(obj, f, **kwargs)
return f.getvalue()
def loads(data, **kwargs):
f = io.BytesIO(data)
return self.load(f, **kwargs)
self.check_dumps_loads_oob_buffers(dumps, loads)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
elif object == "test_false_value":
self.false_count += 1
return ""
else:
return None
def persistent_load(self, oid):
if not oid:
self.load_false_count += 1
return "test_false_value"
else:
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
L = list(range(10)) + ["test_false_value"]
for proto in protocols:
self.id_count = 0
self.false_count = 0
self.load_false_count = 0
self.load_count = 0
self.assertEqual(self.loads(self.dumps(L, proto)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.false_count, 1)
self.assertEqual(self.load_count, 5)
self.assertEqual(self.load_false_count, 1)
class AbstractIdentityPersistentPicklerTests(unittest.TestCase):
def persistent_id(self, obj):
return obj
def persistent_load(self, pid):
return pid
def _check_return_correct_type(self, obj, proto):
unpickled = self.loads(self.dumps(obj, proto))
self.assertIsInstance(unpickled, type(obj))
self.assertEqual(unpickled, obj)
def test_return_correct_type(self):
for proto in protocols:
# Protocol 0 supports only ASCII strings.
if proto == 0:
self._check_return_correct_type("abc", 0)
else:
for obj in [b"abc\n", "abc\n", -1, -1.1 * 0.1, str]:
self._check_return_correct_type(obj, proto)
def test_protocol0_is_ascii_only(self):
non_ascii_str = "\N{EMPTY SET}"
self.assertRaises(pickle.PicklingError, self.dumps, non_ascii_str, 0)
pickled = pickle.PERSID + non_ascii_str.encode('utf-8') + b'\n.'
self.assertRaises(pickle.UnpicklingError, self.loads, pickled)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
pickler_class = None
unpickler_class = None
def setUp(self):
assert self.pickler_class
assert self.unpickler_class
def test_clear_pickler_memo(self):
# To test whether clear_memo() has any effect, we pickle an object,
# then pickle it again without clearing the memo; the two serialized
# forms should be different. If we clear_memo() and then pickle the
# object again, the third serialized form should be identical to the
# first one we obtained.
data = ["abcdefg", "abcdefg", 44]
for proto in protocols:
f = io.BytesIO()
pickler = self.pickler_class(f, proto)
pickler.dump(data)
first_pickled = f.getvalue()
# Reset BytesIO object.
f.seek(0)
f.truncate()
pickler.dump(data)
second_pickled = f.getvalue()
# Reset the Pickler and BytesIO objects.
pickler.clear_memo()
f.seek(0)
f.truncate()
pickler.dump(data)
third_pickled = f.getvalue()
self.assertNotEqual(first_pickled, second_pickled)
self.assertEqual(first_pickled, third_pickled)
def test_priming_pickler_memo(self):
# Verify that we can set the Pickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
self.assertNotEqual(first_pickled, primed_pickled)
def test_priming_unpickler_memo(self):
# Verify that we can set the Unpickler's memo attribute.
data = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data)
first_pickled = f.getvalue()
f = io.BytesIO()
primed = self.pickler_class(f)
primed.memo = pickler.memo
primed.dump(data)
primed_pickled = f.getvalue()
unpickler = self.unpickler_class(io.BytesIO(first_pickled))
unpickled_data1 = unpickler.load()
self.assertEqual(unpickled_data1, data)
primed = self.unpickler_class(io.BytesIO(primed_pickled))
primed.memo = unpickler.memo
unpickled_data2 = primed.load()
primed.memo.clear()
self.assertEqual(unpickled_data2, data)
self.assertTrue(unpickled_data2 is unpickled_data1)
def test_reusing_unpickler_objects(self):
data1 = ["abcdefg", "abcdefg", 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data1)
pickled1 = f.getvalue()
data2 = ["abcdefg", 44, 44]
f = io.BytesIO()
pickler = self.pickler_class(f)
pickler.dump(data2)
pickled2 = f.getvalue()
f = io.BytesIO()
f.write(pickled1)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data1)
f.seek(0)
f.truncate()
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
def _check_multiple_unpicklings(self, ioclass):
for proto in protocols:
with self.subTest(proto=proto):
data1 = [(x, str(x)) for x in range(2000)] + [b"abcde", len]
f = ioclass()
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data1)
pickled = f.getvalue()
N = 5
f = ioclass(pickled * N)
unpickler = self.unpickler_class(f)
for i in range(N):
if f.seekable():
pos = f.tell()
self.assertEqual(unpickler.load(), data1)
if f.seekable():
self.assertEqual(f.tell(), pos + len(pickled))
self.assertRaises(EOFError, unpickler.load)
def test_multiple_unpicklings_seekable(self):
self._check_multiple_unpicklings(io.BytesIO)
def test_multiple_unpicklings_unseekable(self):
self._check_multiple_unpicklings(UnseekableIO)
def test_unpickling_buffering_readline(self):
# Issue #12687: the unpickler's buffering logic could fail with
# text mode opcodes.
data = list(range(10))
for proto in protocols:
for buf_size in range(1, 11):
f = io.BufferedRandom(io.BytesIO(), buffer_size=buf_size)
pickler = self.pickler_class(f, protocol=proto)
pickler.dump(data)
f.seek(0)
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
# Tests for dispatch_table attribute
REDUCE_A = 'reduce_A'
class AAA(object):
def __reduce__(self):
return str, (REDUCE_A,)
class BBB(object):
def __init__(self):
# Add an instance attribute to enable state-saving routines at pickling
# time.
self.a = "some attribute"
def __setstate__(self, state):
self.a = "BBB.__setstate__"
def setstate_bbb(obj, state):
"""Custom state setter for BBB objects
    Such a callable may be created by someone other than the author of the
    BBB class. If passed as the state_setter item of a custom reducer, it
    allows custom state-setting behavior for BBB objects. One can think of it
    as the analogue of list_setitems or dict_setitems, but for foreign
    classes/functions.
"""
obj.a = "custom state_setter"
class AbstractCustomPicklerClass:
"""Pickler implementing a reducing hook using reducer_override."""
def reducer_override(self, obj):
obj_name = getattr(obj, "__name__", None)
if obj_name == 'f':
# asking the pickler to save f as 5
return int, (5, )
if obj_name == 'MyClass':
return str, ('some str',)
elif obj_name == 'g':
            # In this case the callback returns an invalid result (not a
            # tuple of 2 to 5 items, nor a string), so the pickler should
            # raise a proper error.
return False
elif obj_name == 'h':
# Simulate a case when the reducer fails. The error should
# be propagated to the original ``dump`` call.
raise ValueError('The reducer just failed')
return NotImplemented
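# Hedged sketch (not part of the original suite): a concrete pickler using
# this hook looks roughly like the following -- reducer_override() is
# consulted before the normal dispatch, and returning NotImplemented falls
# back to the default behaviour:
#
#     class PatchingPickler(pickle.Pickler):
#         def reducer_override(self, obj):
#             if obj is math.log:
#                 return str, ('patched',)  # pickle math.log as a plain str
#             return NotImplemented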
class AbstractHookTests(unittest.TestCase):
def test_pickler_hook(self):
# test the ability of a custom, user-defined CPickler subclass to
# override the default reducing routines of any type using the method
# reducer_override
def f():
pass
def g():
pass
def h():
pass
class MyClass:
pass
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
bio = io.BytesIO()
p = self.pickler_class(bio, proto)
p.dump([f, MyClass, math.log])
new_f, some_str, math_log = pickle.loads(bio.getvalue())
self.assertEqual(new_f, 5)
self.assertEqual(some_str, 'some str')
                # math.log does not have its usual reducer overridden, so the
                # custom reduction callback should silently direct the pickler
                # to the default pickling by attribute, by returning
                # NotImplemented
self.assertIs(math_log, math.log)
with self.assertRaises(pickle.PicklingError):
p.dump(g)
with self.assertRaisesRegex(
ValueError, 'The reducer just failed'):
p.dump(h)
class AbstractDispatchTableTests(unittest.TestCase):
def test_default_dispatch_table(self):
# No dispatch_table attribute by default
f = io.BytesIO()
p = self.pickler_class(f, 0)
with self.assertRaises(AttributeError):
p.dispatch_table
self.assertFalse(hasattr(p, 'dispatch_table'))
def test_class_dispatch_table(self):
# A dispatch_table attribute can be specified class-wide
dt = self.get_dispatch_table()
class MyPickler(self.pickler_class):
dispatch_table = dt
def dumps(obj, protocol=None):
f = io.BytesIO()
p = MyPickler(f, protocol)
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def test_instance_dispatch_table(self):
# A dispatch_table attribute can also be specified instance-wide
dt = self.get_dispatch_table()
def dumps(obj, protocol=None):
f = io.BytesIO()
p = self.pickler_class(f, protocol)
p.dispatch_table = dt
self.assertEqual(p.dispatch_table, dt)
p.dump(obj)
return f.getvalue()
self._test_dispatch_table(dumps, dt)
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
def default_load_dump(obj):
return pickle.loads(pickle.dumps(obj, 0))
# pickling complex numbers using protocol 0 relies on copyreg
# so check pickling a complex number still works
z = 1 + 2j
self.assertEqual(custom_load_dump(z), z)
self.assertEqual(default_load_dump(z), z)
# modify pickling of complex
REDUCE_1 = 'reduce_1'
def reduce_1(obj):
return str, (REDUCE_1,)
dispatch_table[complex] = reduce_1
self.assertEqual(custom_load_dump(z), REDUCE_1)
self.assertEqual(default_load_dump(z), z)
# check picklability of AAA and BBB
a = AAA()
b = BBB()
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# modify pickling of BBB
dispatch_table[BBB] = reduce_1
self.assertEqual(custom_load_dump(a), REDUCE_A)
self.assertEqual(custom_load_dump(b), REDUCE_1)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# revert pickling of BBB and modify pickling of AAA
REDUCE_2 = 'reduce_2'
def reduce_2(obj):
return str, (REDUCE_2,)
dispatch_table[AAA] = reduce_2
del dispatch_table[BBB]
self.assertEqual(custom_load_dump(a), REDUCE_2)
self.assertIsInstance(custom_load_dump(b), BBB)
self.assertEqual(default_load_dump(a), REDUCE_A)
self.assertIsInstance(default_load_dump(b), BBB)
# End-to-end testing of save_reduce with the state_setter keyword
        # argument. This is a dispatch_table test, as the primary goal of
        # state_setter is to tweak an object's reduction behavior.
# In particular, state_setter is useful when the default __setstate__
# behavior is not flexible enough.
# No custom reducer for b has been registered for now, so
# BBB.__setstate__ should be used at unpickling time
self.assertEqual(default_load_dump(b).a, "BBB.__setstate__")
def reduce_bbb(obj):
return BBB, (), obj.__dict__, None, None, setstate_bbb
dispatch_table[BBB] = reduce_bbb
# The custom reducer reduce_bbb includes a state setter, that should
# have priority over BBB.__setstate__
self.assertEqual(custom_load_dump(b).a, "custom state_setter")
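# Hedged usage sketch (not part of the original suite): outside this test
# harness, a per-pickler dispatch table is typically seeded from copyreg and
# then patched for the types whose reduction should change:
#
#     p = pickle.Pickler(io.BytesIO(), 0)
#     p.dispatch_table = copyreg.dispatch_table.copy()
#     p.dispatch_table[complex] = lambda z: (complex, (z.real, z.imag))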
if __name__ == "__main__":
# Print some stuff that can be used to rewrite DATA{0,1,2}
from pickletools import dis
x = create_data()
for i in range(pickle.HIGHEST_PROTOCOL+1):
p = pickle.dumps(x, i)
print("DATA{0} = (".format(i))
for j in range(0, len(p), 20):
b = bytes(p[j:j+20])
print(" {0!r}".format(b))
print(")")
print()
print("# Disassembly of DATA{0}".format(i))
print("DATA{0}_DIS = \"\"\"\\".format(i))
dis(p)
print("\"\"\"")
print()
|
repository.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import os
import stat
from operator import itemgetter
import mimetypes
import logging
import string
import re
from subprocess import Popen, PIPE
from hashlib import sha1
from datetime import datetime, timedelta
from time import time
from collections import defaultdict, OrderedDict
from six.moves.urllib.parse import urljoin
from threading import Thread
from six.moves.queue import Queue
from itertools import chain, islice
from difflib import SequenceMatcher
import typing
import tg
from paste.deploy.converters import asint, asbool
from tg import tmpl_context as c
from tg import app_globals as g
import pymongo
import pymongo.errors
import bson
import six
from ming import schema as S
from ming import Field, collection, Index
from ming.utils import LazyProperty
from ming.odm import FieldProperty, session, Mapper, mapper, MappedClass
from ming.base import Object
from allura.lib import helpers as h
from allura.lib import utils
from allura.lib.security import has_access
from .artifact import Artifact, VersionedArtifact
from .auth import User
from .timeline import ActivityObject
from .monq_model import MonQTask
from .project import AppConfig
from .session import main_doc_session
from .session import repository_orm_session
from io import open
from six.moves import range
if typing.TYPE_CHECKING:
from ming.odm.mapper import Query
log = logging.getLogger(__name__)
config = utils.ConfigProxy(
common_suffix='forgemail.domain',
)
README_RE = re.compile(r'^README(\.[^.]*)?$', re.IGNORECASE)
VIEWABLE_EXTENSIONS = frozenset([
'.php', '.py', '.js', '.java', '.html', '.htm', '.yaml', '.sh',
'.rb', '.phtml', '.txt', '.bat', '.ps1', '.xhtml', '.css', '.cfm', '.jsp', '.jspx',
'.pl', '.php4', '.php3', '.rhtml', '.svg', '.markdown', '.json', '.ini', '.tcl', '.vbs', '.xsl'])
# Some schema types
SUser = dict(name=str, email=str, date=datetime)
SObjType = S.OneOf('blob', 'tree', 'submodule')
# Batch size used when batching queries with $in
QSIZE = 100
BINARY_EXTENSIONS = frozenset([
".3ds", ".3g2", ".3gp", ".7z", ".a", ".aac", ".adp", ".ai", ".aif", ".apk", ".ar", ".asf", ".au", ".avi", ".bak",
".bin", ".bk", ".bmp", ".btif", ".bz2", ".cab", ".caf", ".cgm", ".cmx", ".cpio", ".cr2", ".dat", ".deb", ".djvu",
".dll", ".dmg", ".dng", ".doc", ".docx", ".dra", ".DS_Store", ".dsk", ".dts", ".dtshd", ".dvb", ".dwg", ".dxf",
".ecelp4800", ".ecelp7470", ".ecelp9600", ".egg", ".eol", ".eot", ".epub", ".exe", ".f4v", ".fbs", ".fh", ".fla",
".flac", ".fli", ".flv", ".fpx", ".fst", ".fvt", ".g3", ".gif", ".gz", ".h261", ".h263", ".h264", ".ico", ".ief",
".img", ".ipa", ".iso", ".jar", ".jpeg", ".jpg", ".jpgv", ".jpm", ".jxr", ".ktx", ".lvp", ".lz", ".lzma", ".lzo",
".m3u", ".m4a", ".m4v", ".mar", ".mdi", ".mid", ".mj2", ".mka", ".mkv", ".mmr", ".mng", ".mov", ".movie", ".mp3",
".mp4", ".mp4a", ".mpeg", ".mpg", ".mpga", ".mxu", ".nef", ".npx", ".o", ".oga", ".ogg", ".ogv", ".otf", ".pbm",
".pcx", ".pdf", ".pea", ".pgm", ".pic", ".png", ".pnm", ".ppm", ".psd", ".pya", ".pyc", ".pyo", ".pyv", ".qt",
".rar", ".ras", ".raw", ".rgb", ".rip", ".rlc", ".rz", ".s3m", ".s7z", ".scpt", ".sgi", ".shar", ".sil", ".smv",
".so", ".sub", ".swf", ".tar", ".tbz2", ".tga", ".tgz", ".tif", ".tiff", ".tlz", ".ttf", ".uvh", ".uvi",
".uvm", ".uvp", ".uvs", ".uvu", ".viv", ".vob", ".war", ".wav", ".wax", ".wbmp", ".wdp", ".weba", ".webm", ".webp",
".whl", ".wm", ".wma", ".wmv", ".wmx", ".woff", ".woff2", ".wvx", ".xbm", ".xif", ".xm", ".xpi", ".xpm", ".xwd",
".xz", ".z", ".zip", ".zipx"
])
PYPELINE_EXTENSIONS = frozenset(utils.MARKDOWN_EXTENSIONS + ['.rst', '.textile', '.creole'])
DIFF_SIMILARITY_THRESHOLD = .5 # used for determining file renames
class RepositoryImplementation(object):
# Repository-specific code
def init(self): # pragma no cover
raise NotImplementedError('init')
def clone_from(self, source_url): # pragma no cover
raise NotImplementedError('clone_from')
def commit(self, revision): # pragma no cover
raise NotImplementedError('commit')
def all_commit_ids(self): # pragma no cover
raise NotImplementedError('all_commit_ids')
def new_commits(self, all_commits=False): # pragma no cover
'''Return a list of native commits in topological order (heads first).
"commit" is a repo-native object, NOT a Commit object.
If all_commits is False, only return commits not already indexed.
'''
raise NotImplementedError('new_commits')
def commit_parents(self, commit): # pragma no cover
'''Return a list of native commits for the parents of the given (native)
commit'''
raise NotImplementedError('commit_parents')
def refresh_commit_info(self, oid, lazy=True): # pragma no cover
'''Refresh the data in the commit with id oid'''
raise NotImplementedError('refresh_commit_info')
def _setup_hooks(self, source_path=None): # pragma no cover
'''Install a hook in the repository that will ping the refresh url for
the repo. Optionally provide a path from which to copy existing hooks.'''
raise NotImplementedError('_setup_hooks')
# pragma no cover
def log(self, revs=None, path=None, exclude=None, id_only=True, **kw):
"""
Returns a generator that returns information about commits reachable
by revs.
revs can be None or a list or tuple of identifiers, each of which
can be anything parsable by self.commit(). If revs is None, the
default branch head will be used.
If path is not None, only commits which modify files under path
will be included.
Exclude can be None or a list or tuple of identifiers, each of which
can be anything parsable by self.commit(). If not None, then any
revisions reachable by any of the revisions in exclude will not be
included.
If id_only is True, returns only the commit ID (which can be faster),
otherwise it returns detailed information about each commit.
"""
raise NotImplementedError('log')
def compute_tree_new(self, commit, path='/'): # pragma no cover
'''Used in hg and svn to compute a git-like-tree lazily with the new models'''
raise NotImplementedError('compute_tree')
def open_blob(self, blob): # pragma no cover
'''Return a file-like object that contains the contents of the blob'''
raise NotImplementedError('open_blob')
def blob_size(self, blob):
'''Return a blob size in bytes'''
raise NotImplementedError('blob_size')
def tarball(self, revision, path=None):
'''Create a tarball for the revision'''
raise NotImplementedError('tarball')
def is_empty(self):
'''Determine if the repository is empty by checking the filesystem'''
raise NotImplementedError('is_empty')
def is_file(self, path, rev=None):
        '''Determine whether the given path in the repository is a file, by checking the filesystem'''
raise NotImplementedError('is_file')
@classmethod
def shorthand_for_commit(cls, oid):
return '[%s]' % oid[:6]
def symbolics_for_commit(self, commit):
'''Return symbolic branch and tag names for a commit.'''
raise NotImplementedError('symbolics_for_commit')
def url_for_commit(self, commit, url_type='ci'):
        'return a URL, given either a commit or an object id'
if isinstance(commit, six.string_types):
object_id = commit
else:
object_id = commit._id
if '/' in object_id:
object_id = os.path.join(
object_id, self._repo.app.END_OF_REF_ESCAPE)
return os.path.join(self._repo.url(), url_type, object_id) + '/'
def _setup_paths(self, create_repo_dir=True):
'''
Ensure that the base directory in which the repo lives exists.
If create_repo_dir is True, also ensure that the directory
of the repo itself exists.
'''
if not self._repo.fs_path.endswith('/'):
self._repo.fs_path += '/'
fullname = self._repo.fs_path + self._repo.name
# make the base dir for repo, regardless
if not os.path.exists(self._repo.fs_path):
os.makedirs(self._repo.fs_path)
if create_repo_dir and not os.path.exists(fullname):
os.mkdir(fullname)
return fullname
def _setup_special_files(self, source_path=None):
magic_file = os.path.join(
self._repo.fs_path, self._repo.name, tg.config.get(
'scm.magic_file', '.ALLURA-REPOSITORY'))
with open(magic_file, 'w') as f:
f.write(six.ensure_text(self._repo.repo_id))
os.chmod(magic_file, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
self._setup_hooks(source_path)
@property
def head(self):
raise NotImplementedError('head')
@property
def heads(self):
raise NotImplementedError('heads')
@property
def branches(self):
raise NotImplementedError('branches')
@property
def tags(self):
raise NotImplementedError('tags')
def last_commit_ids(self, commit, paths):
'''
Return a mapping {path: commit_id} of the _id of the last
commit to touch each path, starting from the given commit.
Chunks the set of paths based on lcd_thread_chunk_size and
runs each chunk (if more than one) in a separate thread.
Each thread will call :meth:`_get_last_commit` to get the
commit ID and list of changed files for the last commit
to touch any file in a given chunk.
'''
if not paths:
return {}
timeout = float(tg.config.get('lcd_timeout', 60))
start_time = time()
paths = list(set(paths)) # remove dupes
result = {} # will be appended to from each thread
chunks = Queue()
lcd_chunk_size = asint(tg.config.get('lcd_thread_chunk_size', 10))
num_threads = 0
for s in range(0, len(paths), lcd_chunk_size):
chunks.put(paths[s:s + lcd_chunk_size])
num_threads += 1
def get_ids():
paths = set(chunks.get())
try:
commit_id = commit._id
while paths and commit_id:
if time() - start_time >= timeout:
log.error('last_commit_ids timeout for %s on %s',
commit._id, ', '.join(paths))
break
commit_id, changes = self._get_last_commit(
commit._id, paths)
if commit_id is None:
break
changed = prefix_paths_union(paths, changes)
for path in changed:
result[path] = commit_id
paths -= changed
except Exception as e:
log.exception('Error in SCM thread: %s', e)
finally:
chunks.task_done()
if num_threads == 1:
get_ids()
else:
for i in range(num_threads):
t = Thread(target=get_ids)
t.start()
# reimplement chunks.join() but with a timeout
# see: http://bugs.python.org/issue9634
# (giving threads a bit of extra cleanup time in case they timeout)
chunks.all_tasks_done.acquire()
try:
endtime = time() + timeout + 0.5
while chunks.unfinished_tasks and endtime > time():
chunks.all_tasks_done.wait(endtime - time())
finally:
chunks.all_tasks_done.release()
return result
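    # Hedged sketch (paths and ids below are hypothetical, not from the source):
    # a concrete subclass that implements _get_last_commit() lets
    #   impl.last_commit_ids(commit, ['README', 'src/app.py', 'src/util.py'])
    # resolve into a mapping such as {'README': 'abc123', 'src/app.py': 'def456', ...},
    # with the requested paths chunked across worker threads whenever more than
    # lcd_thread_chunk_size paths are asked for at once.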
def _get_last_commit(self, commit_id, paths):
"""
For a given commit ID and set of paths / files,
use the SCM to determine the last commit to touch
any of the given paths / files.
Should return a tuple containing the ID of the
commit and the list of all files changed in the commit.
"""
raise NotImplementedError('_get_last_commit')
def get_changes(self, commit_id):
"""
Return the list of files changed by a given commit.
"""
raise NotImplementedError('get_changes')
def paged_diffs(self, commit_id, start=0, end=None, onlyChangedFiles=False):
"""
Returns files touched by the commit, grouped by status (added, removed,
and changed) and the total number of such files. Paginates according
to :param start: and :param end:.
"""
raise NotImplementedError('paged_diffs')
def merge_request_commits(self, mr):
"""Given MergeRequest :param mr: return list of commits to be merged"""
raise NotImplementedError('merge_request_commits')
class Repository(Artifact, ActivityObject):
BATCH_SIZE = 100
class __mongometa__:
name = str('generic-repository')
indexes = ['upstream_repo.name']
query: 'Query[Repository]'
_impl = None
repo_id = 'repo'
type_s = 'Repository'
_refresh_precompute = True
name = FieldProperty(str)
tool = FieldProperty(str)
fs_path = FieldProperty(str)
url_path = FieldProperty(str)
status = FieldProperty(str)
email_address = ''
additional_viewable_extensions = FieldProperty(str)
heads = FieldProperty(S.Deprecated)
branches = FieldProperty(S.Deprecated)
repo_tags = FieldProperty(S.Deprecated)
upstream_repo = FieldProperty(dict(name=str, url=str))
default_branch_name = FieldProperty(str)
cached_branches = FieldProperty([dict(name=str, object_id=str)])
cached_tags = FieldProperty([dict(name=str, object_id=str)])
def __init__(self, **kw):
if 'name' in kw and 'tool' in kw:
if kw.get('fs_path') is None:
kw['fs_path'] = self.default_fs_path(c.project, kw['tool'])
if kw.get('url_path') is None:
kw['url_path'] = self.default_url_path(c.project, kw['tool'])
super(Repository, self).__init__(**kw)
@property
def activity_name(self):
return 'repo %s' % self.name
@classmethod
def default_fs_path(cls, project, tool):
repos_root = tg.config.get('scm.repos.root', '/')
# if a user-project, the repository path on disk needs to use the actual shortname
# the nice url might have invalid chars
return os.path.join(repos_root, tool, project.url(use_userproject_shortname=True)[1:])
@classmethod
def default_url_path(cls, project, tool):
# if a user-project, the repository checkout path needs to use the actual shortname used on disk
# not a nicer username (e.g. with underscores) used on web browsing urls
return project.url(use_userproject_shortname=True)
@property
def tarball_path(self):
return os.path.join(tg.config.get('scm.repos.tarball.root', '/'),
self.tool,
self.project.shortname[:1],
self.project.shortname[:2],
self.project.shortname,
self.name)
@property
def tarball_tmpdir(self):
return os.path.join(tg.config.get('scm.repos.tarball.tmpdir', tg.config.get('scm.repos.tarball.root', '/')),
self.tool,
self.project.shortname[:1],
self.project.shortname[:2],
self.project.shortname,
self.name)
def tarball_filename(self, revision, path=None):
shortname = c.project.shortname.replace('/', '-')
mount_point = c.app.config.options.mount_point
filename = '%s-%s-%s' % (shortname, mount_point, revision)
return filename
def tarball_url(self, revision, path=None):
filename = '%s%s' % (self.tarball_filename(revision, path), '.zip')
r = os.path.join(self.tool,
self.project.shortname[:1],
self.project.shortname[:2],
self.project.shortname,
self.name,
filename)
return urljoin(tg.config.get('scm.repos.tarball.url_prefix', '/'), r)
def get_tarball_status(self, revision, path=None):
pathname = os.path.join(
self.tarball_path, self.tarball_filename(revision, path))
filename = '%s%s' % (pathname, '.zip')
if os.path.isfile(filename.encode('utf-8')):
return 'complete'
# file doesn't exist, check for busy task
task = MonQTask.query.get(**{
'task_name': 'allura.tasks.repo_tasks.tarball',
'args': [revision, path or ''],
'state': {'$in': ['busy', 'ready']},
})
return task.state if task else None
def __repr__(self): # pragma no cover
return '<%s %s>' % (
self.__class__.__name__,
self.full_fs_path)
# Proxy to _impl
def init(self):
return self._impl.init()
def commit(self, rev):
return self._impl.commit(rev)
def all_commit_ids(self):
return self._impl.all_commit_ids()
def refresh_commit_info(self, oid, seen, lazy=True):
return self._impl.refresh_commit_info(oid, seen, lazy)
def open_blob(self, blob):
return self._impl.open_blob(blob)
def blob_size(self, blob):
return self._impl.blob_size(blob)
def shorthand_for_commit(self, oid):
return self._impl.shorthand_for_commit(oid)
def symbolics_for_commit(self, commit):
return self._impl.symbolics_for_commit(commit)
def url_for_commit(self, commit, url_type='ci'):
return self._impl.url_for_commit(commit, url_type)
def compute_tree_new(self, commit, path='/'):
return self._impl.compute_tree_new(commit, path)
def last_commit_ids(self, commit, paths):
return self._impl.last_commit_ids(commit, paths)
def get_changes(self, commit_id):
return self._impl.get_changes(commit_id)
def is_empty(self):
return self._impl.is_empty()
def is_file(self, path, rev=None):
return self._impl.is_file(path, rev)
def get_heads(self):
"""
Return list of heads for the repo.
It's get_heads() instead of a heads (lazy) property because it would
conflict with the now deprecated heads field. Eventually, we should
try to remove the deprecated fields and clean this up.
"""
return self._impl.heads
def get_branches(self):
"""
Return list of branches for the repo.
It's get_branches() instead of a branches (lazy) property because it
would conflict with the now deprecated branches field. Eventually, we
should try to remove the deprecated fields and clean this up.
"""
return self._impl.branches
def get_tags(self, for_merge_request=False):
"""
Return list of tags for the repo.
It's get_tags() instead of a tags (lazy) property because it
would conflict with the now deprecated tags field. Eventually, we
should try to remove the deprecated fields and clean this up.
"""
return self._impl.tags
@property
def head(self):
return self._impl.head
def set_default_branch(self, name):
return self._impl.set_default_branch(name)
def paged_diffs(self, commit_id, start=0, end=None, onlyChangedFiles=False):
return self._impl.paged_diffs(commit_id, start, end, onlyChangedFiles)
def init_as_clone(self, source_path, source_name, source_url):
self.upstream_repo.name = source_name
self.upstream_repo.url = source_url
session(self).flush(self)
source = source_path if source_path else source_url
self._impl.clone_from(source)
log.info('... %r cloned', self)
g.post_event('repo_cloned', source_url, source_path)
self.refresh(notify=False, new_clone=True)
def log(self, revs=None, path=None, exclude=None, id_only=True, limit=None, **kw):
"""
Returns a generator that returns information about commits reachable
by revs which modify path.
revs can either be a single revision identifier or a list or tuple
of identifiers, each of which can be anything parsable by self.commit().
If revs is None, the default branch head will be used.
If path is not None, then only commits which change files under path
will be included.
Exclude can be None, a single revision identifier, or a list or tuple of
identifiers, each of which can be anything parsable by self.commit().
If not None, then any revisions reachable by any of the revisions in
exclude will not be included.
If id_only is True, returns only the commit ID (which can be faster),
otherwise it returns detailed information about each commit.
"""
if revs is not None and not isinstance(revs, (list, tuple)):
revs = [revs]
if exclude is not None and not isinstance(exclude, (list, tuple)):
exclude = [exclude]
log_iter = self._impl.log(revs, path, exclude=exclude, id_only=id_only, limit=limit, **kw)
return islice(log_iter, limit)
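    # Hedged usage sketch (revision and path names are hypothetical):
    #   repo.log(revs='main', path='docs/', id_only=True, limit=10)
    # yields up to 10 commit ids reachable from 'main' that touch docs/, while
    #   repo.log(revs=['main'], exclude=['stable'], id_only=False)
    # yields detailed commit info for commits on 'main' but not on 'stable'.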
def latest(self, branch=None):
if self._impl is None:
return None
if branch is None:
branch = self.app.default_branch_name
try:
return self.commit(branch)
except Exception:
log.exception('Cannot get latest commit for a branch: %s', branch)
return None
def url(self):
return self.app_config.url()
def refresh_url(self):
refresh_base_url = tg.config.get('scm.repos.refresh_base_url') or tg.config.get('base_url', 'http://localhost:8080')
return '/'.join([
refresh_base_url.rstrip('/'),
'auth/refresh_repo',
self.url().lstrip('/'),
])
def shorthand_id(self):
return self.name
@property
def email_address(self):
return 'noreply@%s%s' % (self.email_domain, config.common_suffix)
def index(self):
result = Artifact.index(self)
result.update(
name_s=self.name,
type_s=self.type_s,
title='{} {} repository'.format(self.project.name, self.app.tool_label))
return result
@property
def full_fs_path(self):
return os.path.join(self.fs_path, self.name)
def suggested_clone_dest_path(self):
owning_user = c.project.user_project_of
if owning_user:
projname = owning_user.username
else:
projname = c.project.shortname.replace('/', '-')
return '%s-%s' % (projname, self.name)
def clone_url(self, category, username=''):
'''Return a URL string suitable for copy/paste that describes _this_ repo,
e.g., for use in a clone/checkout command
'''
if self.app.config.options.get('external_checkout_url', None):
tpl = string.Template(self.app.config.options.external_checkout_url)
else:
tpl = string.Template(tg.config.get('scm.host.%s.%s' % (category, self.tool)))
url = tpl.substitute(dict(username=username, path=self.url_path + self.name))
# this is an svn option, but keeps clone_*() code from diverging
url += self.app.config.options.get('checkout_url', '')
return url
def clone_url_first(self, anon, username=''):
'''
Get first clone_url option, useful for places where we need to show just one
:param bool anon: Anonymous or not
:param str username: optional
'''
cat = self.clone_command_categories(anon=anon)[0]['key']
return self.clone_url(cat, username)
def clone_command(self, category, username=''):
'''Return a string suitable for copy/paste that would clone this repo locally
'''
if not username and c.user not in (None, User.anonymous()):
username = c.user.username
tpl = string.Template(tg.config.get('scm.clone.%s.%s' % (category, self.tool)) or
tg.config.get('scm.clone.%s' % self.tool))
return tpl.substitute(dict(username=username,
source_url=self.clone_url(category, username),
dest_path=self.suggested_clone_dest_path()))
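    # Assumed .ini sketch (key names and values here are illustrative only):
    #   scm.host.rw.git = ssh://$username@scm.example.com$path
    #   scm.clone.rw.git = git clone $source_url $dest_path
    # clone_url() and clone_command() fill $username, $path, $source_url and
    # $dest_path into such string.Template values from the config.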
def clone_command_first(self, anon, username=''):
'''
Get first clone_command option, useful for places where we need to show just one
:param bool anon: Anonymous or not
:param str username: optional
'''
cat = self.clone_command_categories(anon=anon)[0]['key']
return self.clone_command(cat, username)
def clone_command_categories(self, anon):
conf = tg.config.get('scm.clonechoices{}.{}'.format('_anon' if anon else '', self.tool))
if not conf and anon:
# check for a non-anon config
conf = tg.config.get('scm.clonechoices.{}'.format(self.tool))
if conf:
return json.loads(conf)
elif anon:
# defaults to match historical scm.clone.* configs, in case someone updates Allura but not their .ini
return [{"name": "RO", "key": "ro", "title": "Read Only"},
{"name": "HTTPS", "key": "https_anon", "title": "HTTPS"}]
else:
return [{"name": "RW", "key": "rw", "title": "Read/Write"},
{"name": "RO", "key": "ro", "title": "Read Only"},
{"name": "HTTPS", "key": "https", "title": "HTTPS"}]
def merge_requests_by_statuses(self, *statuses):
return MergeRequest.query.find(dict(
app_config_id=self.app.config._id,
status={'$in': statuses})).sort(
'request_number')
def all_merge_requests(self):
return MergeRequest.query.find(dict(
app_config_id=self.app.config._id)).sort(
'request_number')
@LazyProperty
def _additional_viewable_extensions(self):
ext_list = self.additional_viewable_extensions or ''
ext_list = [ext.strip() for ext in ext_list.split(',') if ext]
ext_list += ['.ini', '.gitignore', '.svnignore', 'README']
return ext_list
def guess_type(self, name):
'''Guess the mime type and encoding of a given filename'''
content_type, encoding = mimetypes.guess_type(name)
if content_type is None or not content_type.startswith('text/'):
fn, ext = os.path.splitext(name)
ext = ext or fn
if ext in self._additional_viewable_extensions:
content_type, encoding = 'text/plain', None
if content_type is None:
content_type, encoding = 'application/octet-stream', None
return content_type, encoding
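    # Hedged examples of the fallback order above (filenames are illustrative):
    # 'app.py' resolves via mimetypes to ('text/x-python', None); a bare 'README'
    # has no mime type but matches _additional_viewable_extensions, so it becomes
    # ('text/plain', None); anything still unknown ends up as
    # ('application/octet-stream', None).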
def unknown_commit_ids(self):
from allura.model.repo_refresh import unknown_commit_ids as unknown_commit_ids_repo
return unknown_commit_ids_repo(self.all_commit_ids())
def refresh(self, all_commits=False, notify=True, new_clone=False, commits_are_new=None):
'''Find any new commits in the repository and update'''
try:
from allura.model.repo_refresh import refresh_repo
log.info('... %r analyzing', self)
self.set_status('analyzing')
refresh_repo(self, all_commits, notify, new_clone, commits_are_new)
finally:
log.info('... %s ready', self)
self.set_status('ready')
def push_upstream_context(self):
project, rest = h.find_project(self.upstream_repo.name)
with h.push_context(project._id):
app = project.app_instance(rest[0])
return h.push_context(project._id, app_config_id=app.config._id)
def pending_upstream_merges(self):
q = {
'downstream.project_id': self.project_id,
'downstream.mount_point': self.app.config.options.mount_point,
'status': 'open'}
with self.push_upstream_context():
return MergeRequest.query.find(q).count()
@property
def forks(self):
all_forks = self.query.find({'upstream_repo.name': self.url()}).all()
return [fork for fork in all_forks if fork.app_config is not None
and fork.app_config.project is not None]
def tarball(self, revision, path=None):
if path:
path = path.strip('/')
self._impl.tarball(revision, path)
def rev_to_commit_id(self, rev):
raise NotImplementedError('rev_to_commit_id')
def set_status(self, status):
'''
Update (and flush) the repo status indicator.
Updates to the repo status (or any Repository field) are considered
project updates (because Repositories are Artifacts; see
`Artifact.__mongometa__.before_save`) and thus change `last_updated`
on `c.project`, which causes `c.project` to be flushed.
Because repo status changes can come at the end or middle of a long
operation, `c.project` can be quite stale, so this flushes and reloads
`c.project`.
'''
from allura.model import Project
project_session = session(c.project)
if project_session:
session(c.project).flush(c.project)
session(c.project).expunge(c.project)
c.project = Project.query.get(_id=c.project._id)
self.status = status
session(self).flush(self)
def get_default_branch(self, default_branch_name):
branch_name = getattr(self, 'default_branch_name', None) or default_branch_name
branches = []
if not self.is_empty():
branches = [b.name for b in self.get_branches()]
if branches and branch_name not in branches:
if default_branch_name in branches:
branch_name = default_branch_name
else:
branch_name = branches[0]
self.set_default_branch(branch_name)
return branch_name
def merge_request_commits(self, mr):
"""Given MergeRequest :param mr: return list of commits to be merged"""
return self._impl.merge_request_commits(mr)
class MergeRequest(VersionedArtifact, ActivityObject):
statuses = ['open', 'merged', 'rejected']
class __mongometa__:
name = str('merge-request')
indexes = ['commit_id', 'creator_id']
unique_indexes = [('app_config_id', 'request_number')]
query: 'Query[MergeRequest]'
type_s = 'MergeRequest'
request_number = FieldProperty(int)
status = FieldProperty(str, if_missing='open')
downstream = FieldProperty(dict(
project_id=S.ObjectId,
mount_point=str,
commit_id=str))
source_branch = FieldProperty(str, if_missing='')
target_branch = FieldProperty(str)
creator_id = FieldProperty(S.ObjectId, if_missing=lambda: c.user._id)
created = FieldProperty(datetime, if_missing=datetime.utcnow)
summary = FieldProperty(str)
description = FieldProperty(str)
can_merge_cache = FieldProperty({str: bool})
new_commits = FieldProperty([S.Anything], if_missing=None) # don't access directly, use `commits` property
@property
def activity_name(self):
return 'merge request #%s' % self.request_number
@property
def type_name(self):
return 'merge request'
@property
def activity_extras(self):
d = ActivityObject.activity_extras.fget(self)
d.update(summary=self.summary)
return d
@LazyProperty
def creator(self):
from allura import model as M
return M.User.query.get(_id=self.creator_id)
@LazyProperty
def creator_name(self):
return self.creator.get_pref('display_name') or self.creator.username
@LazyProperty
def creator_url(self):
return self.creator.url()
@LazyProperty
def downstream_url(self):
with self.push_downstream_context():
return c.app.url
@LazyProperty
def downstream_repo(self):
with self.push_downstream_context():
return c.app.repo
def push_downstream_context(self):
return h.push_context(self.downstream.project_id, self.downstream.mount_point)
@property
def commits(self):
if self.new_commits is not None:
return self.new_commits
with self.push_downstream_context():
# update the cache key only, being careful not to touch anything else that ming will try to flush later
# this avoids race conditions with the `set_can_merge_cache()` caching and clobbering fields
new_commits = c.app.repo.merge_request_commits(self)
self.query.update({'$set': {'new_commits': new_commits}})
return new_commits
@classmethod
def upsert(cls, **kw):
num = cls.query.find(dict(
app_config_id=c.app.config._id)).count() + 1
while True:
try:
r = cls(request_number=num, **kw)
session(r).flush(r)
return r
except pymongo.errors.DuplicateKeyError: # pragma no cover
session(r).expunge(r)
num += 1
def url(self):
return self.app.url + 'merge-requests/%s/' % self.request_number
def index(self):
result = Artifact.index(self)
result.update(
name_s='Merge Request #%d' % self.request_number,
type_s=self.type_s,
title='Merge Request #%d: %s' % (self.request_number, self.summary),
)
return result
@property
def email_subject(self):
return 'Merge request: ' + self.summary
def merge_allowed(self, user):
"""
Returns true if a merge is allowed by system and tool configuration.
"""
if not self.app.forkable:
return False
if self.status != 'open':
return False
if asbool(tg.config.get('scm.merge.{}.disabled'.format(self.app.config.tool_name))):
return False
if not h.has_access(self.app, 'write', user):
return False
if self.app.config.options.get('merge_disabled'):
return False
return True
def can_merge_cache_key(self):
"""
Returns key for can_merge_cache constructed from current
source & target branch commits.
"""
source_hash = self.downstream.commit_id
target_hash = self.app.repo.commit(self.target_branch)._id
key = '{}-{}'.format(source_hash, target_hash)
return key
def get_can_merge_cache(self):
"""Returns True/False or None in case of cache miss."""
key = self.can_merge_cache_key()
return self.can_merge_cache.get(key)
def set_can_merge_cache(self, val):
key = self.can_merge_cache_key()
# update the cache key only, being careful not to touch anything else that ming will try to flush later
# this avoids race conditions with the `commits()` caching and clobbering fields
can_merge_cache = self.can_merge_cache._deinstrument()
can_merge_cache[key] = val
self.query.update({'$set': {'can_merge_cache': can_merge_cache}})
def can_merge(self):
"""
Returns boolean indicating if automatic merge is possible (no
conflicts). If result is unknown yet, returns None and fires a task to
get the result. Caches result for later reuse.
"""
if not self.merge_allowed(c.user):
return None
if self.status == 'merged':
return True
cached = self.get_can_merge_cache()
if cached is not None:
return cached
in_progress = self.can_merge_task_status() in ['ready', 'busy']
if self.app.forkable and not in_progress:
from allura.tasks import repo_tasks
repo_tasks.can_merge.post(self._id)
def merge(self):
in_progress = self.merge_task_status() in ['ready', 'busy']
if self.app.forkable and not in_progress:
from allura.tasks import repo_tasks
repo_tasks.merge.post(self._id)
def _task_status(self, task_name):
task = MonQTask.query.find({
'state': {'$in': ['busy', 'complete', 'error', 'ready']}, # needed to use index
'task_name': task_name,
'args': [self._id],
'time_queue': {'$gt': datetime.utcnow() - timedelta(days=1)}, # constrain on index further
}).sort('_id', -1).limit(1).first()
if task:
return task.state
return None
def merge_task_status(self):
return self._task_status('allura.tasks.repo_tasks.merge')
def can_merge_task_status(self):
return self._task_status('allura.tasks.repo_tasks.can_merge')
def commits_task_status(self):
return self._task_status('allura.tasks.repo_tasks.determine_mr_commits')
def add_meta_post(self, changes):
tmpl = g.jinja2_env.get_template('allura:templates/repo/merge_request_changed.html')
message = tmpl.render(changes=changes)
self.discussion_thread.add_post(text=message, is_meta=True, ignore_security=True)
class RepoObject(object):
def __repr__(self): # pragma no cover
return '<%s %s>' % (
self.__class__.__name__, self._id)
def primary(self):
return self
def index_id(self):
'''Globally unique artifact identifier. Used for
SOLR ID, shortlinks, and maybe elsewhere
'''
id = '%s.%s#%s' % (
'allura.model.repo', # preserve index_id after module consolidation
self.__class__.__name__,
self._id)
return id.replace('.', '/')
@classmethod
def upsert(cls, id, **kwargs):
isnew = False
r = cls.query.get(_id=id)
if r is not None:
return r, isnew
try:
r = cls(_id=id, **kwargs)
session(r).flush(r)
isnew = True
except pymongo.errors.DuplicateKeyError: # pragma no cover
session(r).expunge(r)
r = cls.query.get(_id=id)
return r, isnew
# this is duplicative with the Commit model
# would be nice to get rid of this "doc" based view, but it is used a lot
CommitDoc = collection(
str('repo_ci'), main_doc_session,
Field('_id', str),
Field('tree_id', str),
Field('committed', SUser),
Field('authored', SUser),
Field('message', str),
Field('parent_ids', [str], index=True),
Field('child_ids', [str], index=True),
Field('repo_ids', [S.ObjectId()], index=True))
class Commit(MappedClass, RepoObject, ActivityObject):
# Basic commit information
# One of these for each commit in the physical repo on disk
class __mongometa__:
session = repository_orm_session
name = str('repo_ci')
indexes = [
'parent_ids',
'child_ids',
'repo_ids',
]
query: 'Query[Commit]'
_id = FieldProperty(str) # hexsha of the commit (for Git and Hg)
tree_id = FieldProperty(str)
committed = FieldProperty(SUser)
authored = FieldProperty(SUser)
message = FieldProperty(str)
parent_ids = FieldProperty([str])
child_ids = FieldProperty([str])
repo_ids = FieldProperty([S.ObjectId()])
type_s = 'Commit'
# Ephemeral attrs
repo = None
def __init__(self, **kw):
for k, v in six.iteritems(kw):
setattr(self, k, v)
@property
def activity_name(self):
return self.shorthand_id()
@property
def activity_extras(self):
d = ActivityObject.activity_extras.fget(self)
d.update(summary=self._summary(limit=500))
if self.repo:
d.update(app_config_id=self.repo.app.config._id)
return d
def has_activity_access(self, perm, user, activity):
"""
Check access against the original app.
Commits have no ACLs and are therefore always viewable by any user, if
they have access to the tool.
"""
app_config_id = activity.obj.activity_extras.get('app_config_id')
if app_config_id:
app_config = AppConfig.query.get(_id=app_config_id)
return has_access(app_config, perm, user)
return True
def set_context(self, repo):
self.repo = repo
@LazyProperty
def authored_user(self):
return User.by_email_address(self.authored.email)
@LazyProperty
def committed_user(self):
return User.by_email_address(self.committed.email)
@LazyProperty
def author_url(self):
u = self.authored_user
if u:
return u.url()
@LazyProperty
def committer_url(self):
u = self.committed_user
if u:
return u.url()
@LazyProperty
def tree(self):
return self.get_tree(create=True)
def get_tree(self, create=True):
if self.tree_id is None and create:
self.tree_id = self.repo.compute_tree_new(self)
if self.tree_id is None:
return None
cache = getattr(c, 'model_cache', '') or ModelCache()
t = cache.get(Tree, dict(_id=self.tree_id))
if t is None and create:
self.tree_id = self.repo.compute_tree_new(self)
t = Tree.query.get(_id=self.tree_id)
cache.set(Tree, dict(_id=self.tree_id), t)
if t is not None:
t.set_context(self)
return t
@LazyProperty
def summary(self):
return self._summary()
def _summary(self, limit=50):
message = h.really_unicode(self.message)
first_line = message.split('\n')[0]
return h.text.truncate(first_line, limit)
def shorthand_id(self):
if self.repo is None:
self.repo = self.guess_repo()
if self.repo is None:
return repr(self)
return self.repo.shorthand_for_commit(self._id)
@LazyProperty
def symbolic_ids(self):
return self.repo.symbolics_for_commit(self)
def url(self):
if self.repo is None:
self.repo = self.guess_repo()
if self.repo is None:
return '#'
return self.repo.url_for_commit(self)
def guess_repo(self):
import traceback
log.error('guess_repo: should not be called: %s' %
''.join(traceback.format_stack()))
for ac in c.project.app_configs:
try:
app = c.project.app_instance(ac)
if app.repo._id in self.repo_ids:
return app.repo
except AttributeError:
pass
return None
def link_text(self):
'''The link text that will be used when a shortlink to this artifact
is expanded into an <a></a> tag.
By default this method returns type_s + shorthand_id(). Subclasses should
override this method to provide more descriptive link text.
'''
return self.shorthand_id()
def context(self):
result = dict(prev=None, next=None)
if self.parent_ids:
result['prev'] = self.query.find(
dict(_id={'$in': self.parent_ids})).all()
for ci in result['prev']:
ci.set_context(self.repo)
if self.child_ids:
result['next'] = self.query.find(
dict(_id={'$in': self.child_ids})).all()
for ci in result['next']:
ci.set_context(self.repo)
return result
@LazyProperty
def diffs(self):
return self.paged_diffs()
def paged_diffs(self, start=0, end=None, onlyChangedFiles=False):
diffs = self.repo.paged_diffs(self._id, start, end, onlyChangedFiles)
return Object(
added=sorted(diffs['added']),
removed=sorted(diffs['removed']),
changed=sorted(diffs['changed']),
copied=sorted(diffs['copied'], key=itemgetter('new', 'old')), # this is a list of dicts
renamed=sorted(diffs['renamed'], key=itemgetter('new', 'old')), # this is a list of dicts
total=diffs['total'])
def get_path(self, path, create=True):
path = path.lstrip('/')
parts = path.split('/')
cur = self.get_tree(create)
if cur is not None:
for part in parts:
if part != '':
cur = cur[part]
return cur
@LazyProperty
def changed_paths(self):
'''
Returns a list of paths changed in this commit.
Leading and trailing slashes are removed, and
the list is complete, meaning that if a sub-path
is changed, all of the parent paths are included
(including '' to represent the root path).
Example:
If the file /foo/bar is changed in the commit,
this would return ['', 'foo', 'foo/bar']
'''
changes = self.repo.get_changes(self._id)
changed_paths = set()
for change in changes:
node = h.really_unicode(change).strip('/')
changed_paths.add(node)
node_path = os.path.dirname(node)
while node_path:
changed_paths.add(node_path)
node_path = os.path.dirname(node_path)
changed_paths.add('') # include '/' if there are any changes
return changed_paths
@LazyProperty
def added_paths(self):
'''
Returns a list of paths added in this commit.
Leading and trailing slashes are removed, and
the list is complete, meaning that if a directory
with subdirectories is added, all of the child
        paths are included (this relies on :meth:`paged_diffs` being
        complete).
Example:
If the directory /foo/bar/ is added in the commit
which contains a subdirectory /foo/bar/baz/ with
the file /foo/bar/baz/qux.txt, this would return:
['foo/bar', 'foo/bar/baz', 'foo/bar/baz/qux.txt']
'''
paths = set()
for path in self.paged_diffs()['added']:
paths.add(path.strip('/'))
return paths
@LazyProperty
def info(self):
return dict(
id=self._id,
author=self.authored.name,
author_email=self.authored.email,
date=self.authored.date,
author_url=self.author_url,
shortlink=self.shorthand_id(),
summary=self.summary
)
@LazyProperty
def webhook_info(self):
return {
'id': self._id,
'url': h.absurl(self.url()),
'timestamp': self.authored.date,
'message': self.summary,
'author': {
'name': self.authored.name,
'email': self.authored.email,
'username': self.authored_user.username if self.authored_user else '',
},
'committer': {
'name': self.committed.name,
'email': self.committed.email,
'username': self.committed_user.username if self.committed_user else '',
},
'added': self.diffs.added,
'removed': self.diffs.removed,
'modified': self.diffs.changed,
'copied': self.diffs.copied,
'renamed': self.diffs.renamed,
}
class Tree(MappedClass, RepoObject):
# Basic tree information
class __mongometa__:
session = repository_orm_session
name = str('repo_tree')
indexes = [
]
query: 'Query[Tree]'
_id = FieldProperty(str)
tree_ids = FieldProperty([dict(name=str, id=str)])
blob_ids = FieldProperty([dict(name=str, id=str)])
other_ids = FieldProperty([dict(name=str, id=str, type=SObjType)])
# Ephemeral attrs
repo = None
commit = None
parent = None
name = None
def __getitem__(self, name):
cache = getattr(c, 'model_cache', '') or ModelCache()
obj = self.by_name[name]
if obj['type'] == 'blob':
return Blob(self, name, obj['id'])
if obj['type'] == 'submodule':
log.info('Skipping submodule "%s"' % name)
raise KeyError(name)
obj = cache.get(Tree, dict(_id=obj['id']))
if obj is None:
oid = self.repo.compute_tree_new(
self.commit, self.path() + name + '/')
obj = cache.get(Tree, dict(_id=oid))
if obj is None:
raise KeyError(name)
obj.set_context(self, name)
return obj
def get_obj_by_path(self, path):
if hasattr(path, 'get'):
path = path['new']
if path.startswith('/'):
path = path[1:]
path = path.split('/')
obj = self
for p in path:
try:
obj = obj[p]
except KeyError:
return None
return obj
def get_blob_by_path(self, path):
obj = self.get_obj_by_path(path)
return obj if isinstance(obj, Blob) else None
def set_context(self, commit_or_tree, name=None):
assert commit_or_tree is not self
self.repo = commit_or_tree.repo
if name:
self.commit = commit_or_tree.commit
self.parent = commit_or_tree
self.name = name
else:
self.commit = commit_or_tree
def readme(self):
'returns (filename, unicode text) if a readme file is found'
for x in self.blob_ids:
if README_RE.match(x.name):
name = x.name
blob = self[name]
return (x.name, h.really_unicode(blob.text))
return None, None
def ls(self):
'''
List the entries in this tree, with historical commit info for
each node.
'''
last_commit = LastCommit.get(self)
# ensure that the LCD is saved, even if
# there is an error later in the request
if last_commit:
session(last_commit).flush(last_commit)
return self._lcd_map(last_commit)
else:
return []
def _lcd_map(self, lcd):
'''
Map "last-commit docs" to the structure that templates expect.
(This exists because LCD logic changed in the past, whereas templates
were not changed)
'''
if lcd is None:
return []
commit_ids = [e.commit_id for e in lcd.entries]
commits = list(Commit.query.find(dict(_id={'$in': commit_ids})))
for commit in commits:
commit.set_context(self.repo)
commit_infos = {c._id: c.info for c in commits}
tree_names = sorted([n.name for n in self.tree_ids])
blob_names = sorted(
[n.name for n in chain(self.blob_ids, self.other_ids)])
results = []
for type, names in (('DIR', tree_names), ('BLOB', blob_names)):
for name in names:
commit_info = commit_infos.get(lcd.by_name.get(name))
if not commit_info:
commit_info = defaultdict(str)
elif 'id' in commit_info:
commit_info['href'] = self.repo.url_for_commit(
commit_info['id'])
results.append(dict(
kind=type,
name=name,
href=name,
last_commit=dict(
author=commit_info['author'],
author_email=commit_info['author_email'],
author_url=commit_info['author_url'],
date=commit_info.get('date'),
href=commit_info.get('href', ''),
shortlink=commit_info['shortlink'],
summary=commit_info['summary'],
),
))
return results
def path(self):
if self.parent:
assert self.parent is not self
return self.parent.path() + self.name + '/'
else:
return '/'
def url(self):
return self.commit.url() + 'tree' + self.path()
@LazyProperty
def by_name(self):
d = Object((x.name, x) for x in self.other_ids)
d.update(
(x.name, Object(x, type='tree'))
for x in self.tree_ids)
d.update(
(x.name, Object(x, type='blob'))
for x in self.blob_ids)
return d
def is_blob(self, name):
return self.by_name[name]['type'] == 'blob'
def get_blob(self, name):
x = self.by_name[name]
return Blob(self, name, x.id)
class Blob(object):
'''Lightweight object representing a file in the repo'''
def __init__(self, tree, name, _id):
self._id = _id
self.tree = tree
self.name = name
self.repo = tree.repo
self.commit = tree.commit
fn, ext = os.path.splitext(self.name)
self.extension = ext or fn
def path(self):
return self.tree.path() + h.really_unicode(self.name)
def url(self):
return self.tree.url() + h.really_unicode(self.name)
@LazyProperty
def _content_type_encoding(self):
return self.repo.guess_type(self.name)
@LazyProperty
def content_type(self):
return self._content_type_encoding[0]
@LazyProperty
def content_encoding(self):
return self._content_type_encoding[1]
@property
def has_pypeline_view(self):
if README_RE.match(self.name) or self.extension in PYPELINE_EXTENSIONS:
return True
return False
@LazyProperty
def has_html_view(self):
'''
Return true if file is a text file that can be displayed.
:return: boolean
'''
if self.extension in self.repo._additional_viewable_extensions:
return True
if self.extension in BINARY_EXTENSIONS:
return False
if (self.content_type.startswith('text/') or
self.extension in VIEWABLE_EXTENSIONS or
self.extension in PYPELINE_EXTENSIONS or
utils.is_text_file(self.text)):
return True
return False
@property
def has_image_view(self):
return self.content_type.startswith('image/')
def open(self):
return self.repo.open_blob(self)
def __iter__(self):
return iter(self.open())
@LazyProperty
def size(self):
return self.repo.blob_size(self)
@LazyProperty
def text(self):
"""
Direct binary contents of file. Convert with h.really_unicode() if you think it is textual.
"""
return self.open().read()
class EmptyBlob(Blob):
def __init__(self):
pass
def open(self):
return ''
@property
def size(self):
return 0
def __nonzero__(self):
return False
def __bool__(self):
return False
# this is duplicative with the LastCommit model
# would be nice to get rid of this "doc" based view, but it is used a lot
LastCommitDoc = collection(
str('repo_last_commit'), main_doc_session,
Field('_id', S.ObjectId()),
Field('commit_id', str),
Field('path', str),
Index('commit_id', 'path'),
Field('entries', [dict(
name=str,
commit_id=str)]))
class LastCommit(MappedClass, RepoObject):
# Information about the last commit to touch a tree
class __mongometa__:
session = repository_orm_session
name = str('repo_last_commit')
indexes = [
('commit_id', 'path'),
]
query: 'Query[LastCommit]'
_id = FieldProperty(S.ObjectId)
commit_id = FieldProperty(str)
path = FieldProperty(str)
entries = FieldProperty([dict(
name=str,
commit_id=str,
)])
def __repr__(self):
return '<LastCommit /%r %s>' % (self.path, self.commit_id)
@classmethod
def _last_commit_id(cls, commit, path):
if path == '':
# on the top-level dir, the relevant commit should always be the current commit
return commit._id
try:
rev = next(commit.repo.log(commit._id, path, id_only=True, limit=1))
return commit.repo.rev_to_commit_id(rev)
except StopIteration:
log.error('Tree node not recognized by SCM: %s @ %s',
path, commit._id)
return commit._id
@classmethod
def _prev_commit_id(cls, commit, path):
if not commit.parent_ids or path in commit.added_paths:
return None # new paths by definition have no previous LCD
lcid_cache = getattr(c, 'lcid_cache', '')
if lcid_cache != '' and path in lcid_cache:
return lcid_cache[path]
try:
log_iter = commit.repo.log(commit._id, path, id_only=True, limit=2)
next(log_iter)
rev = next(log_iter)
return commit.repo.rev_to_commit_id(rev)
except StopIteration:
return None
@classmethod
def get(cls, tree):
'''Find or build the LastCommitDoc for the given tree.'''
cache = getattr(c, 'model_cache', '') or ModelCache()
path = tree.path().strip('/')
last_commit_id = cls._last_commit_id(tree.commit, path)
lcd = cache.get(cls, {'path': path, 'commit_id': last_commit_id})
if lcd is None:
commit = cache.get(Commit, {'_id': last_commit_id})
commit.set_context(tree.repo)
lcd = cls._build(commit.get_path(path))
return lcd
@classmethod
def _build(cls, tree):
'''
Build the LCD record, presuming that this tree is where it was most
recently changed.
'''
model_cache = getattr(c, 'model_cache', '') or ModelCache()
path = tree.path().strip('/')
entries = []
prev_lcd = None
prev_lcd_cid = cls._prev_commit_id(tree.commit, path)
if prev_lcd_cid:
prev_lcd = model_cache.get(
cls, {'path': path, 'commit_id': prev_lcd_cid})
entries = {}
nodes = set(
[node.name for node in chain(tree.tree_ids, tree.blob_ids, tree.other_ids)])
changed = set(
[node for node in nodes if os.path.join(path, node) in tree.commit.changed_paths])
unchanged = [os.path.join(path, node) for node in nodes - changed]
if prev_lcd:
# get unchanged entries from previously computed LCD
entries = prev_lcd.by_name
elif unchanged:
# no previously computed LCD, so get unchanged entries from SCM
# (but only ask for the ones that we know we need)
entries = tree.commit.repo.last_commit_ids(tree.commit, unchanged)
if entries is None:
# something strange went wrong; still show the list of files
# and possibly try again later
entries = {}
# paths are fully-qualified; shorten them back to just node names
entries = {
os.path.basename(path): commit_id for path, commit_id in six.iteritems(entries)}
# update with the nodes changed in this tree's commit
entries.update({node: tree.commit._id for node in changed})
# convert to a list of dicts, since mongo doesn't handle arbitrary keys
# well (i.e., . and $ not allowed)
entries = [{'name': name, 'commit_id': value}
for name, value in six.iteritems(entries)]
lcd = cls(
commit_id=tree.commit._id,
path=path,
entries=entries,
)
model_cache.set(cls, {'path': path, 'commit_id': tree.commit._id}, lcd)
return lcd
@LazyProperty
def by_name(self):
return {n.name: n.commit_id for n in self.entries}
class ModelCache(object):
'''
Cache model instances based on query params passed to get. LRU cache.
This does more caching than ming sessions (which only cache individual objects by _id)
The added complexity here may be unnecessary premature optimization, but
should be quite helpful when building up many models in order, like lcd _build
for a series of several new commits.
'''
def __init__(self, max_instances=None, max_queries=None):
'''
By default, each model type can have 2000 instances and
8000 queries. You can override these for specific model
types by passing in a dict() for either max_instances or
max_queries keyed by the class(es) with the max values.
        Classes not in the dict() will use the 2000/8000 defaults.
If you pass in a number instead of a dict, that value will
be used as the max for all classes.
'''
max_instances_default = 2000
max_queries_default = 8000
if isinstance(max_instances, int):
max_instances_default = max_instances
if isinstance(max_queries, int):
max_queries_default = max_queries
self._max_instances = defaultdict(lambda: max_instances_default)
self._max_queries = defaultdict(lambda: max_queries_default)
if hasattr(max_instances, 'items'):
self._max_instances.update(max_instances)
if hasattr(max_queries, 'items'):
self._max_queries.update(max_queries)
# keyed by query, holds _id
self._query_cache = defaultdict(OrderedDict)
self._instance_cache = defaultdict(OrderedDict) # keyed by _id
def _normalize_query(self, query):
_query = query
if not isinstance(_query, tuple):
_query = tuple(sorted(list(_query.items()), key=lambda k: k[0]))
return _query
def _model_query(self, cls):
if hasattr(cls, 'query'):
return cls.query
elif hasattr(cls, 'm'):
return cls.m
else:
raise AttributeError(
'%s has neither "query" nor "m" attribute' % cls)
def get(self, cls, query):
_query = self._normalize_query(query)
self._touch(cls, _query)
if _query not in self._query_cache[cls]:
val = self._model_query(cls).get(**query)
self.set(cls, _query, val)
return val
_id = self._query_cache[cls][_query]
if _id is None:
return None
if _id not in self._instance_cache[cls]:
val = self._model_query(cls).get(**query)
self.set(cls, _query, val)
return val
return self._instance_cache[cls][_id]
def set(self, cls, query, val):
_query = self._normalize_query(query)
if val is not None:
_id = getattr(val, '_model_cache_id',
getattr(val, '_id',
self._query_cache[cls].get(_query,
None)))
if _id is None:
_id = val._model_cache_id = bson.ObjectId()
self._query_cache[cls][_query] = _id
self._instance_cache[cls][_id] = val
else:
self._query_cache[cls][_query] = None
self._touch(cls, _query)
self._check_sizes(cls)
def _touch(self, cls, query):
'''
        Keep track of access order, prevent duplicates,
        and expire entries from the cache in least-recently-used (LRU) order.
'''
_query = self._normalize_query(query)
if _query not in self._query_cache[cls]:
return
_id = self._query_cache[cls].pop(_query)
self._query_cache[cls][_query] = _id
if _id not in self._instance_cache[cls]:
return
val = self._instance_cache[cls].pop(_id)
self._instance_cache[cls][_id] = val
def _check_sizes(self, cls):
if self.num_queries(cls) > self._max_queries[cls]:
_id = self._remove_least_recently_used(self._query_cache[cls])
if _id in self._instance_cache[cls]:
instance = self._instance_cache[cls][_id]
self._try_flush(instance, expunge=False)
if self.num_instances(cls) > self._max_instances[cls]:
instance = self._remove_least_recently_used(
self._instance_cache[cls])
self._try_flush(instance, expunge=True)
def _try_flush(self, instance, expunge=False):
try:
inst_session = session(instance)
except AttributeError:
inst_session = None
if inst_session:
inst_session.flush(instance)
if expunge:
inst_session.expunge(instance)
def _remove_least_recently_used(self, cache):
# last-used (most-recently-used) is last in cache, so take first
key, val = cache.popitem(last=False)
return val
def num_queries(self, cls=None):
if cls is None:
return sum([len(c) for c in self._query_cache.values()])
else:
return len(self._query_cache[cls])
def num_instances(self, cls=None):
if cls is None:
return sum([len(c) for c in self._instance_cache.values()])
else:
return len(self._instance_cache[cls])
def instance_ids(self, cls):
return list(self._instance_cache[cls].keys())
def batch_load(self, cls, query, attrs=None):
'''
Load multiple results given a query.
Optionally takes a list of attribute names to use
as the cache key. If not given, uses the keys of
the given query.
'''
if attrs is None:
attrs = list(query.keys())
for result in self._model_query(cls).find(query):
keys = {a: getattr(result, a) for a in attrs}
self.set(cls, keys, result)
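# Illustrative sketch (not part of Allura): configuring per-class limits for
# ModelCache as described in its __init__ docstring. The numbers are arbitrary.
def _example_model_cache():  # pragma no cover
    cache = ModelCache(max_instances={Commit: 500},  # 500 Commits, default 2000 for other classes
                       max_queries=4000)             # 4000 cached queries for every class
    ci = cache.get(Commit, dict(_id='deadbeef'))     # hypothetical id; returns None on a miss
    cache.batch_load(Commit, {'_id': {'$in': ['a1', 'b2']}})  # prime the cache with one query
    return ci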
class GitLikeTree(object):
'''
A tree node similar to that which is used in git
:var dict blobs: files at this level of the tree. name => oid
:var dict trees: subtrees (child dirs). name => GitLikeTree
'''
def __init__(self):
self.blobs = {} # blobs[name] = oid
self.trees = defaultdict(GitLikeTree) # trees[name] = GitLikeTree()
self._hex = None
def get_tree(self, path):
path = h.really_unicode(path)
if path.startswith('/'):
path = path[1:]
if not path:
return self
cur = self
for part in path.split('/'):
cur = cur.trees[part]
return cur
def get_blob(self, path):
path = h.really_unicode(path)
if path.startswith('/'):
path = path[1:]
path_parts = path.split('/')
dirpath, last = path_parts[:-1], path_parts[-1]
cur = self
for part in dirpath:
cur = cur.trees[part]
return cur.blobs[last]
def set_blob(self, path, oid):
path = h.really_unicode(path)
if path.startswith('/'):
path = path[1:]
path_parts = path.split('/')
dirpath, filename = path_parts[:-1], path_parts[-1]
cur = self
for part in dirpath:
cur = cur.trees[part]
cur.blobs[filename] = oid
def hex(self):
'''Compute a recursive sha1 hash on the tree'''
# dependent on __repr__ below
if self._hex is None:
sha_obj = sha1(b'tree\n' + six.ensure_binary(repr(self)))
self._hex = sha_obj.hexdigest()
return self._hex
def __repr__(self):
# this can't change, is used in hex() above
lines = ['t %s %s' % (t.hex(), h.really_unicode(name))
for name, t in six.iteritems(self.trees)]
lines += ['b %s %s' % (oid, h.really_unicode(name))
for name, oid in six.iteritems(self.blobs)]
return six.ensure_str('\n'.join(sorted(lines)))
def __unicode__(self):
return self.pretty_tree(recurse=False)
def pretty_tree(self, indent=0, recurse=True, show_id=True):
'''For debugging, show a nice tree representation'''
        lines = [' ' * indent + 't %s %s' %
                 (name, '\n' + t.pretty_tree(indent + 2, show_id=show_id)
                  if recurse else t.hex())
for name, t in sorted(six.iteritems(self.trees))]
lines += [' ' * indent + 'b %s %s' % (name, oid if show_id else '')
for name, oid in sorted(six.iteritems(self.blobs))]
        # return text (not utf-8 bytes) so __unicode__ and debug printing work
        return h.really_unicode('\n'.join(lines))
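# Illustrative sketch (not part of Allura): building a GitLikeTree by hand.
# The paths and oids are hypothetical.
def _example_git_like_tree():  # pragma no cover
    root = GitLikeTree()
    root.set_blob('/README', 'oid-readme')
    root.set_blob('/src/app.py', 'oid-app')
    assert root.get_blob('src/app.py') == 'oid-app'
    assert 'src' in root.trees and 'README' in root.blobs
    return root.hex()  # recursive sha1 over the repr()-based serialization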
def topological_sort(graph):
'''Return the topological sort of a graph.
The graph is a dict with each entry representing
a node (the key is the node ID) and its parent(s) (a
set of node IDs). Result is an iterator over the topo-sorted
node IDs.
The algorithm is based on one seen in
http://en.wikipedia.org/wiki/Topological_sorting#CITEREFKahn1962
'''
# Index children, identify roots
children = defaultdict(list)
roots = []
for nid, parents in list(graph.items()):
if not parents:
graph.pop(nid)
roots.append(nid)
for p_nid in parents:
children[p_nid].append(nid)
# Topo sort
while roots:
n = roots.pop()
yield n
for child in children[n]:
graph[child].remove(n)
if not graph[child]:
graph.pop(child)
roots.append(child)
assert not graph, 'Cycle detected'
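# Illustrative sketch (not part of Allura): topologically sorting a tiny commit
# graph. Node ids are hypothetical; each value is the set of parent ids.
def _example_topological_sort():  # pragma no cover
    graph = {'a': set(), 'b': {'a'}, 'c': {'a'}, 'd': {'b', 'c'}}
    order = list(topological_sort(graph))  # note: the graph dict is consumed
    assert order[0] == 'a' and order[-1] == 'd'  # parents always precede children
    return order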
def prefix_paths_union(a, b):
"""
Given two sets of paths, a and b, find the items from a that
are either in b or are parent directories of items in b.
"""
union = a & b
prefixes = a - b
candidates = b - a
for prefix in prefixes:
for candidate in candidates:
if candidate.startswith(prefix + '/'):
union.add(prefix)
break
return union
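# Illustrative sketch (not part of Allura): which requested paths are "covered"
# by a commit's changed files. All paths here are hypothetical.
def _example_prefix_paths_union():  # pragma no cover
    requested = {'docs', 'src/app.py', 'tests'}
    changed = {'docs/index.rst', 'src/app.py'}
    # 'src/app.py' matches exactly; 'docs' is a parent directory of 'docs/index.rst'
    assert prefix_paths_union(requested, changed) == {'docs', 'src/app.py'}
    return prefix_paths_union(requested, changed)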
def zipdir(source, zipfile, exclude=None):
"""Create zip archive using zip binary."""
zipbin = tg.config.get('scm.repos.tarball.zip_binary', '/usr/bin/zip')
source = source.rstrip('/')
# this is needed to get proper prefixes inside zip-file
working_dir = os.path.dirname(source)
source_fn = os.path.basename(source)
command = [zipbin, '-y', '-q', '-r', zipfile, source_fn.encode('utf-8')]
if exclude:
command += ['-x', exclude]
p = Popen(command, cwd=working_dir, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(
"Command: {0} returned non-zero exit code {1}\n"
"STDOUT: {2}\n"
"STDERR: {3}".format(command, p.returncode, stdout, stderr))
Mapper.compile_all()
|
meteor_sort.py
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import Sequential
from os.path import join
from os import listdir, getcwd
import multiprocessing
from tensorflow.keras.constraints import unit_norm
from auxiliar.meteor_sort_learning_rate import meteor_sort_learning_rate
from auxiliar.performance_measure import get_performance_measures, plot_acc_and_loss
from tensorflow import lite
class MeteorSortCallback(Callback):
"""
    Keras callback used by `meteor_sort()`. It saves the model weights whenever both the training and
    validation accuracies reach their configured thresholds.
"""
def __init__(self, threshold_train: float, threshold_valid: float, model: Sequential, model_description: str,
weights_dir: str):
"""
Constructor of the `MeteorSortCallback` class.
        :param threshold_train: minimum training-set accuracy required before the model weights are saved
        :param threshold_valid: minimum validation-set accuracy required before the model weights are saved
:param model: tensorflow.keras.models.Sequential model that is being trained
:param model_description: short model description
:param weights_dir: directory where to save the weights file
"""
super(MeteorSortCallback, self).__init__()
self.threshold_train = threshold_train
self.threshold_valid = threshold_valid
self.model_description = model_description
self.dir_weights = weights_dir
self.model = model
def on_epoch_end(self, epoch, logs=None):
if (logs.get('accuracy') >= self.threshold_train) and (logs.get('val_accuracy') >= self.threshold_valid):
self.model.save_weights(
join(self.dir_weights, self.model_description + '_acc_' + str(logs.get('accuracy'))[0:5]
+ '_val_acc_' + str(logs.get('val_accuracy'))[0:5] + '.h5'), save_format='h5')
def meteor_sort() -> None:
"""
Meteor sort training main script. In this case we carry out the following tasks:
- Generate the training and validation generator.
- Create the tensorflow (keras) model.
- If enabled, run the `meteor_sort_learning_rate()` function.
- Train the model.
- If enabled, convert the model to tensorflow lite and run `get_performance_measures()` and
`plot_acc_and_loss()` methods to get the performance measures (precision, recall and F1-Score) and plot
the accuracy and loss over the training iterations in both the training and the validation sets.
:return: None
"""
tf.keras.backend.clear_session()
# data
data_dir = join(getcwd(), "meteor_data")
train_dir = join(data_dir, 'train')
validation_dir = join(data_dir, 'validation')
# Model handling
model_to_convert = ""
model_name = 'model_v2_1'
results_dir = join(getcwd(), 'results')
results_dir_weights = join(results_dir, 'weights')
# Hyperparameters for the training
image_resolution: tuple = (256, 256)
image_resolution_gray_scale: tuple = (256, 256, 1)
batch_size: int = 64
epochs: int = 10
learning_rate: float = 5e-4
get_ideal_learning_rate: bool = False
train_set_threshold: float = 0.92
validation_set_threshold: float = 0.93
lightweight_training: bool = True
lightweight_training_factor: int = 4
num_training_images = len(listdir(join(train_dir, 'meteors'))) + len(listdir(join(train_dir, 'non_meteors')))
num_validation_images = len(listdir(join(validation_dir, 'meteors'))) \
+ len(listdir(join(validation_dir, 'non_meteors')))
# Lightweight training (with fewer images)
if lightweight_training:
num_training_images = num_training_images / lightweight_training_factor
num_validation_images = num_validation_images / lightweight_training_factor
steps_per_epoch: int = int(num_training_images / batch_size)
validation_steps: int = int(num_validation_images / batch_size)
# Rescale all images by 1./255
train_datagen = ImageDataGenerator(rescale=1.0 / 255,
rotation_range=10,  # Randomly rotate images by up to 10 degrees
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=5, # Shear the image by 5 degrees
zoom_range=0.1,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest'
)
validation_datagen = ImageDataGenerator(rescale=1.0 / 255.)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=image_resolution)
validation_generator = validation_datagen.flow_from_directory(validation_dir,
batch_size=batch_size,
class_mode='binary',
color_mode='grayscale',
target_size=image_resolution)
model = Sequential([
Conv2D(8, (7, 7), activation='elu', input_shape=image_resolution_gray_scale,
strides=1, kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(3, 3)),
BatchNormalization(),
Conv2D(12, (5, 5), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(3, 3)),
BatchNormalization(),
Conv2D(12, (3, 3), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
Conv2D(8, (3, 3), activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
MaxPooling2D(pool_size=(2, 2)),
BatchNormalization(),
Flatten(),
Dense(200, activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
BatchNormalization(),
Dense(16, activation='elu', kernel_initializer='he_uniform', kernel_constraint=unit_norm()),
BatchNormalization(),
Dense(1, activation='sigmoid', kernel_initializer='he_uniform')
])
if get_ideal_learning_rate:
meteor_sort_learning_rate(model, train_dir, image_resolution, batch_size, epochs, steps_per_epoch)
print(model.summary())
optimizer = Adam(learning_rate=learning_rate)
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['accuracy'])
my_callback = MeteorSortCallback(train_set_threshold, validation_set_threshold, model, model_name,
results_dir_weights)
history = model.fit(train_generator,
validation_data=validation_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_steps=validation_steps,
shuffle=True,
verbose=1,
callbacks=[my_callback])
# Print model performance and get performance measures
if model_to_convert != "":
# Load model to convert weights:
model.load_weights(join(results_dir, model_to_convert))
# Convert model to tflite:
converter = lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
open("meteor_sort_tflite_model.tflite", "wb").write(tflite_model)
# Get performance measures:
get_performance_measures(model, train_dir, image_resolution,
join(results_dir, 'performance_' + model_name + '.txt'), threshold=0.50)
# Plot Accuracy and Loss in both train and validation sets
plot_acc_and_loss(history, results_dir, model_name[-5:])
if __name__ == '__main__':
p = multiprocessing.Process(target=meteor_sort)
p.start()
p.join()
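# Illustrative sketch, not part of the original script: loading the TensorFlow
# Lite model written above and running a single prediction. The all-zeros input
# is a placeholder; a real caller would feed a normalized grayscale image with
# the same shape the interpreter reports.
def _example_run_tflite_inference():
    import numpy as np
    interpreter = lite.Interpreter(model_path="meteor_sort_tflite_model.tflite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    dummy_image = np.zeros(input_details[0]['shape'], dtype=np.float32)
    interpreter.set_tensor(input_details[0]['index'], dummy_image)
    interpreter.invoke()
    # Sigmoid output: probability that the input frame contains a meteor.
    return interpreter.get_tensor(output_details[0]['index'])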
|
diskover_socket_server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that indexes
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2019
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
from diskover import q_crawl, adaptive_batch, config, get_time
from diskover_bot_module import scrape_tree_meta
import socket
import subprocess
try:
import queue as Queue
except ImportError:
import Queue
import threading
import uuid
import json
import time
import sys
import pickle
import struct
# dict to hold socket tasks
#socket_tasks = {}
# list of socket client
#clientlist = []
def socket_thread_handler(threadnum, q, cliargs, logger):
"""This is the socket thread handler function.
It runs the command message sent by the client.
"""
BUFF = 1024
while True:
try:
c = q.get()
clientsock, addr = c
logger.debug(clientsock)
logger.debug(addr)
data = clientsock.recv(BUFF)
data = data.decode('utf-8')
logger.debug('received data: %s' % data)
if not data:
q.task_done()
# close connection to client
clientsock.close()
logger.debug("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
continue
# check if ping msg
if data == 'ping':
logger.info("[thread-%s]: Got ping from %s" % (threadnum, str(addr)))
# send pong reply
message = b'pong'
clientsock.send(message)
logger.debug('sending data: %s' % message)
else:
# strip away any headers sent by curl
data = data.split('\r\n')[-1]
logger.debug("[thread-%s]: Got command from %s" % (threadnum, str(addr)))
# load json and store in dict
command_dict = json.loads(data)
logger.debug(command_dict)
# run command from json data
logger.info("[thread-%s]: Request -> %s" % (threadnum, command_dict))
run_command(threadnum, command_dict, clientsock, cliargs, logger)
q.task_done()
# close connection to client
clientsock.close()
logger.debug("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
except (ValueError, TypeError) as e:
q.task_done()
logger.error("[thread-%s]: Invalid JSON from %s: (%s)" % (threadnum, str(addr), e))
message = b'{"msg": "error", "error": "Invalid JSON caused by %s"}\n' % str(e).encode('utf-8')
clientsock.send(message)
logger.debug(message)
# close connection to client
clientsock.close()
logger.debug("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
pass
except socket.error as e:
q.task_done()
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
# close connection to client
clientsock.close()
logger.debug("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
pass
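# Illustrative sketch, not part of diskover: a minimal client for the command
# listener handled above. It sends a JSON command document (as consumed by
# run_command() further below) and returns the reply. The host, port, index and
# path values are assumptions; the real host/port come from the diskover config.
def _example_send_listener_command(host='localhost', port=9999):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    command = {"action": "crawl", "index": "diskover-index", "path": "/data"}
    client.sendall(json.dumps(command).encode('utf-8'))
    reply = client.recv(1024)
    client.close()
    return reply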
def recvall(sock, count):
buf = b''
while count:
newbuf = sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
def recv_one_message(sock):
lengthbuf = recvall(sock, 4)
if not lengthbuf:
return None
length, = struct.unpack('!I', lengthbuf)
return recvall(sock, length)
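# Illustrative sketch, not part of this module: the sending side of the framing
# that recv_one_message() expects -- a 4-byte big-endian length header followed
# by the payload bytes (the diskover treewalk client is assumed to do this).
def _example_send_one_message(sock, data):
    sock.sendall(struct.pack('!I', len(data)) + data)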
def socket_thread_handler_twc(threadnum, q, q_kill, lock, rootdir, num_sep, level,
batchsize, cliargs, logger, reindex_dict):
"""This is the socket thread handler tree walk client function.
Stream of directory listings (pickle) from diskover treewalk
client connections are enqueued to redis rq queue.
"""
while True:
try:
c = q.get()
clientsock, addr = c
logger.debug(clientsock)
logger.debug(addr)
totalfiles = 0
while True:
data = recv_one_message(clientsock)
if not data:
break
if data == b'SIGKILL' or data == 'SIGKILL':
q_kill.put(b'SIGKILL')
break
# unpickle data sent from client
data_decoded = pickle.loads(data)
logger.debug(data_decoded)
# enqueue to redis
batch = []
for root, dirs, files in data_decoded:
files_len = len(files)
totalfiles += files_len
# check for empty dirs
if len(dirs) == 0 and len(files) == 0 and not cliargs['indexemptydirs']:
continue
batch.append((root, dirs, files))
batch_len = len(batch)
if batch_len >= batchsize or (cliargs['adaptivebatch'] and totalfiles >= config['adaptivebatch_maxfiles']):
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,),
result_ttl=config['redis_ttl'])
if cliargs['debug'] or cliargs['verbose']:
logger.info("enqueued batchsize: %s (batchsize: %s)" % (batch_len, batchsize))
del batch[:]
totalfiles = 0
if cliargs['adaptivebatch']:
batchsize = adaptive_batch(q_crawl, cliargs, batchsize)
if cliargs['debug'] or cliargs['verbose']:
logger.info("batchsize set to: %s" % batchsize)
if len(batch) > 0:
# add any remaining in batch to queue
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,), result_ttl=config['redis_ttl'])
del batch[:]
# close connection to client
clientsock.close()
logger.debug("[thread-%s]: %s closed connection" % (threadnum, str(addr)))
q.task_done()
except socket.error as e:
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
def start_socket_server(cliargs, logger):
"""This is the start socket server function.
It opens a socket and waits for remote commands.
"""
#global clientlist
# set thread/connection limit
max_connections = config['listener_maxconnections']
# Queue for socket threads
q = Queue.Queue(maxsize=max_connections)
try:
# create TCP socket object
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = config['listener_host'] # default is localhost
port = config['listener_port'] # default is 9999
# bind to port
serversock.bind((host, port))
# start listener
serversock.listen(max_connections)
# set up the threads and start them
for i in range(max_connections):
# create thread
t = threading.Thread(target=socket_thread_handler, args=(i, q, cliargs, logger,))
t.daemon = True
t.start()
while True:
logger.debug("Waiting for connection, listening on %s port %s TCP (ctrl-c to shutdown)"
% (str(host), str(port)))
# establish connection
clientsock, addr = serversock.accept()
logger.debug(clientsock)
logger.debug(addr)
logger.debug("Got a connection from %s" % str(addr))
# add client to list
client = (clientsock, addr)
#clientlist.append(client)
# add task to Queue
q.put(client)
except socket.error as e:
serversock.close()
logger.error("Error opening socket (%s)" % e)
sys.exit(1)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, shutting down...')
q.join()
serversock.close()
sys.exit(0)
def start_socket_server_twc(rootdir_path, num_sep, level, batchsize, cliargs, logger, reindex_dict):
"""This is the start socket server tree walk function.
It opens a socket and waits for diskover tree walk client
connections.
"""
#global clientlist
# set thread/connection limit
max_connections = config['listener_maxconnections']
# Queue for socket threads
q = Queue.Queue(maxsize=max_connections)
q_kill = Queue.Queue()
lock = threading.Lock()
try:
# create TCP socket object
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = config['listener_host'] # default is localhost
if cliargs['twcport']:
port = cliargs['twcport']
else:
port = config['listener_twcport'] # default is 9998
# bind to port
serversock.bind((host, port))
# start listener
serversock.listen(max_connections)
# set up the threads and start them
for i in range(max_connections):
t = threading.Thread(
target=socket_thread_handler_twc,
args=(i, q, q_kill, lock, rootdir_path, num_sep,
level, batchsize, cliargs, logger, reindex_dict,))
t.daemon = True
t.start()
starttime = time.time()
while True:
if q_kill.qsize() > 0:
logger.info("Received signal to shutdown socket server")
q.join()
serversock.close()
return starttime
logger.debug("Waiting for connection, listening on %s port %s TCP (ctrl-c to shutdown)"
% (str(host), str(port)))
# establish connection
clientsock, addr = serversock.accept()
logger.debug(clientsock)
logger.debug(addr)
logger.debug("Got a connection from %s" % str(addr))
# add client to list
client = (clientsock, addr)
#clientlist.append(client)
# set start time to first connection
#if len(clientlist) == 1:
# starttime = time.time()
# put client into Queue
q.put(client)
except socket.error as e:
serversock.close()
logger.error("Error opening socket (%s)" % e)
sys.exit(1)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, shutting down...')
serversock.close()
sys.exit(0)
def run_command(threadnum, command_dict, clientsock, cliargs, logger):
"""This is the run command function.
It runs commands from the listener socket
using values in command_dict.
"""
#global socket_tasks
#global clientlist
# try to get index name from command or use from diskover config file
try:
index = str(command_dict['index'])
except KeyError:
index = str(config['index'])
pass
# try to get min days mtime from command or use default
try:
mtime = str(command_dict['mtime'])
except KeyError:
mtime = str(cliargs['mtime'])
pass
# try to get min size from command or use default
try:
minsize = str(command_dict['minsize'])
except KeyError:
minsize = str(cliargs['minsize'])
pass
# try to get worker batch size from command or use default
try:
batchsize = str(command_dict['batchsize'])
except KeyError:
batchsize = str(cliargs['batchsize'])
pass
# try to get adaptive batch option from command or use default
try:
adaptivebatch = str(command_dict['adaptivebatch'])
except KeyError:
adaptivebatch = str(cliargs['adaptivebatch'])
pass
# try to get optimize index option from command or use default
try:
optimizeindex = str(command_dict['optimizeindex'])
except KeyError:
optimizeindex = str(cliargs['optimizeindex'])
pass
# try to get auto tag option from command or use default
try:
autotag = str(command_dict['autotag'])
except KeyError:
autotag = str(cliargs['autotag'])
pass
# try to get empty dirs option from command or use default
try:
indexemptydirs = str(command_dict['indexemptydirs'])
except KeyError:
indexemptydirs = str(cliargs['indexemptydirs'])
pass
try:
action = command_dict['action']
pythonpath = config['python_path']
diskoverpath = config['diskover_path']
# set up command for different action
if action == 'crawl':
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-m', mtime, '-s', minsize,
'-q', '-F']
elif action == 'crawlapi':
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '--crawlapi', '-i', index, '-d', path, '-F']
elif action == 'finddupes':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--finddupes', '-q', '-F']
elif action == 'hotdirs':
index2 = str(command_dict['index2'])
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--hotdirs', index2, '-q', '-F']
elif action == 'reindex':
try:
recursive = command_dict['recursive']
except KeyError:
recursive = 'false'
pass
path = command_dict['path']
if recursive == 'true':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-R', '-q', '-F']
else:
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '-r', '-q', '-F']
elif action == 'updatedirsizes':
try:
recursive = command_dict['recursive']
except KeyError:
recursive = 'false'
pass
if recursive == 'true':
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '--dircalcsonly', '-q', '-F']
else:
path = command_dict['path']
cmd = [pythonpath, diskoverpath, '-b', batchsize,
'-i', index, '-d', path, '--dircalcsonly', '--maxdcdepth', '0', '-q', '-F']
elif action == 'kill':
taskid = command_dict['taskid']
logger.info("[thread-%s]: Kill task message received! (taskid:%s)",
threadnum, taskid)
# do something here to kill task (future)
message = b'{"msg": "taskkilled"}\n'
clientsock.send(message)
return
else:
logger.warning("Unknown action")
message = b'{"error": "unknown action"}\n'
clientsock.send(message)
return
# add adaptive batch
if (adaptivebatch == "True" or adaptivebatch == "true"):
cmd.append('-a')
# add optimize index
if (optimizeindex == "True" or optimizeindex == "true"):
cmd.append('-O')
# add auto tags
if (autotag == "True" or autotag == "true"):
cmd.append('-A')
# add index empty dirs
if (indexemptydirs == "True" or indexemptydirs == "true"):
cmd.append('-e')
# run command using subprocess
starttime = time.time()
taskid = str(uuid.uuid4()).encode('utf-8')
# start process
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# add process to socket_tasks dict
#socket_tasks[taskid] = process
message = b'{"msg": "taskstart", "taskid": "' + taskid + b'"}\n'
clientsock.send(message)
logger.info("[thread-%s]: Running command -> %s", threadnum, cmd)
output, error = process.communicate()
# send exit msg to client
exitcode = process.returncode
elapsedtime = str(get_time(time.time() - starttime)).encode('utf-8')
logger.info("[thread-%s]: Finished command with exit code %d", threadnum, exitcode)
commandOutput = output.decode('utf-8') + error.decode('utf-8')
if (exitcode != 0):
logger.error('Command error:')
else:
logger.info('Command output:')
print('***********************************************************************')
logger.info(commandOutput)
print('***********************************************************************')
messageDict = {
"msg": "taskfinish",
"taskid": str(taskid),
"exitcode": str(exitcode),
"elapsedtime": str(elapsedtime),
"commandOutput": str(commandOutput)
}
message = json.dumps(messageDict)
clientsock.send(message.encode('utf-8'))
except ValueError:
logger.warning("Value error")
message = b'{"error": "value error"}\n'
clientsock.send(message)
pass
except socket.error as e:
logger.error("[thread-%s]: Socket error (%s)" % (threadnum, e))
pass
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
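# Illustrative sketch, not part of the original miner: what the helpers above
# do. bytereverse() swaps the byte order of one 32-bit word, bufreverse()
# byte-swaps every 4-byte word in a buffer, and wordreverse() reverses the
# order of the 4-byte words themselves.
def _example_byte_order_helpers():
    assert bytereverse(0x12345678) == 0x78563412
    assert bufreverse('\x11\x22\x33\x44\xaa\xbb\xcc\xdd') == '\x44\x33\x22\x11\xdd\xcc\xbb\xaa'
    assert wordreverse('\x11\x22\x33\x44\xaa\xbb\xcc\xdd') == '\xaa\xbb\xcc\xdd\x11\x22\x33\x44'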
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8697
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
enterprise_backup_restore_test.py
|
import os, re, copy, json, subprocess
from random import randrange, randint, choice
from threading import Thread
from couchbase_helper.cluster import Cluster
from membase.helper.rebalance_helper import RebalanceHelper
from couchbase_helper.documentgenerator import BlobGenerator, DocumentGenerator
from ent_backup_restore.enterprise_backup_restore_base import EnterpriseBackupRestoreBase
from ent_backup_restore.backup_service_upgrade import BackupServiceHook
from membase.api.rest_client import RestConnection, RestHelper, Bucket
from membase.helper.bucket_helper import BucketOperationHelper
from pytests.query_tests_helper import QueryHelperTests
#from lib.membase.helper.cluster_helper import ClusterOperationHelper
from remote.remote_util import RemoteUtilHelper, RemoteMachineShellConnection
from security.auditmain import audit
from security.rbac_base import RbacBase
from upgrade.newupgradebasetest import NewUpgradeBaseTest
from couchbase.bucket import Bucket
from couchbase_helper.document import View
from eventing.eventing_base import EventingBaseTest
from tasks.future import Future, TimeoutError
from xdcr.xdcrnewbasetests import NodeHelper
from couchbase_helper.stats_tools import StatsCommon
from testconstants import COUCHBASE_DATA_PATH, WIN_COUCHBASE_DATA_PATH, \
COUCHBASE_FROM_4DOT6, ENT_BKRS, ENT_BKRS_FTS
AUDITBACKUPID = 20480
AUDITRESTOREID = 20485
SOURCE_CB_PARAMS = {
"authUser": "default",
"authPassword": "",
"authSaslUser": "",
"authSaslPassword": "",
"clusterManagerBackoffFactor": 0,
"clusterManagerSleepInitMS": 0,
"clusterManagerSleepMaxMS": 20000,
"dataManagerBackoffFactor": 0,
"dataManagerSleepInitMS": 0,
"dataManagerSleepMaxMS": 20000,
"feedBufferSizeBytes": 0,
"feedBufferAckThreshold": 0
}
INDEX_DEFINITION = {
"type": "fulltext-index",
"name": "",
"uuid": "",
"params": {},
"sourceType": "couchbase",
"sourceName": "default",
"sourceUUID": "",
"sourceParams": SOURCE_CB_PARAMS,
"planParams": {}
}
class EnterpriseBackupRestoreTest(EnterpriseBackupRestoreBase, NewUpgradeBaseTest):
def setUp(self):
super().setUp()
self.users_check_restore = \
self.input.param("users-check-restore", '').replace("ALL", "*").split(";")
if '' in self.users_check_restore:
self.users_check_restore.remove('')
for server in [self.backupset.backup_host, self.backupset.restore_cluster_host]:
conn = RemoteMachineShellConnection(server)
conn.extract_remote_info()
conn.terminate_processes(conn.info, ["cbbackupmgr"])
conn.disconnect()
self.bucket_helper = BucketOperationHelper()
def tearDown(self):
super(EnterpriseBackupRestoreTest, self).tearDown()
def test_backup_create(self):
self.backup_create_validate()
def test_backup_restore_sanity(self):
"""
1. Creates the default bucket on the cluster and loads it with the given number of items
2. Performs updates and creates backups the specified number of times (test param number_of_backups)
3. Performs restores the same number of times with random start and end values
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("*** start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", self.expires)
self.log.info("*** done to load items to all buckets")
self.ops_type = self.input.param("ops-type", "update")
self.expected_error = self.input.param("expected_error", None)
if self.auto_failover:
self.log.info("Enabling auto failover on " + str(self.backupset.cluster_host))
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)
self.backup_create_validate()
for i in range(1, self.backupset.number_of_backups + 1):
if self.ops_type == "update":
self.log.info("*** start to update items in all buckets")
self._load_all_buckets(self.master, gen, "update", self.expires)
self.log.info("*** done update items in all buckets")
elif self.ops_type == "delete":
self.log.info("*** start to delete items in all buckets")
self._load_all_buckets(self.master, gen, "delete", self.expires)
self.log.info("*** done to delete items in all buckets")
self.sleep(10)
self.log.info("*** start to validate backup cluster")
self.backup_cluster_validate()
self.targetMaster = True
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
self.log.info("*** start to restore cluster")
restored = {"{0}/{1}".format(start, end): ""}
for i in range(1, self.backupset.number_of_backups + 1):
if self.reset_restore_cluster:
self.log.info("\n*** start to reset cluster")
self.backup_reset_clusters(self.cluster_to_restore)
cmd_init = 'node-init'
if self.same_cluster:
self.log.info("Same cluster")
self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
if self.hostname and self.master.ip.endswith(".com"):
options = '--node-init-hostname ' + self.master.ip
shell = RemoteMachineShellConnection(self.master)
output, _ = shell.execute_couchbase_cli(cli_command=cmd_init,
options=options,
cluster_host="localhost",
user=self.master.rest_username,
password=self.master.rest_password)
shell.disconnect()
if not self._check_output("SUCCESS: Node initialize", output):
raise("Failed to set hostname")
else:
self.log.info("Different cluster")
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
rest = RestConnection(self.backupset.restore_cluster_host)
rest.force_eject_node()
rest.init_node()
if self.hostname and self.backupset.restore_cluster_host.ip.endswith(".com"):
options = '--node-init-hostname ' + self.backupset.restore_cluster_host.ip
output, _ = shell.execute_couchbase_cli(cli_command=cmd_init, options=options,
cluster_host="localhost",
user=self.backupset.restore_cluster_host.rest_username,
password=self.backupset.restore_cluster_host.rest_password)
if not self._check_output("SUCCESS: Node initialize", output):
raise("Failed to set hostname")
shell.disconnect()
self.log.info("\n*** Done reset cluster")
self.sleep(10)
""" Add built-in user cbadminbucket to second cluster """
self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])
self.backupset.start = start
self.backupset.end = end
self.log.info("*** start restore validation")
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=">=",
expected_error=self.expected_error)
if self.backupset.number_of_backups == 1:
continue
while "{0}/{1}".format(start, end) in restored:
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
restored["{0}/{1}".format(start, end)] = ""
def test_backup_restore_after_rebalance(self):
"""
1. Creates the default bucket on the cluster and loads it with the given number of items
2. Does a rebalance on cluster to be backed up with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
3. Takes a backup
4. Does a rebalance on cluster to be restored to with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
5. Performs a restore on the restore cluster
"""
serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create_validate()
self.backupset.number_of_backups = 1
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)
rebalance.result()
self.backup_cluster_validate()
if not self.same_cluster:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]
serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)
else:
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function="<=")
def test_backup_restore_with_rebalance(self):
"""
1. Creates the default bucket on the cluster and loads it with the given number of items
2. Does a rebalance on cluster to be backed up with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
3. Takes a backup while rebalance is going on
4. Does a rebalance on cluster to be restored to with specified number of servers in (test param nodes_in) and
servers out (test param nodes_out)
5. Performs a restore on the restore cluster while rebalance is going on
"""
serv_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
serv_out = self.servers[self.nodes_init - self.nodes_out:self.nodes_init]
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create_validate()
self.backupset.number_of_backups = 1
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, serv_in, serv_out)
self.sleep(10)
count = 0
while rebalance.state != "FINISHED":
if count == 0:
self.backup_cluster_validate()
count += 1
if not self.same_cluster:
self._initialize_nodes(Cluster(), self.input.clusters[0][:self.nodes_init])
serv_in = self.input.clusters[0][self.nodes_init: self.nodes_init + self.nodes_in]
serv_out = self.input.clusters[0][self.nodes_init - self.nodes_out: self.nodes_init]
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_in, serv_out)
else:
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, serv_out, serv_in)
self.sleep(10)
count = 0
while rebalance.state != "FINISHED":
if count == 0:
self.backup_restore_validate(compare_uuid=False, seqno_compare_function="<=")
count += 1
def test_backup_restore_with_ops(self):
"""
1. Creates the default bucket on the cluster and loads it with the given number of items
2. Performs the specified ops (test param ops-type) and creates backups the specified number of times
(test param number_of_backups)
3. Performs restores the same number of times with random start and end values
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
initial_gen = copy.deepcopy(gen)
initial_keys = []
for x in initial_gen:
initial_keys.append(x[0])
self.log.info("Start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", 0)
self.ops_type = self.input.param("ops-type", "update")
self.log.info("Create backup repo ")
self.backup_create()
for i in range(1, self.backupset.number_of_backups + 1):
self._backup_restore_with_ops()
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
if self.compact_backup and self.ops_type == "delete":
self.log.info("Start to compact backup ")
self.backup_compact_validate()
self.log.info("Validate deleted keys")
self.backup_compact_deleted_keys_validation(initial_keys)
self.log.info("start restore cluster ")
restored = {"{0}/{1}".format(start, end): ""}
for i in range(1, self.backupset.number_of_backups + 1):
self.backupset.start = start
self.backupset.end = end
self._backup_restore_with_ops(backup=False, compare_function=">=")
if self.backupset.number_of_backups == 1:
continue
while "{0}/{1}".format(start, end) in restored:
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
restored["{0}/{1}".format(start, end)] = ""
def _backup_restore_with_ops(self, exp=0, backup=True, compare_uuid=False,
compare_function="==", replicas=False,
mode="memory", node=None, repeats=0,
validate_directory_structure=True):
self.ops_type = self.input.param("ops-type", "update")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self.log.info("Start doing ops: %s " % self.ops_type)
if node is None:
node = self.master
self._load_all_buckets(node, gen, self.ops_type, exp)
if backup:
self.backup_cluster_validate(repeats=repeats,
validate_directory_structure=validate_directory_structure)
else:
self.backup_restore_validate(compare_uuid=compare_uuid,
seqno_compare_function=compare_function,
replicas=replicas, mode=mode)
def test_backup_list(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes list command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_list_validate()
def test_backup_list_optional_switches(self):
"""
1. Creates specified buckets on the cluster and loads it with given number of items
Note: this test should be run with 2 buckets
2. Creates two backupsets
3. Creates two backups on each of the backupset
4. Executes list command with --name and validates
5. Executes list command with --name and --incr-backup and validates
6. Executes list command with --name, --incr-backup and --bucket-backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.name = "backup2"
self.backup_create(del_old_backup=False)
self._take_n_backups(n=2)
incr_names = 0
backup_name = False
warnning_mesg = "is either empty or it got interrupted"
self.backupset.backup_list_name = "backup"
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warnning_mesg in line:
continue
if self.backupset.backup_list_name in line:
backup_name = True
if self.backups[0] in line:
incr_names += 1
if self.backups[1] in line:
incr_names += 1
self.assertTrue(backup_name, "Expected backup name not found in output")
self.log.info("Expected backup name found in output")
self.assertEqual(incr_names, 2, "Expected backups were not listed for --name option")
self.log.info("Expected backups listed for --name option")
incr_names = 0
backup_name = False
self.backupset.backup_list_name = "backup2"
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warnning_mesg in line:
continue
if self.backupset.backup_list_name in line:
backup_name = True
if self.backups[2] in line:
incr_names += 1
if self.backups[3] in line:
incr_names += 1
self.assertTrue(backup_name, "Expected backup name not found in output")
self.log.info("Expected backup name found in output")
self.assertEqual(incr_names, 2, "Expected backups were not listed for --name option")
self.log.info("Expected backups listed for --name option")
buckets = 0
name = False
self.backupset.backup_list_name = "backup"
self.backupset.backup_incr_backup = self.backups[0]
status, output, message = self.backup_list()
if not status:
self.fail(message)
for line in output:
if warnning_mesg in line:
continue
if self.backupset.backup_incr_backup in line:
name = True
if self.buckets[0].name in line:
buckets += 1
if self.buckets[1].name in line:
buckets += 1
self.assertTrue(name, "Expected incremental backup name not found in output")
self.log.info("Expected incrmental backup name found in output")
self.assertEqual(buckets, 2, "Expected buckets were not listed for --incr-backup option")
self.log.info("Expected buckets were listed for --incr-backup option")
name = False
items = 0
self.backupset.backup_list_name = "backup2"
self.backupset.backup_incr_backup = self.backups[2]
self.backupset.bucket_backup = self.buckets[0].name
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
output = json.loads(output[0])
if self.buckets[0].name == output["name"]:
name = True
items = output["items"]
self.assertTrue(name, "Expected bucket not listed for --bucket-backup option")
self.log.info("Expected bucket listed for --bucket-backup option")
self.assertEqual(items, self.num_items, "Mismatch in items for --bucket-backup option")
self.log.info("Expected number of items for --bucket-backup option")
def test_list_with_large_number_of_backups(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a large number of backups
3. Executes list command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=25)
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]["backups"]
else:
self.fail("No output content")
self.assertEqual(len(bk_info), len(self.backups),
"Number of backups did not match. In repo: {0} != in bk: {1}"\
.format(len(bk_info), len(self.backups)))
for backup in bk_info:
if backup["date"] not in self.backups:
raise("backup date does not match")
self.log.info("Number of backups matched")
def _take_n_backups(self, n=1, validate=False):
for i in range(1, n + 1):
if validate:
self.backup_cluster_validate()
else:
self.backup_cluster()
def test_backup_info_with_start_end_flag(self):
"""
1. Create default bucket and load items to bucket
2. Run the number of backups passed by param number_of_backups=x
3. Run subcommand info with random start and end values. Each value can be an index, a date, or a backup name
4. conf file name: bkrs-info-with-start-end-flag.conf
"""
if self.bkinfo_date_start_ago:
conn = RemoteMachineShellConnection(self.backupset.backup_host)
start_date_cmd = "date --date=\"{} days ago\" '+%d-%m-%Y' "\
.format(self.bkinfo_date_start_ago)
output, error = conn.execute_command(start_date_cmd)
start_date = output[0]
end_date_cmd = "date '+%d-%m-%Y' "
output, error = conn.execute_command(end_date_cmd)
end_date = output[0]
conn.disconnect()
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
initial_gen = copy.deepcopy(gen)
self.log.info("Start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Create backup repo ")
self.backup_create()
for i in range(1, self.backupset.number_of_backups + 1):
self.backup_cluster()
self.log.info("done running backup")
if self.bkinfo_start_end_with_bkname:
bkname_start_index = int(self.bkinfo_start_end_with_bkname.split(":")[0])
bkname_start = self.backups[bkname_start_index]
bkname_end_index = int(self.bkinfo_start_end_with_bkname.split(":")[1])
bkname_end = self.backups[bkname_end_index]
if self.bkinfo_date_start_ago:
o, e = self.backup_info(start=start_date,end=end_date)
elif self.bkinfo_start_end_with_bkname:
o, e = self.backup_info(start=bkname_start,end=bkname_end)
else:
o, e = self.backup_info(start=self.bkinfo_start,end=self.bkinfo_end)
if o and o[0]:
bk_info = json.loads(o[0])
bk_info = bk_info["backups"]
if self.debug_logs:
print("\nbk info : ", bk_info)
print("\n bkinfo len: ", len(bk_info))
print("\nbk info date : ", bk_info[0]["date"])
print("\nbk info type : ", bk_info[0]["type"])
print("\nnubmer backup : ", self.backups)
if self.bkinfo_start == 1 and self.bkinfo_end == 1:
if "FULL" not in bk_info[0]["type"]:
self.fail("First backup is not full backup")
elif self.bkinfo_start > 1 and self.bkinfo_end > 1:
if "INCR" not in bk_info[0]["type"]:
self.fail("> 0th backup is not incr backup")
if self.bkinfo_date_start_ago:
if len(bk_info) != len(self.backups):
self.fail("bkrs info failed to show all backups today")
elif self.bkinfo_start_end_with_bkname:
if len(bk_info) != (bkname_end_index - bkname_start_index + 1):
self.fail("bkrs info does not show correct nubmer of backups with backup name")
elif len(bk_info) != (self.bkinfo_end - self.bkinfo_start + 1):
self.fail("bkrs info does not show correct nubmer of backups")
def test_backup_compact(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes compact command on the backupset and validates the output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_compact_validate()
def test_backup_with_purge_interval_set_to_float(self):
"""
cbbackupmgr should handle the case where the purge interval is set to a float number
:return: None
"""
purgeInterval = 1.5
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("Set purge interval to float value '%s'" % purgeInterval)
rest = RestConnection(self.backupset.cluster_host)
status, content = rest.set_purge_interval_and_parallel_compaction(purgeInterval)
if status:
self.log.info("Done set purge interval value '%s'" % purgeInterval)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate()
else:
self.fail("Failed to set purgeInterval value")
def test_restore_from_compacted_backup(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backup and validates it
3. Executes compact command on the backupset
4. Restores from the compacted backup and validates it
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.backup_compact()
self.backup_restore_validate()
def test_backup_with_compress_flag(self):
"""
1. Load docs into bucket
2. Backup without compress flag
3. Get backup data size
4. Delete backup repo
5. Do backup again with compress flag
6. Compare those data if it flag works
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backupset.backup_compressed = False
self.backup_cluster()
no_compression = self.get_database_file_info()
self.log.info("\nDelete old backup and do backup again with compress flag")
self.backup_create()
self.backupset.backup_compressed = self.input.param("backup-compressed", False)
self.backup_cluster()
with_compression = self.get_database_file_info()
self.validate_backup_compressed_file(no_compression, with_compression)
def test_backup_restore_with_credentials_env(self):
"""
password will pass as in env variable
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
output, error = self.backup_cluster()
if output and not self._check_output("Backup completed successfully", output):
self.fail("Failed to run with password env %s " % output)
self.backup_cluster_validate(skip_backup=True)
self.backup_list()
self.backup_restore_validate()
def test_backup_with_update_on_disk_of_snapshot_markers(self):
"""
This test is for MB-25727 (using cbbackupwrapper).
Remove this test once cbbackupwrapper support is dropped.
No default bucket, default_bucket=false
Create bucket0
Load 100K items to bucket0
Stop persistence on server via cbepctl
Load another 100K items.
Run full backup with cbbackupwrapper
Load another 100K items.
Run diff backup. Backup process will hang with error in memcached as shown above
:return: None
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if version[:5] == "6.5.0":
self.log.info("\n\n******* Due to issue in MB-36904, \
\nthis test will be skipped in 6.5.0 ********\n")
return
gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=100000)
gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size, end=100000)
gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size, end=100000)
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.create_bucket(bucket="bucket0", ramQuotaMB=1024)
self.buckets = rest_conn.get_buckets()
authentication = "-u Administrator -p password"
self._load_all_buckets(self.master, gen1, "create", 0)
self.log.info("Stop persistent")
cluster_nodes = rest_conn.get_nodes()
clusters = copy.deepcopy(cluster_nodes)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
for node in clusters:
shell.execute_command("%scbepctl%s %s:11210 -b %s stop %s" % \
(self.cli_command_location,
self.cmd_ext,
node.ip,
"bucket0",
authentication))
shell.disconnect()
self.log.info("Load 2nd batch docs")
self._load_all_buckets(self.master, gen2, "create", 0)
self.log.info("Run full backup with cbbackupwrapper")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
backup_dir = self.tmp_path + "backup" + self.master.ip
shell.execute_command("rm -rf %s" % backup_dir)
shell.execute_command("mkdir %s" % backup_dir)
shell.execute_command("cd %s;./cbbackupwrapper%s http://%s:8091 %s -m full %s"
% (self.cli_command_location, self.cmd_ext,
self.backupset.cluster_host.ip,
backup_dir,
authentication))
self.log.info("Load 3rd batch docs")
self._load_all_buckets(self.master, gen3, "create", 0)
self.log.info("Run diff backup with cbbackupwrapper")
output, _ = shell.execute_command("cd %s;./cbbackupwrapper%s http://%s:8091 %s -m diff %s"
% (self.cli_command_location, self.cmd_ext,
self.backupset.cluster_host.ip,
backup_dir,
authentication))
if output and "SUCCESSFULLY COMPLETED" not in output[1]:
self.fail("Failed to backup as the fix in MB-25727")
shell.disconnect()
def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):
"""
This test is for MB-25809
Set default_bucket=False
Create bucket with 1 replica
Load 10K items to bucket
Backup data from bucket
Create other bucket with 2 replicas in other cluster
Restore data to bucket with 2 replicas
Verify data and bucket setting. It must retain 2 replicas
:return: None
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=10000)
if not self.new_replicas:
self.fail("This test needs to pass param 'new-replicas' to run")
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.log.info("Start backup cluster")
self.backup_cluster_validate()
self.backup_restore_validate()
self.log.info("replicas from backup bucket: {0}".format(self.num_replicas))
self.log.info("replica in restore bucket should be {0} after restore"\
.format(self.new_replicas))
rest_r = RestConnection(self.backupset.restore_cluster_host)
for bucket in self.buckets:
bucket_stats = rest_r.get_bucket_json(bucket.name)
if self.new_replicas != bucket_stats["replicaNumber"]:
self.fail("replia number in bucket {0} did change after restore"\
.format(bucket.name))
self.log.info("Verified replica in bucket {0}: {1}"\
.format(bucket.name,
bucket_stats["replicaNumber"]))
def test_restore_with_invalid_bucket_config_json(self):
"""
When bucket-config.json in latest backup corrupted,
The merge backups should fail.
1. Create a bucket and load docs into it.
2. Create a backup and validate it.
3. Run full backup
4. Load more docs into bucket
5. Run backup (incremental) and verify.
6. Modify backup-config.json to make invalid json in content
7. Run restore to other bucket, restore should fail with error
"""
gen = BlobGenerator("ent-backup_1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
error_msg = "Error merging data: Unable to read bucket settings because bucket-config.json is corrupt"
if not status:
self.fail(message)
backup_count = 0
for line in output:
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}",
line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
backup_bucket_config_path = self.backupset.directory + "/backup" + \
"/" + self.backups[self.backupset.number_of_backups - 1] + \
"/" + self.buckets[0].name + "-*" \
"/bucket-config.json"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.log.info("Remove } in bucket-config.json to make it invalid json ")
remote_client.execute_command("sed -i 's/}//' %s " % backup_bucket_config_path)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start + 1,
self.backupset.number_of_backups + 1)
result, output, _ = self.backup_merge()
if result:
self.log.info("Here is the output from command %s " % output[0])
if not self._check_output(error_msg, output):
self.fail("read bucket config should fail since bucket-config.json is invalid")
remote_client.disconnect()
def test_restore_with_non_exist_bucket(self):
"""
1. Create a bucket A
2. Load docs to bucket A
3. Do backup bucket A
4. Delete bucket A
5. Restore to bucket A (non exist bucket)
6. Expect errors throw out
"""
gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
self.log.info("Start to delete bucket")
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
output, _ = self.backup_restore()
if output and "Error restoring cluster" not in output[0]:
self.fail("Restore to non exist bucket should fail")
def test_merge_backup_from_old_and_new_bucket(self):
"""
1. Create a bucket A
2. Load docs with key 1
3. Do backup
4. Delete bucket A
5. Re-create bucket A
6. Load docs with key 2
7. Do backup
8. Do merge backup. Verify backup only contain docs key 2
"""
gen = BlobGenerator("ent-backup1_", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
if self.bucket_delete:
self.log.info("Start to delete bucket")
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
elif self.bucket_flush:
self.log.info("Start to flush bucket")
self._all_buckets_flush()
gen = BlobGenerator("ent-backup2_", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("Start to load bucket again with different key")
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_cluster()
self.backupset.number_of_backups += 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = self.backupset.number_of_backups
self.merged = True
result, output, _ = self.backup_merge()
self.backupset.end -= 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
self.skip_consistency, self.per_node)
self.log.info("*** Start to validate data in merge backup ")
self.validate_backup_data(self.backupset.backup_host, [self.master],
"ent-backup", False, False, "memory",
self.num_items, "ent-backup1")
self.backup_cluster_validate(skip_backup=True)
def test_merge_backup_with_merge_kill_and_re_merge(self):
"""
1. Create a bucket A
2. Load docs
3. Do backup
4. Load docs
5. Do backup
6. Merge backup
7. Kill merge process
8. Merge backup again
Result: 2nd merge should run ok
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = 2
self.merged = True
merge_threads = []
merge_thread = Thread(target=self.backup_merge)
merge_threads.append(merge_thread)
merge_thread.start()
merge_kill_thread = Thread(target=self._kill_cbbackupmgr)
merge_threads.append(merge_kill_thread)
merge_kill_thread.start()
for merge_thread in merge_threads:
merge_thread.join()
status, output, message = self.backup_list()
if not status:
self.fail(message)
result, output, _ = self.backup_merge()
status, output, message = self.backup_list()
if not status:
self.fail(message)
def test_merge_backup_with_partial_backup(self):
"""
1. Create a bucket A
2. Load docs
3. Do backup
4. Load docs
5. Do backup and kill backup process
6. Merge backup. Merge should fail
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_threads = []
backup_thread = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread)
backup_thread.start()
backup_kill_thread = Thread(target=self._kill_cbbackupmgr)
backup_threads.append(backup_kill_thread)
backup_kill_thread.start()
for backup_thread in backup_threads:
backup_thread.join()
self.backupset.number_of_backups += 1
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = 3
self.merged = True
status, output, error = self.backup_merge()
if status:
self.fail("This merge should fail due to last backup killed, not complete yet")
elif "Merging backup failed" in error:
self.log.info("Test failed as expected as last backup failed to complete")
status, output, message = self.backup_list()
if not status:
self.fail(message)
def _kill_cbbackupmgr(self):
"""
kill all cbbackupmgr processes
"""
self.sleep(1, "times need for cbbackupmgr process run")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
if self.os_name != "windows":
cmd = "ps aux | grep cbbackupmgr | gawk '{print $2}' | xargs kill -9"
output, _ = shell.execute_command(cmd)
else:
cmd = "tasklist | grep cbbackupmgr | gawk '{printf$2}'"
output, _ = shell.execute_command(cmd)
if output:
kill_cmd = "taskkill /F /T /pid %d " % int(output[0])
output, _ = shell.execute_command(kill_cmd)
if output and "SUCCESS" not in output[0]:
self.fail("Failed to kill cbbackupmgr on windows")
shell.disconnect()
def test_merge_backup_with_purge_deleted_keys(self):
"""
1. Load 100K docs to a bucket A with key 1
2. Delete 50K docs from bucket A
3. Load 50K docs with key 2 to bucket A
4. Take backup
        5. Run compaction on each vbucket to purge all deleted keys
6. Load again 25K docs with key 3
7. Run backup again
8. Load another 25K docs with key 4
9. Run backup. It should not fail
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
self.log.info("Delete half docs of 1st batch")
delete_gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items // 2)
self._load_all_buckets(self.master, delete_gen, "delete", 0)
self.log.info("Load 2nd batch docs")
create_gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size,
end=self.num_items // 2)
self._load_all_buckets(self.master, create_gen2, "create", 0)
self.log.info("Start backup")
self.backup_create()
self.backup_cluster()
nodes = []
upto_seq = 100000
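        # upto_seq is handed to compact_vbuckets below as the purge-before sequence
        # number, so tombstones created before it should be purged by compaction.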
self.log.info("Start compact each vbucket in bucket")
rest = RestConnection(self.master)
cluster_nodes = rest.get_nodes()
for bucket in RestConnection(self.master).get_buckets():
found = self.get_info_in_database(self.backupset.cluster_host, bucket, "deleted")
if found:
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
shell.compact_vbuckets(len(bucket.vbuckets), cluster_nodes, upto_seq)
shell.disconnect()
found = self.get_info_in_database(self.backupset.cluster_host, bucket, "deleted")
if not found:
self.log.info("Load another docs to bucket %s " % bucket.name)
create_gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items // 4)
self._load_bucket(bucket, self.master, create_gen3, "create",
self.expire_time)
self.backup_cluster()
create_gen4 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items // 4)
self._load_bucket(bucket, self.master, create_gen4, "create",
self.expire_time)
self.backup_cluster()
self.backupset.end = 3
status, output, message = self.backup_merge()
if not status:
self.fail(message)
else:
self.fail("cbcompact failed to purge deleted key")
def test_merge_backup_with_failover_logs(self):
"""
1. Load 100K docs into bucket.
2. Wait for all docs persisted.
3. Stop persistence.
4. Load another 100K docs to bucket.
        5. Kill memcached; this will generate about 4 failover log entries.
./cbstats localhost:11210 -u username -p pass failovers | grep num_entries
6. Take backup.
7. Load another 100K docs
8. Take backup again.
Verify:
        Only the 1st backup is a full backup
        All backups after that should be incremental backups
        In 4.5.1, all backups would be full backups
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup1", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
failed_persisted_bucket = []
rest = RestConnection(self.master)
cluster_nodes = rest.get_nodes()
for bucket in self.buckets:
ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,
bucket.name, 'ep_queue_size',
0, timeout_in_seconds=120)
if not ready:
failed_persisted_bucket.append(bucket.name)
if failed_persisted_bucket:
self.fail("Buckets %s did not persisted." % failed_persisted_bucket)
self.log.info("Stop persistence at each node")
clusters = copy.deepcopy(cluster_nodes)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
for bucket in self.buckets:
for node in clusters:
shell.execute_command("%scbepctl%s %s:11210 -b %s stop" % \
(self.cli_command_location,
self.cmd_ext,
node.ip,
bucket.name))
shell.disconnect()
self.log.info("Load 2nd batch docs")
create_gen2 = BlobGenerator("ent-backup2", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen2, "create", 0)
self.sleep(5)
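        # Kill memcached while the 2nd batch is still unpersisted; per the docstring this
        # should generate about 4 failover log entries (polled via cbstats below).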
self.log.info("Crash cluster via kill memcached")
for node in clusters:
for server in self.servers:
if node.ip == server.ip:
num_entries = 4
reach_num_entries = False
while not reach_num_entries:
shell = RemoteMachineShellConnection(server)
shell.kill_memcached()
ready = False
while not ready:
if not RestHelper(RestConnection(server)).is_ns_server_running():
self.sleep(10)
else:
ready = True
cmd = "%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries " \
"| gawk%s '{printf $2}' | grep -m 5 '4\|5\|6\|7'" \
% (self.cli_command_location, self.cmd_ext, server.ip,
"cbadminbucket", "password", self.cmd_ext)
output, error = shell.execute_command(cmd)
shell.disconnect()
if output:
self.log.info("number failover logs entries reached. %s " % output)
reach_num_entries = True
self.backup_create()
self.log.info("Start backup data")
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Load 3rd batch docs")
create_gen3 = BlobGenerator("ent-backup3", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen3, "create", 0)
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail(message)
def test_backupmgr_with_short_option(self):
"""
Test short option flags at each option
"""
cmd = "%scbbackupmgr%s " % (self.cli_command_location, self.cmd_ext)
cmd += "%s " % self.input.param("command", "backup")
options = " -%s %s " % (self.input.param("repo", "-repo"),
self.backupset.name)
options += " -%s %s" % (self.input.param("archive", "-archive"),
self.backupset.directory)
if self.input.param("command", "backup") != "list":
options += " -%s http://%s:%s" % (self.input.param("cluster", "-cluster"),
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
options += " -%s Administrator" % self.input.param("bkusername", "-username")
options += " -%s password" % self.input.param("bkpassword", "-password")
self.backup_create()
shell = RemoteMachineShellConnection(self.backupset.backup_host)
output, error = shell.execute_command("%s %s " % (cmd, options))
shell.log_command_output(output, error)
shell.disconnect()
if error:
self.fail("There is a error in %s " % error)
def test_backupmgr_help_display(self):
"""
        Test that the help manual is displayed for each option.
        We do not compare the whole content, only the first
        few lines, to make sure the manual page is displayed.
"""
display_option = self.input.param("display", "-h")
if self.input.param("subcommand", None) is None:
subcommand = ""
else:
subcommand = self.input.param("subcommand", None)
if subcommand == "list":
subcommand = "info"
cmd = "{0}cbbackupmgr{1} ".format(self.cli_command_location, self.cmd_ext)
if display_option == "--help":
display_option = self.long_help_flag
elif display_option == "-h":
self.long_help_flag = self.short_help_flag
cmd += " {0} {1} ".format(subcommand, display_option)
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
output, error = shell.execute_command("{0} ".format(cmd))
self.log.info("Verify print out help message")
if display_option == "-h":
if subcommand == "":
content = ['cbbackupmgr [<command>] [<args>]', '',
' backup Backup a Couchbase cluster']
elif subcommand == "help":
content = ['cbbackupmgr help [<command>] [<args>]', '',
' archivelayout View the archive directory layout structure']
else:
content = ['cbbackupmgr {0} [<args>]'.format(subcommand), '',
'Required Flags:']
self.validate_help_content(output[:3], content)
elif display_option == "--help":
content = None
if subcommand == "":
content = \
['CBBACKUPMGR(1) Couchbase Server Manual CBBACKUPMGR(1)']
self.validate_help_content(output, content)
else:
subcmd_cap = subcommand.upper()
content = \
['CBBACKUPMGR-{0}(1) Couchbase Server Manual CBBACKUPMGR-{1}(1)'\
.format(subcmd_cap, subcmd_cap)]
self.validate_help_content(output, content)
if self.bkrs_flag is not None:
self.assertTrue(self._check_output(self.bkrs_flag, output),
"Missing flag {0} in help content".format(self.bkrs_flag))
shell.disconnect()
def test_cbbackupmgr_help_contains_objstore_info(self):
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
supports_read_only = ['restore']
for sub_command in ['backup', 'collect-logs', 'config', 'examine', 'info', 'remove', 'restore']:
output, error = remote_client.execute_command(f"{self.cli_command_location}/cbbackupmgr {sub_command} -h")
if error:
self.fail(f"Expected to be able to get help for {sub_command}")
arguments = ['--obj-access-key-id', '--obj-cacert', '--obj-endpoint', '--obj-no-ssl-verify',
'--obj-region', '--obj-secret-access-key', '--obj-staging-dir', '--s3-force-path-style',
'--obj-log-level']
if sub_command in supports_read_only:
arguments.append('--obj-read-only')
for argument in arguments:
found = False
for line in output:
found = found or argument in line
self.assertTrue(found, f"Expected to find help about {argument}")
def test_backup_restore_with_optional_flags(self):
"""
1. Create a bucket
2. Load docs to bucket
3. Backup with optional flags like no-ssl-verify, secure-conn
4. Verify backup data in backup file
"""
self.log.info("Load 1st batch docs")
create_gen1 = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, create_gen1, "create", 0)
self.backup_create()
verify_data = True
output, error = self.backup_cluster()
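        # Expected outcomes: a secure connection without a client cert should fail,
        # any other error just skips data validation, and otherwise the output must
        # report "Backup completed successfully".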
if self.backupset.secure_conn:
if self.backupset.bk_no_cert:
if self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
elif self._check_output("Error", output):
verify_data = False
else:
if not self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
if verify_data:
self.validate_backup_data(self.backupset.backup_host,
self.servers[:self.nodes_init],
"ent-backup", False, False, "memory",
self.num_items, None)
if self.do_restore:
self.log.info("Restore with secure connection")
self.backup_restore()
def test_restore_with_filter_regex(self):
"""
1. Create a bucket
2. Load docs to bucket with key patterned
3. Backup docs
4. Delete bucket
5. Restore docs with regex
6. Verify only key or value in regex restored to bucket
NOTE: This test requires a specific config/ini to run correctly; if provided with an incorrect config
testrunner will restore data into the bucket that was backed up on the same cluster without performing a
flush. This will mean cbbackupmgr will restore with conflict resolution enabled and the validation will find
        an unexpected number of keys (all of them) in the target bucket.
"""
key_name = "ent-backup"
if self.backupset.random_keys:
key_name = "random_keys"
self.validate_keys = self.input.param("validate_keys", False)
if self.validate_keys:
gen = BlobGenerator(key_name, "ent-backup-", self.value_size,
end=self.num_items)
else:
gen = DocumentGenerator('random_keys', '{{"age": {0}}}', list(range(100)),
start=0, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start backup")
self.backup_create()
self.backup_cluster()
self.backup_restore()
self.merged = False
regex_check = self.backupset.filter_keys
if not self.backupset.filter_keys:
regex_check = self.backupset.filter_values
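        # Validate that only keys (or values, when filtering on values) matching the
        # regex were restored to the target bucket.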
self.validate_backup_data(self.backupset.backup_host,
[self.backupset.restore_cluster_host],
key_name, False, False, "memory",
self.num_items, None,
validate_keys=self.validate_keys,
regex_pattern=regex_check)
def test_backup_with_rbac(self):
"""
1. Create a cluster
        2. Create a bucket and load data
3. Create a user with specific role
param in conf: new_user
param in conf: new_role
Roles:
admin, ro_admin, cluster_admin, bucket_full_access[*], bucket_admin[*],
views_admin[*],
replication_admin, roadmin_no_access, cluster_admin_no_access,
bucket_admin_no_access, view_admin_no_access, replication_admin_no_access,
view_replication_admin, replication_ro_admin, bucket_view_replication_admin,
4. Run backup with new user created
5. Verify if backup command handles user role correctly
"""
all_buckets = self.input.param("all_buckets", False)
backup_failed = False
if self.create_fts_index:
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)), start=0,
end=self.num_items)
index_definition = INDEX_DEFINITION
index_name = index_definition['name'] = "age"
fts_server = self.get_nodes_from_services_map(service_type="fts")
rest_fts = RestConnection(fts_server)
try:
self.log.info("Create fts index")
rest_fts.create_fts_index(index_name, index_definition)
except Exception as ex:
self.fail(ex)
else:
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
if self.create_views:
self._create_views()
self.backup_create()
if all_buckets:
if "-" in self.cluster_new_role:
self.cluster_new_role = "[*],".join(self.cluster_new_role.split("-")) + "[*]"
else:
self.cluster_new_role = self.cluster_new_role + "[*]"
admin_roles = ["cluster_admin", "eventing_admin"]
for role in admin_roles:
if role in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace(role + "[*]", role)
self.log.info("\n***** Create new user: {0} with role: {1} to do backup *****"\
.format(self.cluster_new_user, self.cluster_new_role))
testuser = [{"id": "{0}".format(self.cluster_new_user),
"name": "{0}".format(self.cluster_new_user),
"password": "password"}]
rolelist = [{"id": "{0}".format(self.cluster_new_user),
"name": "{0}".format(self.cluster_new_user),
"roles": "{0}".format(self.cluster_new_role)}]
users_can_backup_all = ["admin", "bucket_full_access[*]",
"data_backup[*]", "eventing_admin",
"cluster_admin", "backup_admin"]
users_can_not_backup_all = ["views_admin[*]", "replication_admin",
"replication_target[*]", "data_monitoring[*]",
"data_writer[*]", "data_reader[*]",
"data_dcp_reader[*]", "fts_searcher[*]",
"fts_admin[*]", "query_manage_index[*]",
"ro_admin", "bucket_admin[*]", "cluster_admin"]
try:
status = self.add_built_in_server_user(testuser, rolelist)
if not status:
self.fail("Fail to add user: {0} with role: {1} " \
.format(self.cluster_new_user,
self.cluster_new_role))
output, error = self.backup_cluster()
success_msg = 'Backup completed successfully'
fail_msg = ["Error backing up cluster:"]
for bucket in self.buckets:
fail_msg.append('Backed up bucket "{0}" failed'.format(bucket.name))
if self.cluster_new_role in users_can_backup_all:
if not self._check_output(success_msg, output):
rest_bk = RestConnection(self.backupset.cluster_host)
eventing_service_in = False
bk_cluster_services = list(rest_bk.get_nodes_services().values())
for srv in bk_cluster_services:
if "eventing" in srv:
eventing_service_in = True
eventing_err = ["Invalid permissions to backup eventing data",
"cluster.eventing.functions!manage"]
if eventing_service_in and self._check_output(eventing_err, output) and \
("admin" not in self.cluster_new_role or \
"eventing_admin" not in self.cluster_new_role):
self.log.info("Only admin or eventing_admin role could backup eventing service")
else:
self.fail("User {0} failed to backup data.\n"
.format(self.cluster_new_role) + \
"Here is the output {0} ".format(output))
elif self.cluster_new_role in users_can_not_backup_all:
if not self._check_output(fail_msg, output):
self.fail("cbbackupmgr failed to block user to backup")
else:
backup_failed = True
status, _, message = self.backup_list()
if not status:
self.fail(message)
if self.do_verify and not backup_failed:
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup,
self.buckets,
self.skip_consistency,
self.per_node)
self.log.info("*** Start to validate data in merge backup ")
result = self.validate_backup_data(self.backupset.backup_host,
[self.master],
"ent-backup", False, False, "memory",
self.num_items, None)
self.validate_backup_views()
except Exception as e:
if e:
print(("Exception error: ", e))
if self.cluster_new_role in users_can_not_backup_all:
error_found = False
error_messages = ["Error backing up cluster: Forbidden",
"Could not find file shard_0.sqlite",
"Error backing up cluster: Invalid permissions",
"Database file is empty",
"Error backing up cluster: Unable to find the latest vbucket",
"Failed to backup bucket"]
if self.do_verify:
if str(e) in error_messages or backup_failed:
error_found = True
if not error_found:
raise Exception("cbbackupmgr does not block user role: {0} to backup" \
.format(self.cluster_new_role))
if self.cluster_new_role == "views_admin[*]" and self.create_views:
status, mesg = self.validate_backup_views(self.backupset.backup_host)
if not status:
raise Exception(mesg)
if "Expected error message not thrown" in str(e):
raise Exception("cbbackupmgr does not block user role: {0} to backup" \
.format(self.cluster_new_role))
if self.cluster_new_role in users_can_backup_all:
if not self._check_output(success_msg, output):
self.fail(e)
finally:
if backup_failed:
self.log.info("cbbackupmgr blocked user: {0} to backup"\
.format(self.cluster_new_role))
self.log.info("Delete new create user: {0} ".format(self.cluster_new_user))
shell = RemoteMachineShellConnection(self.backupset.backup_host)
curl_path = ""
if self.os_name == "windows":
curl_path = self.cli_command_location
cmd = "{0}curl{1} -g -X {2} -u {3}:{4} http://{5}:8091/settings/rbac/users/local/{6}"\
.format(curl_path,
self.cmd_ext,
"DELETE",
self.master.rest_username,
self.master.rest_password,
self.backupset.cluster_host.ip,
self.cluster_new_user)
output, error = shell.execute_command(cmd)
shell.disconnect()
def test_restore_with_rbac(self):
"""
        1. Create a backup data set.
        2. Set up the cluster.
        3. Restore data back to the cluster.
        Important:
        This test needs to copy entbackup-mh.tgz
        to /root or /cygdrive/c/Users/Administrator on the backup host.
        Files location: 172.23.121.227:/root/entba*.tgz
"""
all_buckets = self.input.param("all_buckets", False)
self.log.info("Copy backup dataset to tmp dir")
shell = RemoteMachineShellConnection(self.backupset.backup_host)
# Since we are just wiping out the archive here, we can just run the object store teardown
if self.objstore_provider:
self.objstore_provider.teardown(shell.extract_remote_info().type.lower(), shell)
else:
shell.execute_command("rm -rf {0} ".format(self.backupset.directory))
shell.execute_command("rm -rf {0} ".format(self.backupset.directory.split("_")[0]))
backup_file = ENT_BKRS
backup_dir_found = False
backup_dir = "entbackup_{0}".format(self.master.ip)
output, error = shell.execute_command("ls | grep entbackup")
self.log.info("check if %s dir exists on this server " % backup_dir)
if output:
for x in output:
if x == backup_dir:
backup_dir_found = True
if not backup_dir_found:
self.log.info("%s dir does not exist on this server. Downloading.. "
% backup_dir)
shell.execute_command("{0} -q {1} --no-check-certificate -O {2}.tgz "
.format(self.wget, backup_file, backup_dir))
shell.execute_command("tar -zxvf {0}.tgz ".format(backup_dir))
shell.execute_command("mv {0} {1}".format(backup_dir.split("_")[0], backup_dir))
if "-" in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("-", ",")
if self.objstore_provider and self.objstore_provider.schema_prefix() == "s3://":
command = ""
if self.backupset.objstore_region or self.backupset.objstore_access_key_id or self.backupset.objstore_secret_access_key:
command += "env"
if self.backupset.objstore_region:
command += f" AWS_REGION={self.backupset.objstore_region}"
if self.backupset.objstore_access_key_id:
command += f" AWS_ACCESS_KEY_ID={self.backupset.objstore_access_key_id}"
if self.backupset.objstore_secret_access_key:
command += f" AWS_SECRET_ACCESS_KEY={self.backupset.objstore_secret_access_key}"
command += " aws"
if self.backupset.objstore_endpoint:
command += f" --endpoint={self.backupset.objstore_endpoint}"
command += f" s3 sync entbackup_{self.master.ip} s3://{self.backupset.objstore_bucket}/{self.backupset.directory}"
            _, error = shell.execute_command(command, debug=False)  # Contains sensitive info so don't log it
if error:
self.fail(f"Failed to sync backup to S3: {error}")
else:
shell.execute_command("cp -r entbackup_{0}/ {1}/entbackup_{0}"\
.format(self.master.ip, self.tmp_path))
status, _, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Restore data from backup files")
if all_buckets:
if "bucket_full_access" in self.cluster_new_role and \
"bucket_full_access[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("bucket_full_access",
"bucket_full_access[*]")
else:
self.cluster_new_role = self.cluster_new_role + "[*]"
if "data_backup" in self.cluster_new_role and \
"data_backup[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("data_backup",
"data_backup[*]")
if "fts_admin" in self.cluster_new_role and \
"fts_admin[*]" not in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace("fts_admin",
"fts_admin[*]")
admin_roles = ["cluster_admin", "eventing_admin"]
for role in admin_roles:
if role in self.cluster_new_role:
self.cluster_new_role = self.cluster_new_role.replace(role + "[*]", role)
self.log.info("\n***** Create new user: %s with role: %s to do backup *****"
% (self.cluster_new_user, self.cluster_new_role))
testuser = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"password": "password"}]
rolelist = [{"id": "%s" % self.cluster_new_user,
"name": "%s" % self.cluster_new_user,
"roles": "%s" % self.cluster_new_role}]
try:
status = self.add_built_in_server_user(testuser, rolelist)
if not status:
self.fail("Fail to add user: %s with role: %s " \
% (self.cluster_new_user,
self.cluster_new_role))
users_can_restore_all = ["admin", "bucket_full_access[*]",
"data_backup[*]", "eventing_admin"]
users_can_not_restore_all = ["views_admin[*]", "ro_admin",
"replication_admin", "data_monitoring[*]",
"data_writer[*]", "data_reader[*]",
"data_dcp_reader[*]", "fts_searcher[*]",
"fts_admin[*]", "query_manage_index[*]",
"replication_target[*]", "cluster_admin",
"bucket_admin[*]"]
if self.cluster_new_role in users_can_not_restore_all:
self.should_fail = True
output, error = self.backup_restore()
rest_rs = RestConnection(self.backupset.restore_cluster_host)
eventing_service_in = False
rs_cluster_services = list(rest_rs.get_nodes_services().values())
for srv in rs_cluster_services:
if "eventing" in srv:
eventing_service_in = True
eventing_err = "User needs one of the following permissions: cluster.eventing"
if eventing_service_in and self._check_output(eventing_err, output) and \
("admin" not in self.cluster_new_role or \
"eventing_admin" not in self.cluster_new_role):
self.log.info("Only admin role could backup eventing service")
return
success_msg = 'Restore completed successfully'
fail_msg = "Error restoring cluster:"
failed_persisted_bucket = []
ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,
"default", 'ep_queue_size',
0, timeout_in_seconds=120)
if not ready:
failed_persisted_bucket.append("default")
if failed_persisted_bucket:
self.fail("Buckets %s did not persisted." % failed_persisted_bucket)
self.sleep(3)
rest = RestConnection(self.master)
actual_keys = rest.get_active_key_count("default")
print(("\nActual keys in default bucket: %s \n" % actual_keys))
if self.cluster_new_role in users_can_restore_all:
if not self._check_output(success_msg, output):
self.fail("User with roles: %s failed to restore data.\n"
"Here is the output %s " % \
(self.cluster_new_role, output))
roles = []
if "," in self.cluster_new_role:
roles = self.cluster_new_role.split(",")
if set(roles) & set(users_can_not_restore_all) and \
set(roles) & set(users_can_restore_all):
if not self._check_output(success_msg, output):
self.fail("User: %s failed to restore data with roles: %s. " \
"Here is the output %s " % \
(self.cluster_new_user, roles, output))
if int(actual_keys) != 10000:
self.fail("User: %s failed to restore data with roles: %s. " \
"Here is the actual docs in bucket %s " % \
(self.cluster_new_user, roles, actual_keys))
elif self.cluster_new_role in users_can_not_restore_all:
if int(actual_keys) == 1000:
self.fail("User: %s with role: %s should not allow to restore data" \
% (self.cluster_new_user,
self.cluster_new_role))
if not self._check_output(fail_msg, output):
self.fail("cbbackupmgr failed to block user to restore")
finally:
self.log.info("Delete new create user: %s " % self.cluster_new_user)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
curl_path = ""
if self.os_name == "windows":
curl_path = self.cli_command_location
cmd = "%scurl%s -g -X %s -u %s:%s http://%s:8091/settings/rbac/users/local/%s" \
% (curl_path,
self.cmd_ext,
"DELETE",
self.master.rest_username,
self.master.rest_password,
self.backupset.cluster_host.ip,
self.cluster_new_user)
output, error = shell.execute_command(cmd)
shell.disconnect()
def test_backup_restore_with_nodes_reshuffle(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Enlists the default zone of the current cluster - backs up the cluster and validates
3. Creates a new zone - shuffles cluster host to new zone
4. Restores to cluster host and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest_conn = RestConnection(self.backupset.cluster_host)
zones = list(rest_conn.get_zone_names().keys())
source_zone = zones[0]
target_zone = "test_backup_restore"
self.log.info("Current nodes in group {0} : {1}".format(source_zone,
str(list(rest_conn.get_nodes_in_zone(source_zone).keys()))))
self.log.info("Taking backup with current groups setup")
self.backup_create()
self.backup_cluster_validate()
self.log.info("Creating new zone " + target_zone)
rest_conn.add_zone(target_zone)
self.log.info("Moving {0} to new zone {1}".format(self.backupset.cluster_host.ip, target_zone))
rest_conn.shuffle_nodes_in_zones(["{0}".format(self.backupset.cluster_host.ip)], source_zone, target_zone)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
self.log.info("Restoring to {0} after group change".format(self.backupset.cluster_host.ip))
try:
self.log.info("Flush bucket")
rest_conn.flush_bucket()
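            # Flush so the restore target starts empty before the restored data is validated.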
self.backup_restore_validate()
except Exception as ex:
self.fail(str(ex))
finally:
self.log.info("Moving {0} back to old zone {1}".format(self.backupset.cluster_host.ip, source_zone))
rest_conn.shuffle_nodes_in_zones(["{0}".format(self.backupset.cluster_host.ip)], target_zone, source_zone)
self.log.info("Deleting new zone " + target_zone)
rest_conn.delete_zone(target_zone)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
rebalance.result()
def test_backup_restore_with_firewall(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates backupset on backup host
3. Enables firewall on cluster host and validates if backup cluster command throws expected error
4. Disables firewall on cluster host, takes backup and validates
5. Enables firewall on restore host and validates if backup restore command throws expected error
6. Disables firewall on restore host, restores and validates
"""
if self.os_name == "windows" or self.nonroot:
self.log.info("This firewall test does not run on windows or nonroot user")
return
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.log.info("Enabling firewall on cluster host before backup")
RemoteUtilHelper.enable_firewall(self.backupset.cluster_host)
self.enable_firewall = True
try:
output, error = self.backup_cluster()
self.assertIn("failed to connect", output[0],
"Expected error not thrown by backup cluster when firewall enabled")
finally:
self.log.info("Disabling firewall on cluster host to take backup")
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.disable_firewall()
conn.disconnect()
self.enable_firewall = False
self.log.info("Trying backup now")
self.backup_cluster_validate()
self.log.info("Enabling firewall on restore host before restore")
RemoteUtilHelper.enable_firewall(self.backupset.restore_cluster_host)
self.enable_firewall = True
""" reset restore cluster to same services as backup cluster """
try:
output, error = self.backup_restore()
mesg = "connect: connection refused"
if self.skip_buckets:
mesg = "Error restoring cluster:"
self.assertTrue(self._check_output(mesg, output),
"Expected error not thrown by backup restore when firewall enabled")
finally:
self.log.info("Disabling firewall on restore host to restore")
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.disable_firewall()
conn.disconnect()
self.enable_firewall = False
self.log.info("Trying restore now")
self.skip_buckets = False
""" Need to reset restore node with services the same as in backup cluster """
rest = RestConnection(self.backupset.restore_cluster_host)
rest.force_eject_node()
master_services = self.get_services([self.backupset.cluster_host],
self.services_init, start_node=0)
info = rest.get_nodes_self()
if info.memoryQuota and int(info.memoryQuota) > 0:
self.quota = info.memoryQuota
rest.init_node()
if self.hostname and self.backupset.restore_cluster_host.ip.endswith(".com"):
self.log.info("\n*** Set node with hostname")
cmd_init = 'node-init'
options = '--node-init-hostname ' + self.backupset.restore_cluster_host.ip
output, _ = conn.execute_couchbase_cli(cli_command=cmd_init, options=options,
cluster_host="localhost",
user=self.backupset.restore_cluster_host.rest_username,
password=self.backupset.restore_cluster_host.rest_password)
if not self._check_output("SUCCESS: Node initialize", output):
raise("Failed to set hostname")
conn.disconnect()
self.sleep(10)
self.backup_restore_validate()
def test_backup_restore_with_audit(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates backupset on backup host
3. Creates a backup of the cluster host - verifies if corresponding entry was created in audit log
4. Restores data on to restore host - verifies if corresponding entry was created in audit log
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
audit_obj = audit(AUDITBACKUPID, self.backupset.cluster_host)
status = audit_obj.getAuditStatus()
self.log.info("Audit status on {0} is {1}".format(self.backupset.cluster_host.ip, status))
if not status:
self.log.info("Enabling audit on {0}".format(self.backupset.cluster_host.ip))
audit_obj.setAuditEnable('true')
self.backup_create()
self.backup_cluster()
field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='backup'))
self.assertTrue(field_verified, "One of the fields is not matching")
self.assertTrue(value_verified, "Values for one of the fields is not matching")
audit_obj = audit(AUDITBACKUPID, self.backupset.restore_cluster_host)
status = audit_obj.getAuditStatus()
self.log.info("Audit status on {0} is {1}".format(self.backupset.restore_cluster_host.ip, status))
if not status:
self.log.info("Enabling audit on {0}".format(self.backupset.restore_cluster_host.ip))
audit_obj.setAuditEnable('true')
self.backup_restore()
audit_obj = audit(AUDITRESTOREID, self.backupset.restore_cluster_host)
field_verified, value_verified = audit_obj.validateEvents(self._get_event_expected_results(action='restore'))
self.assertTrue(field_verified, "One of the fields is not matching")
self.assertTrue(value_verified, "Values for one of the fields is not matching")
def _get_event_expected_results(self, action):
if action == 'backup':
expected_results = {
"real_userid:source": "memcached",
"real_userid:user": "default",
"name": "opened DCP connection",
"id": AUDITBACKUPID,
"description": "opened DCP connection",
"timestamp": "{0}".format(self.backups[0]),
"bucket": "{0}".format(self.buckets[0].name),
"sockname": "{0}:11210".format(self.backupset.cluster_host.ip),
"peername": "{0}".format(self.backupset.backup_host.ip)
}
elif action == 'restore':
expected_results = {
"real_userid:source": "memcached",
"real_userid:user": "default",
"name": "authentication succeeded",
"id": AUDITRESTOREID,
"description": "Authentication to the cluster succeeded",
"timestamp": "{0}".format(self.backups[0]),
"bucket": "{0}".format(self.buckets[0].name),
"sockname": "{0}:11210".format(self.backupset.restore_cluster_host.ip),
"peername": "{0}".format(self.backupset.backup_host.ip)
}
return expected_results
def test_backup_restore_with_lesser_nodes(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Adds another node to the restore cluster and rebalances - note the test has to be run with nodes_init >= 3 so
        that the cluster host has more nodes than the restore host
3. Creates backupset on backup host
4. Creates backup of cluster host with 3 or more number of nodes and validates
5. Restores to restore host with lesser number of nodes (2) and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.add_node(self.input.clusters[0][1].rest_username, self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].ip)
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate()
def test_backup_with_full_disk(self):
"""
Things to be done before running this testcase:
- scripts/install.py has to be run with init_nodes=False
- scripts/cbqe3043.py has to be run against the ini file - this script will mount a 20MB partition on the
nodes required for the test
1. Creates specified bucket on the cluster and loads it with given number of items
2. Sets backup directory to the 20MB partition and creates a backupset
3. Fills up 20MB partition
4. Keeps taking backup until no space left on device error is hit
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backupset.directory = "/cbqe3043/entbackup"
self.backup_create()
conn = RemoteMachineShellConnection(self.backupset.backup_host)
output, error = conn.execute_command("dd if=/dev/zero of=/cbqe3043/file bs=256M count=50")
conn.log_command_output(output, error)
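        # dd tries to write 50 x 256MB of zeros, far more than the 20MB partition set up
        # by cbqe3043.py, so repeated backups should eventually hit "no space left on device".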
output, error = self.backup_cluster()
while self._check_output("Backup completed successfully", output):
gen = BlobGenerator("ent-backup{0}{0}".format(randint(1, 10000)), "ent-backup-",
self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
output, error = self.backup_cluster()
error_msg = "no space left on device"
self.assertTrue(self._check_output(error_msg, output),
"Expected error message not thrown by backup when disk is full")
self.log.info("Expected error thrown by backup command")
conn.execute_command("rm -rf /cbqe3043/file")
conn.disconnect()
def test_backup_and_restore_with_map_buckets(self):
"""
1. Creates specified buckets on the cluster and loads it with given number
of items - memcached bucket has to be created for this test
(memcached_buckets=1)
2. Creates a backupset, takes backup of the cluster host and validates
3. Executes list command on the backup and validates that memcached bucket
has been skipped
4. Restores the backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
if self.create_gsi:
self.create_indexes()
self.backup_create()
self.backup_cluster()
status, output, message = self.backup_list()
if not status:
self.fail("Getting backup list to validate memcached buckets failed.")
for line in output:
self.assertTrue("memcached_bucket0" not in line,
"Memcached bucket found in backup list output after backup")
self.log.info("Memcached bucket not found in backup list output after backup as expected")
self.backup_restore()
def test_backup_with_erlang_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number
of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts
erlang process
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
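        # Give the async backup a head start before killing erlang on the cluster host.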
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang()
conn.start_couchbase()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with erlang crash and restart within 180 seconds")
self.log.info("Backup succeeded with erlang crash and restart within 180 seconds")
conn.disconnect()
def test_backup_with_couchbase_stop_and_start(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts couchbase server
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.stop_couchbase()
conn.start_couchbase()
conn.disconnect()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with couchbase stop and start within 180 seconds")
self.log.info("Backup succeeded with couchbase stop and start within 180 seconds")
def test_backup_with_memcached_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills and restarts memcached process
4. Validates backup output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.pause_memcached()
conn.unpause_memcached()
conn.disconnect()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with memcached crash and restart within 180 seconds")
self.log.info("Backup succeeded with memcached crash and restart within 180 seconds")
def test_backup_with_erlang_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills erlang process
4. Waits for 200s and Validates backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
if self.os_name != "windows":
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang(self.os_name)
output = backup_result.result(timeout=200)
if self.debug_logs:
print(("Raw output from backup run: ", output))
error_mesgs = ["Error backing up cluster: Not all data was backed up due to",
"No connection could be made because the target machine actively refused it."]
error_found = False
for error in error_mesgs:
if self._check_output(error, output):
error_found = True
if not error_found:
raise("Expected error message not thrown by Backup 180 seconds after erlang crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_backup_with_couchbase_stop(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills couchbase server
4. Waits for 200s and Validates backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.stop_couchbase()
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output(
"Error backing up cluster: Not all data was backed up due to connectivity issues.", output),
"Expected error message not thrown by Backup 180 seconds after couchbase-server stop")
self.log.info("Expected error message thrown by Backup 180 seconds after couchbase-server stop")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_backup_with_memcached_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset on the backup host
3. Initiates a backup - while backup is going on kills memcached process
4. Waits for 200s and Validates backup error
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
try:
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.pause_memcached(self.os_name)
self.sleep(17, "time needs for memcached process completely stopped")
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
output = backup_result.result(timeout=200)
mesg = "Error backing up cluster: Unable to find the latest vbucket sequence numbers"
self.assertTrue(self._check_output(mesg, output),
"Expected error message not thrown by Backup 180 seconds after memcached crash")
self.log.info("Expected error thrown by Backup 180 seconds after memcached crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.unpause_memcached(self.os_name)
self.sleep(30)
conn.disconnect()
def test_restore_with_erlang_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts erlang process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.kill_erlang(self.os_name)
conn.start_couchbase()
conn.disconnect()
timeout_now = 600
output = restore_result.result(timeout=timeout_now)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with erlang crash and restart within 180 seconds")
self.log.info("Restore succeeded with erlang crash and restart within 180 seconds")
def test_restore_with_couchbase_stop_and_start(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts couchbase process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.stop_couchbase()
self.sleep(10)
conn.start_couchbase()
conn.disconnect()
output = restore_result.result(timeout=500)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with couchbase stop and start within 180 seconds")
self.log.info("Restore succeeded with couchbase stop and start within 180 seconds")
def test_restore_with_memcached_crash_and_restart(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills and restarts memcached process
4. Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.pause_memcached(self.os_name)
conn.unpause_memcached(self.os_name)
conn.disconnect()
output = restore_result.result(timeout=600)
self.assertTrue(self._check_output("Restore completed successfully", output),
"Restore failed with memcached crash and restart within 400 seconds")
self.log.info("Restore succeeded with memcached crash and restart within 400 seconds")
def test_restore_with_erlang_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills erlang process
4. Waits for 200s and Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.kill_erlang(self.os_name)
output = restore_result.result(timeout=300)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase", output),
"Expected error message not thrown by Restore 180 seconds after erlang crash")
self.log.info("Expected error thrown by Restore 180 seconds after erlang crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_restore_with_couchbase_stop(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills couchbase server
4. Waits for 200s and Validates restore output
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.stop_couchbase()
output = restore_result.result(timeout=300)
self.assertTrue(self._check_output(
"Error restoring cluster: Not all data was sent to Couchbase due to connectivity issues.", output),
"Expected error message not thrown by Restore 180 seconds after couchbase-server stop")
self.log.info("Expected error message thrown by Restore 180 seconds after couchbase-server stop")
except Exception as ex:
self.fail(str(ex))
finally:
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
def test_restore_with_memcached_crash(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset on the backup host and backs up data
3. Initiates a restore - while restore is going on kills memcached process
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
rest_conn = RestConnection(self.backupset.restore_cluster_host)
rest_conn.create_bucket(bucket="default", ramQuotaMB=512)
try:
conn = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
conn.pause_memcached(self.os_name)
output, error = self.backup_restore()
self.assertTrue(self._check_output(
"Error restoring cluster: failed to connect", output),
"Expected error message not thrown by Restore 180 seconds after memcached crash")
self.log.info("Expected error thrown by Restore 180 seconds after memcached crash")
except Exception as ex:
self.fail(str(ex))
finally:
conn.unpause_memcached(self.os_name)
conn.disconnect()
self.sleep(30)
def test_backup_merge(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Takes specified number of backups (param number_of_backups - should be at least 2 for this test case)
3. Executes list command and validates if all backups are present
4. Randomly selects a start and end and merges the backups
        5. Executes list command again and validates that the new merged set of backups is listed
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=self.backupset.number_of_backups)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
""" remove last 6 chars of offset time in backup name"""
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
backup_name = bk_info["backups"][i]["date"]
if self.debug_logs:
print("backup name ", backup_name)
print("backup set ", self.backups)
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in info command output".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Initial number of backups did not match")
self.log.info("Initial number of backups matched")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
self.backupset.end = randrange(self.backupset.start + 1, self.backupset.number_of_backups + 1)
status, output, message = self.backup_merge(check_for_panic=True)
if not status:
self.fail(message)
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
return False, "No output content"
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
backup_name = bk_info["backups"][i]["date"]
if self.debug_logs:
print("backup name ", backup_name)
print("backup set ", self.backups)
backup_count += 1
if backup_name in self.backups:
self.log.info("{0} matched in info command output".format(backup_name))
else:
self.fail("Didn't expect backup date {0} from the info command output" \
" to be in self.backups (the list of exepected backup dates" \
" after a merge)".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Merged number of backups did not match")
self.log.info("Merged number of backups matched")
def test_backup_merge_with_restore(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Takes two backups - restores from the backups and validates
3. Merges both the backups - restores from merged backup and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 2
output, error = self.backup_restore()
if error:
self.fail("Restoring backup failed: {0}".format(error))
self.log.info("Finished restoring backup before merging")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.backupset.start = 1
self.backupset.end = 1
rest = RestConnection(self.backupset.restore_cluster_host)
rest.flush_bucket()
output, error = self.backup_restore()
if error:
self.fail("Restoring backup failed")
self.log.info("Finished restoring backup after merging")
def test_backup_merge_with_unmerged(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Takes two backups - merges them into one
3. Takes 2 more backups - merges the new backups with already merged ones and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 2
self.log.info("Merging existing incremental backups")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.log.info("Taking more backups")
self._take_n_backups(n=2)
self.backupset.start = 1
self.backupset.end = 3
self.log.info("Merging new backups into already merged backup")
status, output, message = self.backup_merge()
if not status:
self.fail(message)
self.log.info("Successfully merged new backups with already merged backup")
def test_merge_backup_with_multi_threads(self):
"""
1. Create a cluster with default bucket
2. Load default bucket with key1
3. Create backup with default one thread
4. Load again to bucket with key2
5. Create backup with 2 threads
6. Merge backup. All backup should contain doc key1 and key2
"""
gen = BlobGenerator("ent-backup1", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("Start doing backup")
self.backup_create()
self.backup_cluster()
gen = BlobGenerator("ent-backup2", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
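        # Take the second (incremental) backup with the configured number of
        # threads; after the merge both the ent-backup1* and ent-backup2* keys
        # must be present in the merged backup.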
self.backup_cluster(self.threads_count)
self.backupset.number_of_backups += 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
self.log.info("Start to merge backup")
self.backupset.start = randrange(1, self.backupset.number_of_backups)
if int(self.backupset.number_of_backups) == 2:
self.backupset.end = 2
elif int(self.backupset.number_of_backups) > 2:
self.backupset.end = randrange(self.backupset.start,
self.backupset.number_of_backups + 1)
self.merged = True
status, output, _ = self.backup_merge()
self.backupset.end -= 1
status, output, message = self.backup_list()
if not status:
self.fail(message)
current_vseqno = self.get_vbucket_seqnos(self.cluster_to_backup, self.buckets,
self.skip_consistency, self.per_node)
self.log.info("*** Start to validate data in merge backup ")
self.validate_backup_data(self.backupset.backup_host, [self.master],
"ent-backup", False, False, "memory",
self.num_items, None)
self.backup_cluster_validate(skip_backup=True)
def test_backup_purge(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Initiates a backup and kills the erlang server while backup is going on
4. Waits for the backup command to timeout
5. Executes backup command again with purge option
6. Validates the old backup is deleted and new backup is created successfully
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
old_backup_name = ""
new_backup_name = ""
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
self.sleep(10)
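        # Kill the erlang (ns_server) process on the cluster host while the backup
        # is in flight; this leaves an incomplete backup behind, which the
        # follow-up backup below discards via the purge option.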
conn = RemoteMachineShellConnection(self.backupset.cluster_host)
conn.kill_erlang()
output = backup_result.result(timeout=200)
self.log.info(str(output))
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
            self.fail("No output content")
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
old_backup_name = bk_info["backups"][i]["date"]
self.log.info("Backup name before purge: " + old_backup_name)
conn.start_couchbase()
conn.disconnect()
self.sleep(30)
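        # Retake the backup with the purge flag set on the backupset; the
        # incomplete backup should be removed and replaced by a new backup with a
        # different timestamp (compared at the end of the test).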
output, error = self.backup_cluster()
if error or not self._check_output("Backup completed successfully", output):
self.fail("Taking cluster backup failed.")
status, output, message = self.backup_list()
if not status:
self.fail(message)
if output and output[0]:
bk_info = json.loads(output[0])
bk_info = bk_info["repos"][0]
else:
            self.fail("No output content")
if bk_info["backups"]:
for i in range(0, len(bk_info["backups"])):
new_backup_name = bk_info["backups"][i]["date"]
self.log.info("Backup name after purge: " + new_backup_name)
# Once the purge (and backup) have completed we shouldn't see any orphaned multipart uploads
if self.objstore_provider:
self.assertEqual(
self.objstore_provider.num_multipart_uploads(), 0,
"Expected all multipart uploads to have been purged (all newly created ones should have also been completed)"
)
        self.assertNotEqual(old_backup_name, new_backup_name,
                            "Old backup name and new backup name are the same even though purge was used")
        self.log.info("Old backup name and new backup name differ when purge is used")
def test_backup_resume(self):
"""
1. Creates specified bucket on the cluster and loads it with given
number of items
2. Creates a backupset
3. Initiates a backup and kills the erlang server while backup is going on
4. Waits for the backup command to timeout
5. Executes backup command again with resume option
        6. Validates that the old backup is resumed and the backup completes successfully
"""
num_vbuckets = self.input.param("num_vbuckets", None)
if num_vbuckets:
remote_client = RemoteMachineShellConnection(self.backupset.cluster_host)
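            # Lower the default number of vBuckets via the ns_server diag/eval
            # endpoint (requires diag/eval to be enabled on the node), presumably
            # so the resume path can be exercised with fewer vBuckets.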
command = (
f"curl -X POST -u {self.master.rest_username}:{self.master.rest_password}"
f" {self.master.ip}:8091/diag/eval -d 'ns_config:set(couchbase_num_vbuckets_default, {num_vbuckets}).'"
)
output, _ = remote_client.execute_command(command)
if 'ok' not in output[0]:
self.fail(f"failed to reduce the number of vBuckets {num_vbuckets}")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.bk_with_stop_and_resume(iterations=self.input.param("iterations", 1),
remove_staging_directory=self.input.param("remove_staging_directory", False))
def test_backup_restore_with_deletes(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
        2. Creates a backupset - backs up data and validates
3. Perform deletes
4. Restore data and validate
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self._load_all_buckets(self.master, gen, "delete", 0)
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_failover(self):
"""
1. Test should be run with 2 nodes in cluster host (param: nodes_init = 2)
2. Creates specified bucket on the cluster and loads it with given number of items
        3. Creates a backupset - backs up data and validates
4. Fails over the second node with specified type (param: graceful = True | False)
5. Sets recovery type to specified value (param: recoveryType = full | delta)
6. Adds back the failed over node and rebalances
7. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
nodes_all = rest.node_statuses()
for node in nodes_all:
if node.ip == self.servers[1].ip:
rest.fail_over(otpNode=node.id, graceful=self.graceful)
self.sleep(30)
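                # Setting the recovery type can race with the failover completing,
                # so retry once after a short wait if the first attempt fails.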
try:
rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
except Exception as e:
if "Set RecoveryType failed" in str(e):
self.sleep(15)
rest.set_recovery_type(otpNode=node.id, recoveryType=self.recoveryType)
rest.add_back_node(otpNode=node.id)
rebalance = self.cluster.async_rebalance(self.servers, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_after_offline_upgrade(self):
"""
        1. Test has to be supplied initial_version to be installed; creates the
           default bucket and loads data into it.
        2. Backs up the cluster, verifies the data and deletes the default bucket
        3. Upgrades the cluster to upgrade_version and re-creates the default bucket
4. Restores data and validates
Params:
backup_service_test (bool): Import repository and restore using the backup service.
"""
upgrade_version = self.input.param("upgrade_version", "5.0.0-3330")
if upgrade_version == "5.0.0-3330":
self.fail("\n *** Need param 'upgrade_version=' to run")
backup_service_test = self.input.param("backup_service_test", False)
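        # When backup_service_test is set, the backed up repository is later
        # imported into the Backup Service on the upgraded cluster and restored
        # from there instead of via cbbackupmgr.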
if backup_service_test:
backup_service_hook = BackupServiceHook(self.servers[1], self.servers, self.backupset, self.objstore_provider)
self.cli_command_location = "/opt/couchbase/bin"
self._install(self.servers)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
rebalance = self.cluster.async_rebalance(self.servers[:2], [self.servers[1]],
[])
rebalance.result()
self.add_built_in_server_user()
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)
self.buckets = RestConnection(self.master).get_buckets()
self.total_buckets = len(self.buckets)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.sleep(5)
BucketOperationHelper.delete_bucket_or_assert(self.master, "default", self)
""" Start to upgrade """
if self.force_version_upgrade:
upgrade_version = self.force_version_upgrade
upgrade_threads = self._async_update(upgrade_version=upgrade_version,
servers=self.servers[:2])
for th in upgrade_threads:
th.join()
self.log.info("Upgraded to: {ver}".format(ver=upgrade_version))
self.sleep(30)
""" Re-create default bucket on upgrade cluster """
RestConnection(self.master).create_bucket(bucket='default', ramQuotaMB=512)
self.sleep(5)
# Create a backup node and perform a backup service import repository and restore
if backup_service_test:
backup_service_hook.backup_service.replace_services(self.servers[1], ['kv,backup'])
backup_service_hook.backup_service.import_repository(self.backupset.directory, self.backupset.name, "my_repo")
backup_service_hook.backup_service.take_one_off_restore("imported", "my_repo", 20, 20)
backup_service_hook.cleanup()
return
""" Only server from Spock needs build in user
to access bucket and other tasks
"""
if "5" <= RestConnection(self.master).get_nodes_version()[:1]:
self.add_built_in_server_user()
for user in self.users_check_restore:
user_name = user.replace('[', '_').replace(']', '_')
testuser = [{'id': user_name, 'name': user_name,
'password': 'password'}]
rolelist = [{'id': user_name, 'name': user_name,
'roles': user}]
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
self.master.ip))
RbacBase().create_user_source(testuser, 'builtin', self.master)
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')
backupsets = [self.backupset]
if "5" <= RestConnection(self.master).get_nodes_version()[:1]:
for user in self.users_check_restore:
new_backupset = copy.deepcopy(self.backupset)
new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')
backupsets.append(new_backupset)
for backupset in backupsets:
self.backupset = backupset
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
BucketOperationHelper().delete_bucket_or_assert(self.backupset.cluster_host,
"default", self)
def test_backup_restore_after_online_upgrade(self):
"""
1. Test has to be supplied initial_version to be installed and
upgrade_version to be upgraded to
2. Installs initial_version on the servers
3. Load data and backup in pre-upgrade
4. Install upgrade version on 2 nodes. Use swap rebalance to upgrade
cluster
        5. Performs operations after upgrading the cluster
6. Restores data and validates
"""
if self.initial_version[:1] == "5" and self.upgrade_versions[0][:1] >= "7":
self.log.error("\n\n\n*** ERROR: Direct upgrade from {0} to {1} does not support.\
Test will skip\n\n"\
.format(self.initial_version[:5], self.upgrade_versions[0][:5]))
return
servers = copy.deepcopy(self.servers)
self.vbuckets = self.initial_vbuckets
if len(servers) != 4:
self.fail("\nThis test needs exactly 4 nodes to run! ")
self._install(servers)
count = 0
nodes_fail_to_install = []
for server in servers:
ready = RestHelper(RestConnection(server)).is_ns_server_running(60)
if ready:
count += 1
else:
nodes_fail_to_install.append(server.ip)
if count < len(servers):
self.fail("Some servers may not install Couchbase server: {0}"\
.format(nodes_fail_to_install))
if not self.disable_diag_eval_on_non_local_host:
self.enable_diag_eval_on_non_local_hosts()
cmd = 'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,
self.master.rest_username,
self.master.rest_password)
cmd += '-d "path_config:component_path(bin)."'
bin_path = subprocess.check_output(cmd, shell=True)
try:
bin_path = bin_path.decode()
except AttributeError:
pass
if "bin" not in bin_path:
self.fail("Check if cb server install on %s" % self.master.ip)
else:
self.cli_command_location = bin_path.replace('"', '') + "/"
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
rebalance = self.cluster.async_rebalance(servers[:self.nodes_init],
[servers[int(self.nodes_init) - 1]], [])
rebalance.result()
self.sleep(15)
self.add_built_in_server_user()
rest = RestConnection(self.master)
cb_version = rest.get_nodes_version()
initial_compression_mode = "off"
if 5.5 > float(cb_version[:3]):
self.compression_mode = initial_compression_mode
rest.create_bucket(bucket='default', ramQuotaMB=512,
compressionMode=self.compression_mode)
self.buckets = rest.get_buckets()
self._load_all_buckets(self.master, gen, "create", 0)
""" create index """
if self.create_gsi:
if "5" > rest.get_nodes_version()[:1]:
if self.gsi_type == "forestdb":
self.fail("Need to set param self.gsi_type=memory_optimized")
rest.set_indexer_storage_mode(storageMode="memory_optimized")
else:
rest.set_indexer_storage_mode(storageMode="plasma")
self.create_indexes()
self.backup_create()
if self.backupset.number_of_backups > 1:
self.log.info("Start doing multiple backup")
for i in range(1, self.backupset.number_of_backups + 1):
self._backup_restore_with_ops()
else:
self.backup_cluster_validate()
start = randrange(1, self.backupset.number_of_backups + 1)
if start == self.backupset.number_of_backups:
end = start
else:
end = randrange(start, self.backupset.number_of_backups + 1)
self.sleep(5)
self.backup_list()
""" Start to online upgrade using swap rebalance """
self.initial_version = self.upgrade_versions[0]
if self.force_version_upgrade:
self.initial_version = self.force_version_upgrade
self.sleep(self.sleep_time,
"Pre-setup of old version is done. Wait for online upgrade to: "
"{0} version".format(self.initial_version))
self.product = 'couchbase-server'
self._install(servers[2:])
self.sleep(self.sleep_time,
"Installation of new version is done. Wait for rebalance")
        self.log.info(
            "Rebalancing in upgraded nodes and rebalancing out nodes with the old version")
add_node_services = [self.add_node_services]
if "-" in self.add_node_services:
add_node_services = self.add_node_services.split("-")
self.cluster.rebalance(servers, servers[2:], servers[:2],
services=add_node_services)
self.sleep(15)
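        # After the swap rebalance the original cluster host has been rebalanced
        # out, so point the backupset at one of the upgraded nodes.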
self.backupset.cluster_host = servers[2]
""" Upgrade is done """
self.log.info("** Upgrade is done **")
healthy = False
timeout = 0
while not healthy:
healthy = RestHelper(RestConnection(self.backupset.cluster_host)).is_cluster_healthy()
if not healthy:
if timeout == 120:
self.fail("Node %s is not ready after 2 mins" % self.backupset.cluster_host)
else:
self.sleep(5, "Wait for server up ")
timeout += 5
else:
healthy = True
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
for user in self.users_check_restore:
user_name = user.replace('[', '_').replace(']', '_')
testuser = [{'id': user_name, 'name': user_name,
'password': 'password'}]
rolelist = [{'id': user_name, 'name': user_name,
'roles': user}]
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
servers[2].ip))
RbacBase().create_user_source(testuser, 'builtin', servers[2])
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
status = RbacBase().add_user_role(rolelist, RestConnection(servers[2]), 'builtin')
self.log.info(status)
if self.backupset.number_of_backups_after_upgrade:
self.backupset.number_of_backups += \
self.backupset.number_of_backups_after_upgrade
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
self.add_built_in_server_user(node=servers[2])
for i in range(1, self.backupset.number_of_backups_after_upgrade + 2):
self.log.info("_backup_restore_with_ops #{0} started...".format(i))
validate_dir_struct = True
if i > 2:
validate_dir_struct = False
self._backup_restore_with_ops(node=self.backupset.cluster_host, repeats=1,
validate_directory_structure=validate_dir_struct)
self.backup_list()
""" merged after upgrade """
if self.after_upgrade_merged:
self.backupset.start = 1
self.backupset.end = len(self.backups)
self.backup_merge_validate()
self.backup_list()
backupsets = [self.backupset]
if "5" <= RestConnection(servers[2]).get_nodes_version()[:1]:
for user in self.users_check_restore:
new_backupset = copy.deepcopy(self.backupset)
new_backupset.restore_cluster_host_username = user.replace('[', '_').replace(']', '_')
backupsets.append(new_backupset)
for backupset in backupsets:
self.backupset = backupset
if self.bucket_flush:
self.log.info("Start to flush bucket")
rest = RestConnection(servers[2])
rest.flush_bucket()
else:
self.bucket_helper.delete_bucket_or_assert(self.backupset.cluster_host,
"default", self)
""" Re-create default bucket on upgrade cluster """
RestConnection(servers[2]).create_bucket(bucket='default',
ramQuotaMB=512,
compressionMode=self.compression_mode)
self.sleep(5)
self.total_buckets = len(self.buckets)
if self.after_upgrade_merged:
self.backupset.end = 1
""" restore back to cluster """
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
if self.create_gsi:
self.verify_gsi()
def test_backup_restore_with_python_sdk(self):
"""
1. Note that python sdk has to be installed on all nodes before running this test
2. Connects to default bucket on cluster host using Python SDK
           - loads the specified number of items
3. Creates a backupset, backsup data and validates
4. Restores data and validates
5. Connects to default bucket on restore host using Python SDK
        6. Retrieves cas and flags of each doc on both cluster and restore host
- validates if they are equal
"""
testuser = [{'id': 'default', 'name': 'default', 'password': 'password'}]
rolelist = [{'id': 'default', 'name': 'default', 'roles': 'admin'}]
self.add_built_in_server_user(testuser, rolelist)
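        # Connect to the default bucket with the Python SDK's (SDK 2.x style)
        # Bucket constructor; the SDK is assumed to be installed on the machine
        # running the test (see step 1 of the docstring).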
try:
cb = Bucket('couchbase://' + self.backupset.cluster_host.ip + '/default',
password="password")
if cb is not None:
self.log.info("Established connection to bucket on cluster host"
" using python SDK")
else:
self.fail("Failed to establish connection to bucket on cluster host"
" using python SDK")
except Exception as ex:
self.fail(str(ex))
self.log.info("Loading bucket with data using python SDK")
for i in range(1, self.num_items + 1):
cb.upsert("doc" + str(i), "value" + str(i))
cluster_host_data = {}
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
value_obj = cb.get(key=key)
cluster_host_data[key] = {}
cluster_host_data[key]["cas"] = str(value_obj.cas)
cluster_host_data[key]["flags"] = str(value_obj.flags)
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
self.add_built_in_server_user(testuser, rolelist, self.backupset.restore_cluster_host)
try:
cb = Bucket('couchbase://' + self.backupset.restore_cluster_host.ip + '/default',
password="password")
if cb is not None:
self.log.info("Established connection to bucket on restore host " \
"using python SDK")
else:
self.fail("Failed to establish connection to bucket on restore " \
"host using python SDK")
except Exception as ex:
self.fail(str(ex))
restore_host_data = {}
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
value_obj = cb.get(key=key)
restore_host_data[key] = {}
restore_host_data[key]["cas"] = str(value_obj.cas)
restore_host_data[key]["flags"] = str(value_obj.flags)
self.log.info("Comparing cluster host data cas and flags against restore host data")
for i in range(1, self.num_items + 1):
key = "doc" + str(i)
if cluster_host_data[key]["cas"] != restore_host_data[key]["cas"]:
self.fail("CAS mismatch for key: {0}".format(key))
if cluster_host_data[key]["flags"] != restore_host_data[key]["flags"]:
self.fail("Flags mismatch for key: {0}".format(key))
self.log.info("Successfully validated cluster host data cas and flags " \
"against restore host data")
def test_backup_restore_with_flush(self):
"""
1. Test should be run with same-cluster=True
2. Creates specified bucket on the cluster and loads it with given number of items
3. Creates a backupset - backsup data and validates
4. Flushes the bucket
5. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
rest.flush_bucket()
self.log.info("Flushed default bucket - restoring data now..")
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_recreate(self):
"""
1. Test should be run with same-cluster=True
2. Creates specified bucket on the cluster and loads it with given number of items
3. Creates a backupset - backsup data and validates
4. Deletes the bucket and recreates it
5. Restores data and validates
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
rest = RestConnection(self.backupset.cluster_host)
rest.delete_bucket()
bucket_name = "default"
rest_helper = RestHelper(rest)
rest.create_bucket(bucket=bucket_name, ramQuotaMB=512)
bucket_ready = rest_helper.vbucket_map_ready(bucket_name)
if not bucket_ready:
self.fail("Bucket {0} is not created after 120 seconds.".format(bucket_name))
self.log.info("Deleted {0} bucket and recreated it - restoring it now.."\
.format(bucket_name))
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_create_negative_args(self):
"""
Validates error messages for negative inputs of create command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
cmd = "config"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
# ['cbbackupmgr config [<args>]', '', 'Required Flags:', '', ' -a,--archive The archive directory to use', ' -r,--repo The name of the backup repository to create and', ' configure', '', 'Optional Flags:', '', ' --exclude-buckets A comma separated list of buckets to exclude from', ' backups. All buckets except for the ones specified', ' will be backed up.', ' --include-buckets A comma separated list of buckets to back up. Only', ' buckets in this list are backed up.', ' --disable-bucket-config Disables backing up bucket configuration', ' information', ' --disable-views Disables backing up view definitions', ' --disable-gsi-indexes Disables backing up GSI index definitions', ' --disable-ft-indexes Disables backing up Full Text index definitions', ' --disable-data Disables backing up cluster data', ' -h,--help Prints the help message', '']
self.assertEqual(output[0], "cbbackupmgr config [<args>]", "Expected error message not thrown")
cmd = "config --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "config --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "config --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
self.backup_create()
cmd = "config --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertEqual(output[0], "Backup repository creation failed: Backup Repository `backup` exists",
"Expected error message not thrown")
def test_objstore_negative_args(self):
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = f"{self.cli_command_location}/cbbackupmgr"
# Run all the sub_commands with the (non-objstore) required arguments (so that we are actually checking the
# correct error)
for sub_command in ['backup -a archive -r repo -c localhost -u admin -p password',
'collect-logs -a archive',
'config -a archive -r repo',
'examine -a archive -r repo -k asdf --bucket asdf',
'info -a archive',
'remove -a archive -r repo',
'restore -a archive -r repo -c localhost -u admin -p password']:
# Check all the object store arguments (ones that require an argument have one provided so that we are
# validating cbbackupmgr and not cbflag).
for argument in ['--obj-access-key-id asdf',
'--obj-cacert asdf',
'--obj-endpoint asdf',
'--obj-log-level asdf',
'--obj-no-ssl-verify',
'--obj-region asdf',
'--obj-secret-access-key asdf']:
# Check all the common object store commands
output, error = remote_client.execute_command(f"{command} {sub_command} {argument}")
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = "cloud arguments provided without the cloud scheme prefix"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about providing cloud arguments without the cloud schema prefix")
# Check all the S3 specific arguments
if self.objstore_provider.schema_prefix() == 's3://':
for argument in ['--s3-force-path-style']:
output, error = remote_client.execute_command(f"{command} {sub_command} {argument}")
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg_obj = "s3 arguments provided without the archive 's3://' schema prefix"
if "bucket" in sub_command:
error_mesg_obj = "Unknown flag: --bucket"
self.assertIn(error_mesg_obj, output[0],
"Expected an error about providing S3 specific arguments without the s3:// schema prefix")
# Check all the common objstore flags that require arguments without providing arguments. This is testing
# cbflag.
for argument in ['--obj-access-key-id',
'--obj-cacert',
'--obj-endpoint',
'--obj-log-level',
'--obj-region',
'--obj-secret-access-key']:
# Check that common object store arguments that require a value throw the correct error when a value
# is omitted.
output, error = remote_client.execute_command(
f"{command} {sub_command.replace('archive', self.objstore_provider.schema_prefix() + 'archive')} --obj-staging-dir staging {argument}"
)
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = f"Expected argument for option: {argument}"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about providing cloud arguments without a value")
# Test omitting the staging directory argument
output, error = remote_client.execute_command(
f"{command} {sub_command.replace('archive', self.objstore_provider.schema_prefix() + 'archive')}"
)
remote_client.log_command_output(output, error)
self.assertNotEqual(len(output), 0)
error_mesg = "you must provide the '--obj-staging-dir' argument"
if "bucket" in sub_command:
error_mesg = "Unknown flag: --bucket"
self.assertIn(error_mesg, output[0],
"Expected an error about not supplying the '--obj-staging-dir' argument")
def test_backup_cluster_restore_negative_args(self):
"""
Validates error messages for negative inputs of cluster or restore command - command parameter
decides which command to test
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd_to_test = self.input.param("command", "backup")
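        # The same negative-argument checks run for both the backup and restore
        # sub-commands; the command under test is chosen via the 'command' param.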
if cmd_to_test == "restore":
cmd = cmd_to_test + " --archive {0} --repo {1} --host http://{2}:{3} --username {4} \
--password {5}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
if "7.0.1" in self.cb_version:
self.assertIn("Error restoring cluster: Backup backup doesn't contain any backups", output[-1])
else:
self.assertIn("Error restoring cluster: Repository 'backup' doesn't contain any backups", output[-1])
self.backup_cluster()
cmd = cmd_to_test
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
cmd_test = cmd_to_test
if cmd_to_test.startswith('"') and cmd_to_test.endswith('"'):
cmd_test = cmd_to_test[1:-1]
self.assertEqual(output[0], "cbbackupmgr {} [<args>]".format(cmd_test))
cmd = cmd_to_test + " --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = cmd_to_test + " --archive xyz -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output("archive directory '{0}xyz' does not exist".format(self.root_path), output))
cmd = cmd_to_test + " --archive {0} -c http://localhost:8091 -u Administrator -p password".format(
self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1} -c http://localhost:8091 -u Administrator -p password -r".format(
self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -u Administrator -p password".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -c/--cluster",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -c -u Administrator -p password -r repo".format(
self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: -c", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} -c http://{2}:{3}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error backing up cluster: cluster credentials required, expected --username/--password or --client-cert/--client-key",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} \
--username".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --username", "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} \
--username {4}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -p/--password",
"Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo abc --cluster http://{1}:{2} --username {3} \
--password {4}".format(self.backupset.directory,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
part_message = "backing up"
if cmd_to_test.startswith('"') and cmd_to_test.endswith('"'):
cmd_test = cmd_to_test[1:-1]
if cmd_test == "restore":
part_message = 'restoring'
self.assertTrue("Error {0} cluster: Backup Repository `abc` not found"\
.format(part_message) in output[-1],
"Expected error message not thrown. Actual output %s " % output[-1])
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster abc --username {2} \
--password {3}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host_username,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output(f"Error {part_message} cluster: failed to connect to any host(s) from the connection string", output), "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} --username abc \
--password {4}".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_password)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("check username and password" in output[-1], "Expected error message not thrown")
cmd = cmd_to_test + " --archive {0} --repo {1} --cluster http://{2}:{3} --username {4} \
--password abc".format(self.backupset.directory,
self.backupset.name,
self.backupset.cluster_host.ip,
self.backupset.cluster_host.port,
self.backupset.cluster_host_username)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("check username and password" in output[-1], "Expected error message not thrown")
def test_backup_list_negative_args(self):
"""
Validates error messages for negative inputs of list command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd = "info"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr info [<args>]", "Expected error message not thrown")
cmd = "info --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "info --archive xyz".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue(self._check_output("archive directory '{0}xyz' does not exist".format(self.root_path), output))
def test_backup_compact_negative_args(self):
"""
Validates error messages for negative inputs of compact command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
self.backup_cluster()
cmd = "compact"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr compact [<args>]",
"Expected error message not thrown")
cmd = "compact --archive"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive",
"Expected error message not thrown")
cmd = "compact --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1}".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: --backup",
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1} --backup" \
.format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --backup",
"Expected error message not thrown")
cmd = "compact --archive abc --repo {0} --backup {1}" \
.format(self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertIn("not a directory", output[-1])
cmd = "compact --archive {0} --repo abc --backup {1}" \
.format(self.backupset.directory, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(self._check_output("Backup Repository `abc` not found", output),
"Expected error message not thrown")
cmd = "compact --archive {0} --repo {1} --backup abc".format(self.backupset.directory,
self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Compacting incr backup `backup` of backup `abc` failed:" in output[-1],
"Expected error message not thrown")
def test_backup_merge_negative_args(self):
"""
Validates error messages for negative inputs of merge command
"""
# This error message is thrown when an invalid date range format is supplied to cbbackupmgr.
invalid_range_format_error = "Error merging data: invalid range format, expected two indexes or two dates; the keywords [start, oldest, end, latest] are also valid"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
cmd = "merge"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr merge [<args>]", "Expected error message not thrown")
cmd = "merge --archive -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "merge --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "merge --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1} -r".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1} --start start --end end".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Error merging data: Repository 'backup' doesn't contain any backups",
"Expected error message not thrown")
self._take_n_backups(n=2)
cmd = "merge --archive {0} --repo {1}".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1} --start bbb --end end".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], invalid_range_format_error, "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start".format(self.backupset.directory, self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --start", "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2}".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1} --end aa".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], invalid_range_format_error, "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --end", "Expected error message not thrown")
cmd = "merge --archive xyz --repo {0} --start {1} --end {2}".format(self.backupset.name,
self.backups[0], self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: archive directory '{0}xyz' does not exist".format(self.root_path) in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo abc --start {1} --end {2}".format(self.backupset.directory,
self.backups[0], self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Error merging data: Backup Repository `abc` not found" in output[-1],
"Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start abc --end {2}".format(self.backupset.directory,
self.backupset.name, self.backups[1])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(invalid_range_format_error in output[-1], "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end abc".format(self.backupset.directory,
self.backupset.name, self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue(invalid_range_format_error in output[-1], "Expected error message not thrown")
cmd = "merge --archive {0} --repo {1} --start {2} --end {3}".format(self.backupset.directory,
self.backupset.name,
self.backups[1], self.backups[0])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertTrue("Error merging data: invalid range start cannot be before end" in output[-1], "Expected error message not thrown")
def test_backup_remove_negative_args(self):
"""
Validates error messages for negative inputs of remove command
"""
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.backup_create()
self.backup_cluster()
cmd = "remove"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "cbbackupmgr remove [<args>]", "Expected error message not thrown")
cmd = "remove --archive -c http://localhost:8091 -u Administrator -p password -r aa"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --archive", "Expected error message not thrown")
cmd = "remove --archive {0}".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Flag required, but not specified: -r/--repo", "Expected error message not thrown")
cmd = "remove --archive {0} --repo".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertEqual(output[0], "Expected argument for option: --repo", "Expected error message not thrown")
cmd = "remove --archive xyz --repo {0}".format(self.backupset.name)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
self.assertTrue("Removing backup repository failed: archive directory '{0}xyz' does not exist".format(self.root_path) in output[-1],
"Expected error message not thrown")
cmd = "remove --archive {0} --repo xyz".format(self.backupset.directory)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
self.assertIn("Backup Repository `xyz` not found", output[-1])
def test_backup_restore_with_views(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a simple view on source cluster
        4. Backs up data and validates
        5. Restores data and validates
6. Ensures that same view is created in restore cluster
"""
if "ephemeral" in self.input.param("bucket_type", 'membase'):
self.log.info("\n****** view does not support on ephemeral bucket ******")
return
rest_src = RestConnection(self.backupset.cluster_host)
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].cluster_ip, services=['index', 'kv'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
default_map_func = "function (doc) {\n emit(doc._id, doc);\n}"
default_view_name = "test"
default_ddoc_name = "ddoc_test"
prefix = "dev_"
query = {"full_set": "true", "stale": "false", "connection_timeout": 60000}
view = View(default_view_name, default_map_func)
task = self.cluster.async_create_view(self.backupset.cluster_host,
default_ddoc_name, view, "default")
task.result()
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
try:
result = self.cluster.query_view(self.backupset.restore_cluster_host,
prefix + default_ddoc_name,
default_view_name, query, timeout=30)
self.assertEqual(len(result['rows']), self.num_items,
"Querying view on restore cluster did not return expected number of items")
self.log.info("Querying view on restore cluster returned expected number of items")
except TimeoutError:
self.fail("View could not be queried in restore cluster within timeout")
def test_backup_restore_with_gsi(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a GSI index on source cluster
        4. Backs up data and validates
        5. Restores data and validates
6. Ensures that same gsi index is created in restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
self.cluster_storage_mode = \
rest_src.get_index_settings()["indexer.settings.storage_mode"]
self.log.info("index storage mode: {0}".format(self.cluster_storage_mode))
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].cluster_ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
self.test_storage_mode = self.cluster_storage_mode
if "ephemeral" in self.bucket_type:
self.log.info("ephemeral bucket needs to set backup cluster to memopt for gsi.")
self.test_storage_mode = "memory_optimized"
self.quota = self._reset_storage_mode(rest_src, self.test_storage_mode)
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].cluster_ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
rest_src.create_bucket(bucket='default', ramQuotaMB=int(self.quota) - 1,
bucketType=self.bucket_type,
evictionPolicy="noEviction")
self.add_built_in_server_user(node=self.backupset.cluster_host)
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)),
start=0, end=self.num_items)
self.buckets = rest_src.get_buckets()
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
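        # Create a secondary (GSI) index with the cbindex tool on the source
        # cluster so that the backup captures an index definition.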
cmd = "cbindex -type create -bucket default -using %s -index age -fields=age " \
" -auth %s:%s" % (self.test_storage_mode,
self.master.rest_username,
self.master.rest_password)
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].cluster_ip, services=['kv', 'index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [], [])
rebalance.result()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
cmd = "cbindex -type list -auth %s:%s" % (self.master.rest_username,
self.master.rest_password)
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = shell.execute_command(command)
shell.log_command_output(output, error)
shell.disconnect()
try:
if len(output) > 1:
index_name_path = "Index:{0}/{1}".format(self.buckets[0].name, "age")
version = RestConnection(
self.backupset.restore_cluster_host).get_nodes_version()
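# From 7.0 onwards cbindex lists indexes under their scope/collection path. Note that the
# format string below relies on the bucket being named "default" so that "_{0}" expands
# to "_default" for both the scope and collection parts of the path.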
if version[:1] >= "7":
index_name_path = "Index:{0}/_{0}/_{0}/{1}".format(self.buckets[0].name, "age")
self.assertTrue(self._check_output(index_name_path, output),
"GSI index not created in restore cluster as expected")
self.log.info("GSI index created in restore cluster as expected")
else:
self.fail("GSI index not created in restore cluster as expected")
finally:
if "ephemeral" in self.bucket_type:
self.log.info("reset storage mode back to original")
shell = RemoteMachineShellConnection(self.backupset.cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)
shell.enable_diag_eval_on_non_local_hosts()
shell.disconnect()
self._reset_storage_mode(rest_src, self.cluster_storage_mode)
self._reset_storage_mode(rest_target, self.cluster_storage_mode)
def test_backup_merge_restore_with_gsi(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a GSI index on source cluster
4. Backs up data and validates
5. Restores data and validates
6. Ensures that the same GSI index is created in the restore cluster
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_src.add_node(self.servers[1].rest_username,
self.servers[1].rest_password,
self.servers[1].cluster_ip, services=['index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [],
[])
rebalance.result()
gen = DocumentGenerator('test_docs', '{{"Num1": {0}, "Num2": {1}}}',
list(range(100)), list(range(100)),
start=0, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
cmd = "cbindex -type create -bucket default -using forestdb -index " \
"num1 -fields=Num1"
remote_client = RemoteMachineShellConnection(
self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
cmd = "cbindex -type create -bucket default -using forestdb -index " \
"num2 -fields=Num2"
remote_client = RemoteMachineShellConnection(
self.backupset.cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if error or "Index created" not in output[-1]:
self.fail("GSI index cannot be created")
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = len(self.backups)
self.backup_merge_validate()
rest_target = RestConnection(self.backupset.restore_cluster_host)
rest_target.add_node(self.input.clusters[0][1].rest_username,
self.input.clusters[0][1].rest_password,
self.input.clusters[0][1].cluster_ip, services=['index'])
rebalance = self.cluster.async_rebalance(self.cluster_to_restore, [],
[])
rebalance.result()
start = self.number_of_backups_taken
end = self.number_of_backups_taken
self.backupset.start = start
self.backupset.end = end
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=">=")
cmd = "cbindex -type list"
remote_client = RemoteMachineShellConnection(
self.backupset.restore_cluster_host)
command = "{0}/{1}".format(self.cli_command_location, cmd)
output, error = remote_client.execute_command(command)
remote_client.log_command_output(output, error)
remote_client.disconnect()
if len(output) > 1:
self.assertTrue("Index:default/Num1" in output[1],
"GSI index not created in restore cluster as expected")
self.log.info("GSI index created in restore cluster as expected")
else:
self.fail("GSI index not created in restore cluster as expected")
def test_backup_restore_with_fts(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Creates a backupset
3. Creates a simple FTS index on source cluster
4. Backs up data and validates
5. Restores data and validates
6. Ensures that the same FTS index is created in the restore cluster
"""
self.test_fts = True
rest_src = RestConnection(self.backupset.cluster_host)
if "community" in self.cb_version:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].cluster_ip, services=['kv', 'index', 'n1ql', 'fts'])
else:
rest_src.add_node(self.servers[1].rest_username, self.servers[1].rest_password,
self.servers[1].cluster_ip, services=['kv', 'fts'])
rebalance = self.cluster.async_rebalance(self.cluster_to_backup, [], [])
rebalance.result()
gen = DocumentGenerator('test_docs', '{{"age": {0}}}', list(range(100)), start=0,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
rest_src_fts = RestConnection(self.servers[1])
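# Create a default FTS index and an alias for it on the source cluster, so that both
# definitions can be checked on the restore cluster after the restore completes.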
try:
from pytests.fts.fts_callable import FTSCallable
fts_obj = FTSCallable(nodes=self.servers, es_validate=False)
index = fts_obj.create_default_index(
index_name="index_default",
bucket_name="default")
fts_obj.wait_for_indexing_complete()
alias = fts_obj.create_alias(target_indexes=[index])
except Exception as ex:
self.fail(ex)
self.backup_cluster_validate()
if self.bucket_type != "ephemeral":
self._create_restore_cluster()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
rest_target_fts = RestConnection(self.input.clusters[0][1])
status = False
try:
status, content = rest_target_fts.get_fts_index_definition(index.name)
self.assertTrue(status and content['status'] == 'ok',
"FTS index not found in restore cluster as expected")
self.log.info("FTS index found in restore cluster as expected")
status, content = rest_target_fts.get_fts_index_definition(alias.name)
self.assertTrue(status and content['status'] == 'ok',
"FTS alias not found in restore cluster as expected")
self.log.info("FTS alias found in restore cluster as expected")
finally:
rest_src_fts.delete_fts_index(index.name)
rest_src_fts.delete_fts_index(alias.name)
if status:
rest_target_fts.delete_fts_index(index.name)
rest_target_fts.delete_fts_index(alias.name)
def test_backup_restore_with_xdcr(self):
"""
1. Creates an XDCR replication between the first two servers
2. Creates specified bucket on the cluster and loads it with given number of items
3. Backs up data and validates while replication is going on
4. Restores data and validates while replication is going on
"""
rest_src = RestConnection(self.backupset.cluster_host)
rest_dest = RestConnection(self.servers[1])
try:
rest_src.remove_all_replications()
rest_src.remove_all_remote_clusters()
rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port, self.backupset.cluster_host_username,
self.backupset.cluster_host_password, "C2")
rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
self.sleep(10)
repl_id = rest_src.start_replication('continuous', 'default', "C2")
if repl_id is not None:
self.log.info("Replication created successfully")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
self.sleep(10)
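# Back up and restore while the XDCR replication and the asynchronous document load
# are still in flight, then wait for the load tasks to finish.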
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
for task in tasks:
task.result()
finally:
rest_dest.delete_bucket()
def test_backup_restore_with_warmup(self):
"""
1. Creates specified bucket on the cluster and loads it with given number of items
2. Warms up the cluster host
3. Backs up data and validates while warmup is on
4. Restores data and validates while warmup is on
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
NodeHelper.do_a_warm_up(self.backupset.cluster_host)
self.sleep(30)
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
""" only membase bucket has warmup state """
if self.bucket_type == "membase":
NodeHelper.wait_warmup_completed([self.backupset.cluster_host])
def stat(self, key):
stats = StatsCommon.get_stats([self.master], 'default', "", key)
val = list(stats.values())[0]
if val.isdigit():
val = int(val)
return val
def load_to_dgm(self, active=75, ttl=0):
"""
Loads items until the cluster enters a DGM state where the active resident ratio is below the given percentage,
where active is an integer value between 0 and 100
"""
doc_size = 1024
curr_active = self.stat('vb_active_perc_mem_resident')
# go into heavy dgm
while curr_active > active:
curr_items = self.stat('curr_items')
gen_create = BlobGenerator('dgmkv', 'dgmkv-', doc_size, start=curr_items + 1, end=curr_items + 50000)
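# Load in 50,000-document batches; load errors (e.g. temporary OOM while in DGM) are
# ignored and the resident ratio is simply re-checked on the next iteration.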
try:
self._load_all_buckets(self.master, gen_create, "create", ttl)
except:
pass
curr_active = self.stat('vb_active_perc_mem_resident')
def test_backup_restore_with_dgm(self):
"""
1. Creates specified bucket on the cluster and loads it until dgm
2. Creates a backup set
3. Backs up data and validates
4. Restores data and validates
"""
self.load_to_dgm()
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_auto_compaction(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates auto compaction settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.set_auto_compaction(dbFragmentThresholdPercentage=80,
dbFragmentThreshold=100,
viewFragmntThresholdPercentage=80,
viewFragmntThreshold=100,
bucket="default")
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_update_notifications(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates notification settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.update_notifications("true")
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_backup_restore_with_alerts(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Updates alerts settings
3. Validates backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
rest = RestConnection(self.backupset.cluster_host)
rest.set_alerts_settings('couchbase@localhost', 'root@localhost', 'user', 'pwd')
self.backup_create()
self.backup_cluster_validate()
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_resume_restore(self):
"""
1. Creates specified bucket on the cluster and loads it
2. Performs a backup
3. Starts, then kills a restore
4. Performs and validates a restore using resume
"""
if not self.backupset.resume:
self.fail("Resume must be True for this test")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
self.log.info("Start to flush bucket")
self._all_buckets_flush()
restore_result = self.cluster.async_restore_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version,
force_updates=self.backupset.force_updates,
no_resume=True)
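# Wait until the restore has started executing, then kill cbbackupmgr so that the
# validated restore below has to exercise the resume path.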
state = ""
while state not in ("FINISHED", "EXECUTING"):
state = restore_result.state
self._kill_cbbackupmgr()
self.assertFalse(self._check_output("success", restore_result.result()))
self.backup_restore_validate(compare_uuid=False, seqno_compare_function=">=")
def test_merge_with_crash(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self._take_n_backups(n=5)
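# Start an async merge of backups 1-5, kill cbbackupmgr mid-merge, then verify that all
# of the original backups still appear in the list output.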
try:
merge_result = self.cluster.async_merge_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
start=1, end=5,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
self.sleep(10)
self._kill_cbbackupmgr()
merge_result.result(timeout=400)
except TimeoutError:
status, output, message = self.backup_list()
if not status:
self.fail(message)
backup_count = 0
for line in output:
if "entbackup" in line:
continue
if re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line):
backup_name = re.search("\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}.\d+-\d{2}_\d{2}", line).group()
if backup_name in self.backups:
backup_count += 1
self.log.info("{0} matched in list command output".format(backup_name))
self.assertEqual(backup_count, len(self.backups), "Number of backups after merge crash did not match")
self.log.info("Number of backups after merge crash matched")
def test_compact_with_crash(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
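# Record the backup list before compaction so it can be compared (approximately) against
# the list produced after the compaction is killed mid-run.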
status, output_before_compact, message = self.backup_list()
if not status:
self.fail(message)
try:
compact_result = self.cluster.async_compact_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
backup_to_compact=self.backupset.backup_to_compact,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
self.sleep(10)
self._kill_cbbackupmgr()
compact_result.result(timeout=400)
except TimeoutError:
status, output_after_compact, message = self.backup_list()
if not status:
self.fail(message)
status, message = self.validation_helper.validate_compact_lists(output_before_compact,
output_after_compact,
is_approx=True)
if not status:
self.fail(message)
self.log.info(message)
def test_backup_restore_misc(self):
"""
Misc scenarios for backup and restore
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backupset.name = "!@#$%^&"
output, error = self.backup_create()
self.assertTrue("Backup `!@#$%^` created successfully" in output[0],
"Backup could not be created with special characters")
self.log.info("Backup created with special characters")
self.backupset.name = "backup"
self.backup_create()
self.backup_cluster()
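# Corrupt the backup's shard_0.sqlite data file with dd and check that restore reports an
# internal error; then rename the repo directory and check the "repository not found" error.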
conn = RemoteMachineShellConnection(self.backupset.backup_host)
command = "ls -tr {0}/{1}/{2} | tail".format(self.backupset.directory, self.backupset.name, self.backups[0])
o, e = conn.execute_command(command)
data_dir = o[0]
conn.execute_command("dd if=/dev/zero of=/tmp/entbackup/backup/" +
str(self.backups[0]) +
"/" + data_dir + "/data/shard_0.sqlite" +
" bs=1024 count=100 seek=10 conv=notrunc")
output, error = self.backup_restore()
self.assertTrue("Restore failed due to an internal issue, see logs for details" in output[-1],
"Expected error not thrown when file is corrupt")
self.log.info("Expected error thrown when file is corrupted")
conn.execute_command("mv /tmp/entbackup/backup /tmp/entbackup/backup2")
conn.disconnect()
output, error = self.backup_restore()
self.assertTrue("Backup Repository `backup` not found" in output[-1], "Expected error message not thrown")
self.log.info("Expected error message thrown")
def test_backup_logs_for_keywords(self):
"""
Inspired by CBQE-6034.
1. Perform a Backup.
2. Scan backup logs for bad keywords.
Keywords:
1. CBQE-6034/MB-41131 - Check cbbackupmgr's build version/hash set correctly at build time
by scanning for 'cbbackupmgr version Unknown' in the logs.
2. Scan for 'panic' in the logs.
"""
# Populate the default bucket on self.master with documents
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
# Create backup archive and repository.
self.backup_create()
# Perform backup.
self.backup_cluster()
# Keywords to fail on (Keyword: str, at_start: bool, lines_before: int, lines_after: int)
bad_keywords = [
("cbbackupmgr version Unknown", False, 0, 0), # Checks cbbackupmgr build version/hash set correctly at build time
( "panic", True, 0, 12) # Checks for the panic keyword at start of sentence
]
# Scan logs for keywords in bad_keywords
for keyword, at_start, lines_before, lines_after in bad_keywords:
found, output, error = \
self._check_output_in_backup_logs(keyword, at_start = at_start, lines_before = lines_before, lines_after = lines_after)
if found:
self.fail(f"Found bad keyword(s) '{keyword}' in backup logs:\n" + "\n".join(output))
""" cbbackup restore enhancement only from vulcan """
def test_cbbackupmgr_collect_logs(self):
"""
cbbackupmgr collect-logs will collect logs to archive or
output to any path supplied with flag -o
CB_ARCHIVE_PATH
ex: cbbackupmgr collect-logs -a /tmp/backup
cbbackupmgr collect-logs -a /tmp/backup -o /tmp/logs
"""
if "5.5" > self.cb_version[:3]:
self.fail("This test is only for cb version 5.5 and later. ")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
self._collect_logs()
def test_cbbackupmgr_restore_with_ttl(self):
"""
cbbackupmgr restore --replace-ttl will replace ttl
value with flag --replace-ttl-with
ex: cbbackupmgr restore --replace-ttl all --replace-ttl-with 0
"""
if "5.5" > self.cb_version[:3]:
self.fail("This restore with ttl test is only for cb version 5.5 and later. ")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
if self.replace_ttl == "expired":
if self.bk_with_ttl:
self._load_all_buckets(self.master, gen, "create", int(self.bk_with_ttl))
else:
self._load_all_buckets(self.master, gen, "create", 0)
else:
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster_validate()
if self.bk_with_ttl:
self.sleep(int(self.bk_with_ttl) + 10, "wait items to be expired in backup")
compare_function = "=="
if self.replace_ttl_with:
compare_function = "<="
if self.should_fail:
self.backup_restore()
else:
self.backup_restore_validate(compare_uuid=False,
seqno_compare_function=compare_function)
def test_cbbackupmgr_restore_with_vbuckets_filter(self):
"""
cbbackupmgr restore --vbuckets-filter 2,3,4,5,6
it may require to get minimum 2 nodes servers to run this test
"""
if "5.5" > self.cb_version[:3]:
self.fail("This test is only for cb version 5.5 and later. ")
self.num_items = 1000
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
if self.should_fail:
self.backup_cluster()
else:
self.backup_cluster_validate()
if self.restore_should_fail:
self.backup_restore()
else:
self.backup_restore_validate()
def test_cbbackupmgr_with_eventing(self):
"""
Create backup cluster with saslbucket (default_bucket=False).
Backup cluster (backup_before_eventing=True for MB-34077)
Create events
Backup cluster
Create restore cluster
Restore data back to restore cluster
Check if metadata restored (backup_before_eventing=True)
Verify events restored back
"""
if "5.5" > self.cb_version[:3]:
self.fail("This eventing test is only for cb version 5.5 and later. ")
from pytests.eventing.eventing_constants import HANDLER_CODE
from lib.testconstants import STANDARD_BUCKET_PORT
self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
self.create_functions_buckets = self.input.param('create_functions_buckets', True)
self.docs_per_day = self.input.param("doc-per-day", 1)
self.use_memory_manager = self.input.param('use_memory_manager', True)
self.backup_before_eventing = self.input.param('backup_before_eventing', False)
bucket_params = self._create_bucket_params(server=self.master, size=256,
replicas=self.num_replicas)
self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.src_bucket = RestConnection(self.master).get_buckets()
self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.backup_create()
if self.backup_before_eventing:
self.backup_cluster()
self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
bucket_params=bucket_params)
self.buckets = RestConnection(self.master).get_buckets()
self.gens_load = self.generate_docs(self.docs_per_day)
self.expiry = 3
self.restServer = self.get_nodes_from_services_map(service_type="eventing")
self.rest = RestConnection(self.restServer)
self.load(self.gens_load, buckets=self.buckets, flag=self.item_flag, verify_data=False,
batch_size=self.batch_size)
function_name = "Function_{0}_{1}".format(randint(1, 1000000000), self._testMethodName)
self.function_name = function_name[0:90]
body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)
bk_events_created = False
rs_events_created = False
try:
self.deploy_function(body)
bk_events_created = True
self.backup_cluster()
rest_bk = RestConnection(self.backupset.cluster_host)
bk_fxn = rest_bk.get_all_functions()
backup_index = 0
if self.backup_before_eventing:
backup_index = 1
self.backupset.start = 1
self.backupset.end = 2
if bk_fxn != "":
self._verify_backup_events_definition(json.loads(bk_fxn), body, backup_index = backup_index)
self.backup_restore()
rest_rs = RestConnection(self.backupset.restore_cluster_host)
if self.backup_before_eventing:
self.assertTrue('metadata' in [bucket.name for bucket in rest_rs.get_buckets()])
self.bkrs_resume_function(body, rest_rs)
rs_events_created = True
self._verify_restore_events_definition(bk_fxn)
except Exception as e:
self.fail(e)
finally:
master_nodes = [self.backupset.cluster_host,
self.backupset.restore_cluster_host]
for node in master_nodes:
rest = RestConnection(node)
self.bkrs_undeploy_and_delete_function(body, rest, node)
self.rest = RestConnection(self.master)
raise Exception('Test failed. Just clean up eventing function until MB-47236 fixed')
def test_bkrs_logs_when_no_mutations_received(self):
"""
Test that we log an expected message when we don't receive any
mutations for more than 60 seconds. MB-33533.
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0. "
"Version was run with {}".format(version))
rest_conn = RestConnection(self.backupset.cluster_host)
rest_conn.update_autofailover_settings(enabled=False,
timeout=0)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
# We need to wait until the data transfer starts before we pause memcached.
# Read the backup file output until we find evidence of a DCP connection,
# or the backup finishes.
backup_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = "tail -n 1 {}/logs/backup-*.log | grep ' (DCP) '"\
.format(self.backupset.directory)
Future.wait_until(
lambda: (bool(backup_client.execute_command(command)[0]) or backup_result.done()),
lambda x: x is True,
200,
interval_time=0.1,
exponential_backoff=False)
# If the backup finished and we never saw a DCP connection something's not right.
if backup_result.done():
self.fail("Never found evidence of open DCP stream in backup logs.")
# Pause memcached to trigger the log message.
cluster_client = RemoteMachineShellConnection(self.backupset.cluster_host)
cluster_client.pause_memcached(self.os_name, timesleep=200)
cluster_client.unpause_memcached(self.os_name)
cluster_client.disconnect()
backup_result.result(timeout=200)
expected_message = "(timed out after 3m0s|Stream has been inactive for 1m0s)"
command = "cat {}/logs/backup-*.log | grep -E '{}' "\
.format(self.backupset.directory, expected_message)
output, _ = backup_client.execute_command(command)
if not output:
self.fail("Mutations were blocked for over 60 seconds, "
"but this wasn't logged.")
backup_client.disconnect()
def test_log_to_stdout(self):
"""
Test that if the log-to-stdout flag is provided cbbackupmgr will log to stdout
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0"
"Version was run with {}".format(version))
self.backupset.log_to_stdout = True
# Test config
output, err = self.backup_create()
if err:
self.fail("Could not create backup directory")
# This is a line that is normally printed in the logs but should now instead be printed to stdout
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
# Test backup
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
output, err = self.backup_cluster()
if err:
self.fail("Could not backup")
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
self.backupset.force_updates = True
# Test restore
output, err = self.backup_restore()
if err:
self.fail("Could not restore")
if "(Cmd) cbbackupmgr version" not in " ".join(output):
self.fail("Did not log to standard out")
def test_auto_select_threads(self):
"""
Test that the --auto-select-threads flag actually selects the threads
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0"
"Version was run with {}".format(version))
self.backupset.auto_select_threads = True
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
self.backup_cluster()
# If the threads were auto-selected then a log message should appear
shell = RemoteMachineShellConnection(self.backupset.backup_host)
output, _ = shell.execute_command("cat {}/logs/backup-*.log | grep"
" '(Cmd) Automatically set the number"
" of threads to'".format(self.backupset.directory))
if not output:
self.fail("Threads were not automatically selected")
# Remove the logs and test the same thing for restore
shell.execute_command("rm -r {}/logs".format(self.backupset.directory))
self.backupset.force_updates = True
self.backup_restore()
output, _ = shell.execute_command("cat {}/logs/backup-*.log | grep"
" '(Cmd) Automatically set the number"
" of threads to'".format(self.backupset.directory))
if not output:
self.fail("Threads were not automatically selected")
shell.disconnect()
def test_backup_remove_take_backup_range(self):
"""
Test the remove --backups flag; it should be able to take:
- backup indexes, e.g. (0,3)
- backup directory name ranges
- dd-mm-yyyy date ranges
To do this the steps are as follow:
1. Load some data to cluster
2. Create 3 backups
3. Try the different inputs and verify expected outputs
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0"
"Version was run with {}".format(version))
# Tests based on actual directory names are built dynamically below, once the backups exist.
test_ranges_positive_cases = [
"1,3", # valid index range
"10-01-2000,10-01-3000", # valid date range
]
test_range_invalid_cases = [
"1,-10", # invalid end range negative number
"0,100", # invalid range as there are only 3 backups
"2,0", # invalid range start bigger than end
"01/01/2000,01/01/3000", # invalid date format
"01-30-2000,01-30-3000", # invalid date format
]
# Load some data into the cluster
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
for test in test_ranges_positive_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(n=3)
# remove the backup directory
success, _, _ = self.backup_remove(test)
if not success:
self.fail("Failed to remove backups")
self._verify_backup_directory_count(0)
self._delete_repo()
for test in test_range_invalid_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(n=3)
success, _, _ = self.backup_remove(test)
if success:
self.fail("Test should have failed")
self._verify_backup_directory_count(3)
self._delete_repo()
# Test based on dynamic file names
self.backup_create()
self._take_n_backups(n=3)
shell = RemoteMachineShellConnection(self.backupset.backup_host)
command = (
f"ls -l {self.backupset.objstore_staging_directory + '/' if self.objstore_provider else ''}"
f"{self.backupset.directory}/{self.backupset.name}"
)
list_dir, _ = shell.execute_command(command)
list_dir = " ".join(list_dir)
shell.disconnect()
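# Extract the timestamped backup directory names from the repository listing and sort them
# chronologically; the remaining cases are built from these names.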
dir_names = re.findall(r'(?P<dir>\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}\.\d+(?:(?:[+-]\d{2}_\d{2})|Z))', list_dir)
dir_names.sort()
if len(dir_names) != 3:
self.fail("Expected 3 backups instead have {0}".format(len(dir_names)))
# test non existent directory name
success, _, _ = self.backup_remove("3000-09-30T10_42_37.64647+01_00")
if success:
self.fail("Should not be able to remove non existent directory")
self._verify_backup_directory_count(3)
# test start > backup start
success, _, _ = self.backup_remove("3000-09-30T10_42_37.64647+01_00,3000-09-30T10_43_37.64647+01_00")
if success:
self.fail("Should not be able to remove by directory range where the start is in the future")
self._verify_backup_directory_count(3)
# test start == backup start end > backup end
success, _, _ = self.backup_remove("{0}.64647+01_00,3000-09-30T10_43_37.64647+01_00".format(dir_names[0]))
if success:
self.fail("Should not be able to remove by directory range where the end is in the future")
self._verify_backup_directory_count(3)
# test start before end
success, _, _ = self.backup_remove("{0},{1}".format(dir_names[-1], dir_names[0]))
if success:
self.fail("Should not be able to remove by directory range where start is after end")
self._verify_backup_directory_count(3)
# test valid single directory
success, _, _ = self.backup_remove("{0}".format(dir_names[0]))
if not success:
self.fail("Should not have failed to remove directories by backup directory name")
self._verify_backup_directory_count(2)
# test valid
success, _, _ = self.backup_remove("{0},{1}".format(dir_names[1], dir_names[-1]))
if not success:
self.fail("Should not have failed to remove directories by backup directory name range")
self._verify_backup_directory_count(0)
def test_backup_merge_date_range(self):
"""
Test the merge --date-range flag; it should be able to take:
- backup indexes, e.g. (0,3)
- backup directory name ranges
- dd-mm-yyyy date ranges
To do this the steps are as follow:
1. Load some data to cluster
2. Create 3 backups
3. Try the different inputs and verify expected outputs
:return:
"""
version = RestConnection(self.backupset.backup_host).get_nodes_version()
if "6.5" > version[:3]:
self.fail("Test not supported for versions pre 6.5.0"
"Version was run with {}".format(version))
# Tests based on actual directory names are built dynamically below, once the backups exist.
test_ranges_positive_cases = [
"0,2", # valid index range
"10-01-2000,10-01-3000", # valid date range
]
test_range_invalid_cases = [
"1,-10", # invalid end range negative number
"0,100", # invalid range as there are only 3 backups
"2,0", # invalid range start bigger than end
"01/01/2000,01/01/3000", # invalid date format
"01-30-2000,01-30-3000", # invalid date format
]
# Load some data into the cluster
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
for test in test_ranges_positive_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(3)
self.backupset.date_range = test
status, output , _ = self.backup_merge()
if not status:
self.fail("Failed to merge backups: {0}".format(output))
self._verify_backup_directory_count(1)
self._delete_repo()
for test in test_range_invalid_cases:
# create the backup repository and make three backups
self.backup_create()
self._take_n_backups(3)
self.backupset.date_range = test
status, output, _ = self.backup_merge()
if status:
self.fail("Test should have failed")
self._verify_backup_directory_count(3)
# Test based on dynamic file names
shell = RemoteMachineShellConnection(self.backupset.backup_host)
command = (
f"ls -l {self.backupset.objstore_staging_directory + '/' if self.objstore_provider else ''}"
f"{self.backupset.directory}/{self.backupset.name}"
)
list_dir, _ = shell.execute_command(command)
list_dir = " ".join(list_dir)
shell.disconnect()
dir_names = re.findall(r'(?P<dir>\d{4}-\d{2}-\d{2}T\d{2}_\d{2}_\d{2}\.\d+(?:(?:[+-]\d{2}_\d{2})|Z))', list_dir)
dir_names.sort()
if len(dir_names) != 3:
self.fail("Expected 3 backups instead have {0}".format(len(dir_names)))
# test start > backup start
self.backupset.date_range = "3000-09-30T10_42_37.64647+01_00,3000-09-30T10_43_37.64647+01_00"
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the start is in the future")
self._verify_backup_directory_count(3)
# test start == backup start end > backup end
self.backupset.date_range = "{0}.64647+01_00,3000-09-30T10_43_37.64647+01_00".format(dir_names[0])
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the end is in the future")
self._verify_backup_directory_count(3)
# test start before end
self.backupset.date_range = "{0},{1}".format(dir_names[-1], dir_names[0])
status, _, _ = self.backup_merge()
if status:
self.fail("Should not be able to merge by directory range where the start is after the end")
self._verify_backup_directory_count(3)
# test valid
self.backupset.date_range = "{0},{1}".format(dir_names[0], dir_names[-1])
status, _, _ = self.backup_merge()
if not status:
self.fail("Should not have failed to merge")
self._verify_backup_directory_count(1)
def test_info_while_other_task_runs(self):
"""
Test that info can run at the same time as other backup tasks
1. Load some data to the cluster
2. Create a backup repository
3. Start an async backup
4. Constantly run info
5. No errors should be returned while info runs concurrently
:return:
"""
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create", 0)
self.backup_create()
# Test with backup
backup_result = self.cluster.async_backup_cluster(backupset=self.backupset,
objstore_provider=self.objstore_provider,
resume=self.backupset.resume, purge=self.backupset.purge,
no_progress_bar=self.no_progress_bar,
cli_command_location=self.cli_command_location,
cb_version=self.cb_version)
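# Run info repeatedly while the backup is in progress; per the test intent it should not
# return an error, and the backup itself should still complete successfully.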
for i in range(10):
_, err = self.backup_info(True)
if err:
self.fail("Should have been able to run at the same time as the backup")
self.sleep(2)
output = backup_result.result(timeout=200)
self.assertTrue(self._check_output("Backup completed successfully", output),
"Backup failed with concurrent info")
# Test with merge
self._take_n_backups(5)
merge_result = self.cluster.async_merge_cluster(backup_host=self.backupset.backup_host,
backups=self.backups,
start=1, end=5,
directory=self.backupset.directory,
name=self.backupset.name,
cli_command_location=self.cli_command_location)
for i in range(10):
_, err = self.backup_info(True)
if err:
self.fail("Should have been able to run at the same time as the merge")
self.sleep(2)
output = merge_result.result(timeout=200)
self.assertTrue(self._check_output("Merge completed successfully", output),
"Merge failed while running info at the same time")
def test_config_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_create(del_old_backup=False)
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_backup_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_cluster()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_info_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
output, _ = self.backup_info()
self.assertIn('the specified bucket does not exist', output[0].lower())
def test_restore_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
self.restore_only = True
output, _ = self.backup_restore()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_remove_without_objstore_bucket(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.objstore_provider.remove_bucket()
_, output, _ = self.backup_remove()
self.assertRegex(output[0].lower(), re.compile("bucket '.*' not found"))
def test_config_create_multiple_repos_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backupset.name = "another_repo"
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
self.backup_create_validate()
def test_backup_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.backup_create_validate()
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
def test_info_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
output, error = self.backup_info()
if error:
self.fail(f"Expected to be able to info backup where staging directory has been removed: {error}")
self.assertEqual(json.loads(output[0])['count'], 1,
"Expected to find a single backup even though the staging directory was removed")
def test_restore_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
self.backup_restore_validate()
def test_remove_with_remove_staging_directory(self):
self.assertIsNotNone(self.objstore_provider, "Test requires an object store provider")
self.backup_create_validate()
self.backup_cluster_validate()
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
self.objstore_provider._remove_staging_directory(remote_client.extract_remote_info().type.lower(), remote_client)
success, _, _ = self.backup_remove()
self.assertTrue(success, "Expected to have removed backups even though the staging directory was removed")
def test_restore_start_after_end(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 1
output, _ = self.backup_restore()
self.assertEqual(len(output), 1)
self.assertIn("range start", output[0])
self.assertIn("cannot be before end", output[0])
def test_restore_single_full_backup(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = 1
self._all_buckets_flush()
self.backup_restore_validate()
def test_restore_single_incr_backup(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 2
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_start_full_end_incr(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self._load_all_buckets(self.master, gen, "create")
self.backup_cluster_validate()
self.backupset.start = 1
self.backupset.end = 2
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_start_incr_end_full(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
for _ in range(2):
self.backup_cluster_validate()
self._load_all_buckets(self.master, gen, "create")
self.backupset.full_backup = True
self.backup_cluster_validate()
self.backupset.start = 2
self.backupset.end = 3
self._all_buckets_flush()
self.backup_restore_validate(seqno_compare_function=">=")
def test_cbbackup_with_big_rev(self):
# automation ticket MB-38683
# verified test failed in build 6.6.0-7680 and passed in 6.6.0-7685
from ep_mc_bin_client import MemcachedClient, MemcachedError
bucket = 'default'
value = "value"
expiry = 0
rev_seq = 2**64-1
key = 'test_with_meta'
mc = MemcachedClient(self.master.ip, 11210)
mc.sasl_auth_plain('Administrator', 'password')
mc.bucket_select(bucket)
self.log.info("pushing a key with large rev_seq {0} to bucket".format(rev_seq))
try:
mc.setWithMeta(key, 'value', 0, 0, rev_seq, 0x1512a3186faa0000)
meta_key = mc.getMeta(key)
self.log.info("key meta: {0}".format(meta_key))
except MemcachedError as error:
msg = "unable to push key : {0} error : {1}"
self.log.error(msg.format(key, error.status))
self.fail(msg.format(key, error.status))
client = RemoteMachineShellConnection(self.backupset.backup_host)
client.execute_command("rm -rf {0}/backup".format(self.tmp_path))
client.execute_command("mkdir {0}backup".format(self.tmp_path))
cmd = "{0}cbbackup{1} -u Administrator -p password http://{2}:8091 {3}backup"\
.format(self.cli_command_location, self.cmd_ext, self.master.ip, self.tmp_path)
try:
cbbackup_run = False
output, error = client.execute_command(cmd, timeout=20)
cbbackup_run = True
if not self._check_output("done", error):
self.fail("Failed to run cbbackup with large rev_seq")
except Exception as e:
if e and not cbbackup_run:
self.fail("Failed to run cbbackup with large rev_seq")
finally:
client.execute_command("rm -rf {0}/backup".format(self.tmp_path))
client.disconnect()
def test_backup_consistent_metadata(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
backup_threads = []
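# Start two backups of the same archive concurrently; one of them is expected to fail with
# "failed to lock archive", which proves the archive lock is held during a backup.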
backup_thread_1 = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread_1)
backup_thread_1.start()
backup_thread_2 = Thread(target=self.backup_cluster)
backup_threads.append(backup_thread_2)
backup_thread_2.start()
for backup_thread in backup_threads:
backup_thread.join()
consistent_metadata = False
for output in self.backup_outputs:
if self._check_output("Error backing up cluster: failed to lock archive", output):
consistent_metadata = True
if not consistent_metadata:
self.fail("Backup does not lock while running backup")
def test_restore_consistent_metadata(self):
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen, "create")
self.backup_create_validate()
self.backup_cluster()
restore_threads = []
restore_thread_1 = Thread(target=self.backup_restore)
restore_threads.append(restore_thread_1)
restore_thread_1.start()
restore_thread_2 = Thread(target=self.backup_restore)
restore_threads.append(restore_thread_2)
self.create_bucket_count = 1
restore_thread_2.start()
count = 0
for restore_thread in restore_threads:
restore_thread.join()
consistent_metadata = False
for output in self.restore_outputs:
if self._check_output("Error restoring cluster: failed to lock archive", output):
consistent_metadata = True
break
if not consistent_metadata:
self.fail("Restore does not lock while running restore")
def test_info_backup_merge_remove(self, cluster, no_of_backups):
""" Test Scenario: Create Buckets, Load Documents, Take 'no_of_backups' backups, Merge and Remove a Bucket
This function creates a scenario in which:
1. Buckets are created and loaded with documents.
2. A variable number of Backups >=6 are taken.
3. Backups 2 to 4 are merged.
4. The 2nd last bucket from the end is removed.
Args:
cluster list: A list of 'ServerInfo' that form a cluster to backup.
no_of_backups (int): The number of backups to perform.
"""
# Add built-in user cbadminbucket to backup cluster
self.add_built_in_server_user(node=self.backupset.cluster_host)
# Assemble cluster if more than 1 node in cluster
if len(cluster) > 1:
self.cluster.async_rebalance(cluster, cluster[1:], []).result()
# Take 'no_of_backups' backups
self.backup_create()
self._take_n_backups(n=no_of_backups)
# Merge
self.backupset.start, self.backupset.end = 2, 4
self.backup_merge()
# Remove the second-to-last backup
self.backup_remove(self.backups.pop(-2), verify_cluster_stats=False)
def test_magma_couchstore_compatibility(self):
""" Test that couchstore and magma are compatible
Backup couchstore > restore to magma
Backup magma > restore to couchstore
"""
restore_backend = "couchstore" if self.input.param("bucket_storage", "") == "magma" else "magma"
gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
self.log.info("*** start to load items to all buckets")
self._load_all_buckets(self.master, gen, "create", 0)
self.log.info("*** done loading items to all buckets")
self.backup_create_validate()
self.backup_cluster_validate()
# Tear down and replace bucket with opposite storage backend
rest_client = RestConnection(self.master)
rest_client.delete_bucket()
rest_client.create_bucket(bucket="default", ramQuotaMB=256,
storageBackend=restore_backend, replicaNumber=0)
self.backup_restore_validate()
def test_ee_only_features(self):
""" Test that EE only features do not work on CE servers
NOTE: PITR currently does nothing, so succeeds on CE.
This should be included when PITR is added properly
This is also true for:
Backing up users,
Auto rebuild of indexes
Params:
examine (bool): Whether to test examine.
merge (bool): Whether to test merge.
s3 (bool): Whether to test s3 cloud backup.
consistency_check (bool): Whether to test consistency_check.
coll_restore (bool): Whether to test collection/scope level restore.
"""
examine = self.input.param('examine', False)
merge = self.input.param('merge', False)
s3 = self.input.param('s3', False)
consistency_check = self.input.param('consistency_check', False)
coll_restore = self.input.param('coll_restore', False)
remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
command = f"{self.cli_command_location}/cbbackupmgr"
sub_command = ""
self.backup_create()
if examine:
sub_command = 'examine -a archive -r repo -k asdf --collection-string asdf.asdf.asdf'
elif merge:
sub_command = 'merge -a archive -r repo'
elif s3:
sub_command = f'backup -a s3://backup -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password'
elif consistency_check:
sub_command = f'backup -a {self.backupset.directory} -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password --consistency-check 1'
elif coll_restore:
sub_command = f'restore -a {self.backupset.directory} -r {self.backupset.name}\
-c {self.backupset.backup_host.ip}:{self.backupset.backup_host.port}\
-u Administrator -p password --include-data asdf.asdf.asdf'
if not sub_command:
self.fail("Must provide a subcommand!")
output, error = remote_client.execute_command(f"{command} {sub_command}")
self.log.info(f"ERROR from command: {error}")
self.log.info(f"OUTPUT from command: {output}")
if s3 and "7.0.0" in self.cb_version:
# The s3 error message differs slightly in 7.0.0
self.assertIn("an enterprise only feature", output[0])
else:
self.assertIn("an Enterprise Edition feature", output[0])
def test_analytics_synonyms(self):
""" Test analytics synonyms can be restored
Params:
dataverses (int): Number of dataverses to create.
datasets (int): Number of datasets to create.
synonyms (int): Number of synonyms to create.
"""
class Query:
""" A class to execute analytics queries """
def __init__(self, server, username, password):
self.restconn = RestConnection(server)
def execute(self, query):
return self.restconn.execute_statement_on_cbas(query, None)
def get_synonyms(self):
synonyms = set()
for result in json.loads(self.execute("select * from Metadata.`Synonym`"))['results']:
synonym = result['Synonym']
synonym_name = synonym['SynonymName']
synonym_target = synonym['ObjectDataverseName'] + '.' + synonym['ObjectName']
synonym_dataverse = synonym['DataverseName']
synonyms.add((synonym_name, synonym_target, synonym_dataverse))
return synonyms
def get_synonyms_count(self):
return json.loads(self.execute("select count(*) as count from Metadata.`Synonym`;"))['results'][0]['count']
class Dataset:
def __init__(self, name, bucket, clause=None):
self.name, self.bucket, self.clause = name, bucket, clause
def get_where_clause(self):
return f" WHERE {self.clause}" if self.clause else ""
class Synonym:
def __init__(self, name, target):
self.name, self.target = name, target
class Dataverse:
def __init__(self, name):
self.name = name
self.datasets = set()
self.synonyms = set()
def add_dataset(self, dataset):
self.datasets.add(dataset)
def add_synonym(self, synonym):
self.synonyms.add(synonym)
def next_dataset_name(self):
return f"dat_{len(self.datasets)}"
def next_synonym_name(self):
return f"syn_{len(self.synonyms)}"
class Analytics:
def __init__(self, query):
self.query, self.dataverses = query, set()
def add_dataverse(self, dataverse):
self.dataverses.add(dataverse)
def next_dataverse_name(self):
return f"dtv_{len(self.dataverses)}"
def pick_target_for_synonym(self):
choices = [f"{dataverse.name}.{dataset.name}" for dataverse in self.dataverses for dataset in dataverse.datasets]
if choices:
return choice(choices)
return None
def create(self):
# Create daterverses and datasets
for dataverse in self.dataverses:
self.query.execute(f"CREATE dataverse {dataverse.name}")
for dataset in dataverse.datasets:
self.query.execute(f"CREATE DATASET {dataverse.name}.{dataset.name} ON {dataset.bucket}{dataset.get_where_clause()}")
# Create synonyms
for dataverse in self.dataverses:
for synonym in dataverse.synonyms:
self.query.execute(f"CREATE analytics synonym {dataverse.name}.{synonym.name} FOR {synonym.target}")
def delete(self):
for dataverse in self.dataverses:
for dataset in dataverse.datasets:
self.query.execute(f"DROP DATASET {dataverse.name}.{dataset.name}")
for synonym in dataverse.synonyms:
self.query.execute(f"DROP analytics synonym {dataverse.name}.{synonym.name}")
self.query.execute(f"DROP dataverse {dataverse.name}")
class AnalyticsTest:
def __init__(self, backup, no_of_dataverses, no_of_datasets, no_of_synonyms, analytics_server):
# The base class
self.backup = backup
# Test parameters
self.no_of_dataverses, self.no_of_datasets, self.no_of_synonyms = no_of_dataverses, no_of_datasets, no_of_synonyms
# The number of synonyms that get created
self.no_of_synonyms_created = no_of_dataverses * no_of_synonyms
# The object that's used to run queries on the server running analytics
self.query = Query(analytics_server, analytics_server.rest_username, analytics_server.rest_password)
# The object that represents our current model of analytics
self.analytics = Analytics(self.query)
def test_analytics(self):
# Define the analytics model (i.e. which dataverses, datasets and synonyms are present)
for i in range(self.no_of_dataverses):
dataverse = Dataverse(self.analytics.next_dataverse_name())
self.analytics.add_dataverse(dataverse)
for j in range(self.no_of_datasets):
dataset = Dataset(dataverse.next_dataset_name(), 'default')
dataverse.add_dataset(dataset)
for j in range(self.no_of_synonyms):
synonym = Synonym(dataverse.next_synonym_name(), self.analytics.pick_target_for_synonym())
dataverse.add_synonym(synonym)
# Create dataverses, datasets and synonyms
self.analytics.create()
self.backup.assertEqual(self.query.get_synonyms_count(), self.no_of_synonyms_created)
# Create a repository
self.backup.backup_create()
# Take a backup
self.backup.backup_cluster()
# Delete all analytics related stuff
self.analytics.delete()
self.backup.assertEqual(self.query.get_synonyms_count(), 0)
# Perform a one off restore
self.backup.backup_restore()
synonyms = self.query.get_synonyms()
# Check synonyms have been restored
for dataverse in self.analytics.dataverses:
for synonym in dataverse.synonyms:
self.backup.assertIn((synonym.name, synonym.target, dataverse.name), synonyms)
# The server that will be reprovisioned with analytics
analytics_server = self.restore_cluster_host = self.servers[2]
# Add a server and provision it with analytics
self.add_server_with_custom_services(analytics_server, services=["cbas"])
# Wait for the analytics service to finish warming up
self.assertTrue(RestConnection(analytics_server).wait_until_cbas_is_ready(100))
# Run the analytics test
AnalyticsTest(self, self.input.param("dataverses", 5), self.input.param("datasets", 5), self.input.param("synonyms", 5), analytics_server).test_analytics()
def test_info_after_backup_merge_remove(self):
""" CBQE-5475: Test cbbackupmgr info comprehensively after performing backup, merge and remove
Test params:
flag_depth = [0,1,2,3]
check_tabular = [True, False]
check_all_flag = [True, False]
dgm_run = [True, False]
sasl_buckets >= 1
Comprehensive test: flag_depth=3,check_tabular=True,check_all_flag=True,dgm_run=True,sasl_buckets=2
Scenario:
Perform backup, merge and remove to mutate info output.
Cases tested:
flag_depth>=0: --archive,
flag_depth>=1: --archive --repo
flag_depth>=2: --archive --repo --backup
flag_depth>=3: --archive --repo --backup --collection-string in version>7.0/--bucket in version<=6.6
Output types tested for each of the previous cases:
check_tabular>=False: using --json flag (Checks JSON output)
check_tabular = True: no --json flag (Parses tabular output to reflect JSON output)
State of all flag:
check_all_flag>=False:
using --all flag (e.g. for --archive --all checks all repos in archive, backups in repos, buckets in backups)
check_all_flag = True:
no --all flag (e.g. for --archive checks contents of the archive only)
Total number of cases: 4 (cases) * 2 (output types) * 2 (all flag state) = 16
"""
import os
import pprint
import itertools
import parse_cbbackupmgr_info as parse_info
pp = pprint.PrettyPrinter(indent=4)
# Params
flag_depth = self.input.param('flag_depth', 3)
check_tabular = self.input.param('check_tabular', True)
check_all_flag = self.input.param('check_all_flag', True)
# The minimum number of backups is 6
min_backups = 6
no_of_backups = max(self.backupset.number_of_backups, min_backups)
if self.backupset.number_of_backups < min_backups:
self.log.warn("number_of_backups increased from {} to {}".format(self.backupset.number_of_backups, min_backups))
# Select backup cluster
cluster = [self.backupset.cluster_host]
# Create Buckets, Load Documents, Take n backups, Merge and Remove a Bucket
self.test_info_backup_merge_remove(cluster, no_of_backups)
# Create lists of expected output from the info command
types = set(['FULL', 'MERGE - FULL', 'MERGE - INCR', 'INCR'])
expected_archs = [os.path.basename(self.backupset.directory)]
expected_repos = [self.backupset.name]
expected_backs = {self.backupset.name: self.backups}
expected_bucks = [bucket.name for bucket in self.buckets]
def check_arch(arch, tabular=False):
""" Checks the archive dictionary.
Args:
arch (dict): A dictionary containing archive information.
Returns:
list: A list containing the repositories in the archive.
"""
expected_keys = [u'archive_uuid', u'name', u'repos']
self.assertTrue(set(expected_keys).issubset(arch.keys()))
archive_uuid, name, repos = [arch[key] for key in expected_keys]
# Check archive name is correct
self.assertTrue(name in expected_archs)
# Check repos names are correct
self.assertEqual(set(expected_repos), set(repo['name'] for repo in repos))
# Check repo size is > 0
self.assertTrue(all(repo['size'] > 0 for repo in repos))
# Check backup counts are correct
self.assertTrue(all(repo['count'] == len(expected_backs[repo['name']]) for repo in repos))
return repos
def check_repo(repo, tabular=False):
""" Checks the repository dictionary.
Args:
repo (dict): A dictionary containing repository information.
Returns:
list: A list containing the backups in the repository.
"""
expected_keys = [u'count', u'backups', u'name', u'size']
self.assertTrue(set(expected_keys).issubset(repo.keys()))
count, backups, name, size = [repo[key] for key in expected_keys]
# Check repo name is correct
self.assertTrue(name in expected_repos)
# Check repo size is greater than 0
self.assertTrue(size > 0)
# Check number of backups is correct
self.assertEqual(len(backups), len(expected_backs[name]))
# Check backup names
self.assertEqual(set(backup['date'] for backup in backups), set(expected_backs[name]))
# Check backup types
self.assertTrue(set(backup['type'] for backup in backups).issubset(types))
# Check complete status
self.assertTrue(all(backup['complete'] for backup in backups))
return backups
def check_back(backup, tabular=False):
""" Checks the backup dictionary.
Args:
backup (dict): A dictionary containing backup information.
Returns:
list: A list containing the buckets in the backup.
"""
expected_keys = [u'complete', u'fts_alias', u'buckets',
u'source_cluster_uuid', u'source', u'date', u'type', u'events', u'size']
self.assertTrue(set(expected_keys).issubset(backup.keys()))
complete, fts_alias, buckets, source_cluster_uuid, source, date, _type_, events, size = \
[backup[key] for key in expected_keys]
# Check backup name is correct
self.assertTrue(date in self.backups)
# Check backup size is greater than 0
self.assertTrue(size > 0)
# Check type exists
self.assertTrue(_type_ in types)
# Check bucket names
self.assertEqual(set(bucket['name'] for bucket in buckets), set(expected_bucks))
# Check bucket sizes
self.assertTrue(all(bucket['size'] >= 0 for bucket in buckets))
# Check items are either 0 or equal to self.num_items
self.assertTrue(all(bucket['items'] in [0, self.num_items] for bucket in buckets))
return buckets
def check_buck(bucket, tabular=False):
""" Checks the bucket dictionary.
Args:
bucket (dict): A dictionary containing bucket information.
Returns:
None
"""
expected_keys = [u'index_count', u'views_count', u'items', u'mutations',
u'tombstones', u'fts_count', u'analytics_count', u'size', u'name']
self.assertTrue(set(expected_keys).issubset(bucket.keys()))
index_count, views_count, items, mutations, tombstones, fts_count, \
analytics_count, size, name = [bucket[key] for key in expected_keys]
# Check bucket name
self.assertTrue(name in expected_bucks)
# Check bucket size
self.assertTrue(size >= 0)
# Check bucket items
self.assertTrue(items in [0, self.num_items])
def print_tree(tree):
if self.debug_logs:
pp.pprint(tree)
def parse_output(use_json, output):
""" Parses the JSON/Tabular output into a Python dictionary
Args:
use_json (bool): If True expects JSON output to parse. Otherwise, expects tabular data to parse.
output (list): JSON or Tabular data to parse into a dictionary.
Returns:
dict: A dictionary containing the parsed output.
"""
return json.loads(output[0]) if use_json else parse_info.construct_tree(output)
# Configure initial flags
json_options, all_flag_options = [True], [False]
# Enable tabular output tests
if check_tabular:
json_options.append(False)
# Enable all flag tests
if check_all_flag:
all_flag_options.append(True)
def output_logs(flag_depth, use_json, all_flag):
""" Outputs flags tested in current test case."""
use_json = "--json" if use_json else ""
all_flag = "--all" if all_flag else ""
flags = " ".join(["--archive", "--repo", "--backup", "--bucket"][: flag_depth + 1])
self.log.info("---")
self.log.info(f"Testing Flags: {flags} {use_json} {all_flag}")
self.log.info("---")
# Perform tests
for use_json, all_flag in itertools.product(json_options, all_flag_options):
output_logs(0, use_json, all_flag)
# cbbackupmgr info --archive
arch = parse_output(use_json, self.get_backup_info(json=use_json, all_flag=all_flag))
print_tree(arch)
repos = check_arch(arch)
if all_flag:
[check_buck(buck) for repo in repos for back in check_repo(repo) for buck in check_back(back)]
if flag_depth < 1:
continue
output_logs(1, use_json, all_flag)
# cbbackupmgr info --archive --repo
for repo_name in expected_repos:
repo = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name, all_flag=all_flag))
print_tree(repo)
backs = check_repo(repo)
if all_flag:
[check_buck(buck) for back in backs for buck in check_back(back)]
if flag_depth < 2:
continue
output_logs(2, use_json, all_flag)
# cbbackupmgr info --archive --repo --backup
for repo_name in expected_repos:
for back_name in expected_backs[repo_name]:
back = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name, backup=back_name, all_flag=all_flag))
print_tree(back)
bucks = check_back(back)
if all_flag:
[check_buck(buck) for buck in bucks]
if flag_depth < 3:
continue
output_logs(3, use_json, all_flag)
# cbbackupmgr info --archive --repo --backup --bucket
for repo_name in expected_repos:
for back_name in expected_backs[repo_name]:
for buck_name in expected_bucks:
buck = parse_output(use_json, self.get_backup_info(json=use_json, repo=repo_name,
backup=back_name, collection_string=buck_name, all_flag=all_flag))
print_tree(buck)
check_buck(buck)
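# A minimal standalone sketch (not part of the test method above, helper name illustrative
# only) of the case arithmetic in the test_info_after_backup_merge_remove docstring: the
# 4 flag depths x 2 output types x 2 --all states expand into the 16 tested combinations.
def _enumerate_info_cases():
    import itertools
    flag_sets = ["--archive",
                 "--archive --repo",
                 "--archive --repo --backup",
                 "--archive --repo --backup --bucket"]
    cases = list(itertools.product(flag_sets, (True, False), (False, True)))
    assert len(cases) == 16  # (flags, use_json, all_flag) triples
    return cases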
|
deepQlearning.py
|
from torch import nn, multiprocessing as mp, Tensor
from franQ.Replay.wrappers import TorchDataLoader
import itertools
import typing as T, logging
from franQ.Agent.conf import AgentConf, AttrDict
from collections import OrderedDict
import torch
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
from threading import Thread
from queue import Queue
from .utils.common import soft_update, hard_update
from .components import soft_actor_critic, encoder
ExperienceDict_T = T.Dict[str, Tensor]
class DeepQLearning(nn.Module):
def __init__(self, conf: AgentConf, replays, **kwargs):
nn.Module.__init__(self)
conf: AgentConf = conf if isinstance(conf, AttrDict) else AttrDict().from_dict(conf)
self.conf = conf
self.param_queue = kwargs.get("param_queue", mp.Queue(maxsize=1))
if conf.use_async_train:
if not kwargs.get("train_process", False):
# make another process for doing parameter updates asynchronously
mp.Process(target=DeepQLearning, args=[conf.to_dict(), replays],
kwargs={"param_queue": self.param_queue,
"train_process": True}).start()
Thread(target=self._pull_params).start() # grab updated params from the other process
else:
# Sync mode means rollout and trainer are in same process, so must be on same device
conf.training_device = conf.inference_device
# Logging
self.summary_writer = SummaryWriter(Path(conf.log_dir) / ("Trainer" if "train_process" in kwargs else "Actor"))
self.fast_params = []
# Models
# Default encoder type
self.encoder = encoder.Encoder(conf)
self.target_encoder = encoder.Encoder(conf) if conf.use_target_encoder else self.encoder
hard_update(self.target_encoder, self.encoder)
self.fast_params += list(self.encoder.parameters())
if conf.use_distributional_sac:
from .components.distributional_soft_actor_critic import DistributionalSoftActorCritic
self.actor_critic = DistributionalSoftActorCritic(conf, conf.latent_state_dim)
else:
self.actor_critic = soft_actor_critic.SoftActorCritic(conf, conf.latent_state_dim)
self.fast_params += list(self.actor_critic.parameters())
self.step = 0
train_proc = kwargs.get("train_process", False)
if train_proc or (not conf.use_async_train):
if not train_proc:
conf.inference_device = conf.training_device
self.replays = [TorchDataLoader(r, conf.training_device, conf.dtype) for r in replays]
self.to(conf.training_device)
self.optimizers = [torch.optim.Adam(
self.parameters(),
lr=self.conf.learning_rate
)]
if kwargs.get("train_process", False):
dump_q = Queue(maxsize=1)
Thread(target=self._push_params, args=[dump_q]).start()
for step_train in itertools.count():
self.train_step()
if (step_train % self.conf.param_update_interval) == 0:
# Signal the _push_params thread that we've updated enough times to warrant a push
if dump_q.empty(): dump_q.put_nowait(None)
def train_step(self):
for replay in self.replays:
experience_dict = replay.temporal_sample()
task_loss = self.get_losses(experience_dict)
[o.zero_grad() for o in self.optimizers]
task_loss.backward()
nn.utils.clip_grad_norm_(self.parameters(), self.conf.clip_grad_norm)
[o.step() for o in self.optimizers]
self.update_targets()
self.reset()
self.conf.global_step.value += 1
@property
def iteration(self):
return int(self.conf.global_step.value)
def parameters(self, *args, **kwargs):
return self.fast_params
def _push_params(self, q: Queue):
while True:
_ = q.get()
state_dict = self.state_dict()
state_dict = OrderedDict({k: v.to("cpu:0") for k, v in state_dict.items()})
self.param_queue.put(state_dict)
def _pull_params(self):
while True:
params = self.param_queue.get()
self.load_state_dict(params)
logging.info("loaded state dict")
@staticmethod
def dict_to(state_dict: OrderedDict, device=None):
device = device or torch.device("cpu:0")
return OrderedDict({k: v.to(device) for k, v in state_dict.items()})
def act(self, experiences: T.Dict[str, Tensor]):
"""Run agent inference."""
if not self.conf.use_async_train:
# If async mode is disabled, take a training step here!
if all([r.ready() for r in self.replays]):
self.train_step()
with torch.no_grad():
latent_state = self.encoder.forward_eval(experiences)
explore_action, log_prob, exploit_action = self.actor_critic.act(latent_state)
if self.conf.num_instances > 1:
# Vectorized Choose: whether to explore or exploit
exploit_mask = (experiences["idx"] == 0).view(-1, 1)
action = (exploit_action * exploit_mask) + (explore_action * torch.logical_not(exploit_mask))
else:
action = explore_action # explore always if we only have 1 environment
# if self.conf.discrete: action = action.argmax(-1, True) # go from one-hot encoding to sparse
return action
def reset(self):
self.encoder.reset()
def update_targets(self):
if self.conf.use_hard_updates:
# hard updates should only be done once every N steps.
if self.step % self.conf.hard_update_interval: return
update = hard_update
else:
update = soft_update
self.actor_critic.update_target()
update(self.target_encoder, self.encoder, self.conf.tau)
def get_losses(self, experience: T.Dict[str, Tensor]):
# Step 0: grab metadata
conf = self.conf
experience["mask"] = torch.logical_not(experience["task_done"])
is_contiguous = experience["episode_step"][1:] == (experience["episode_step"][:-1] + 1)
# Step 1: Get temporal difference pairs
curr_xp, next_xp = self._temporal_difference_shift(experience)
# Step 2: Run the encoder to get the latent state
enc_kwargs = {
"is_contiguous": is_contiguous,
"sequence_len": conf.temporal_len,
"batch_size": conf.batch_size
}
curr_xp["state"] = self.encoder.forward_train(curr_xp, **enc_kwargs)
with torch.no_grad():
next_xp["state"] = self.target_encoder.forward_train(next_xp, **enc_kwargs)
# Step 3: Convert discrete actions to one-hot encoding
if conf.discrete:
curr_xp["action_onehot"] = torch.eye(
conf.action_space.n,
device=curr_xp["action"].device, dtype=curr_xp["action"].dtype
)[curr_xp["action"].view(curr_xp["action"].shape[:-1]).long()]
# Step 4: Get the critic losses with deep Q-learning
# Note: the NEXT state's reward & done are used. It is very important that this stays consistent.
q_loss, bootstrapped_lowerbound_loss, q_summaries = self.actor_critic.q_loss(curr_xp, next_xp)
# Step 5: Get the policy and temperature (alpha) losses
pi_loss, alpha_loss, pi_summaries = self.actor_critic.actor_loss(curr_xp)
# Sum it all up
assert q_loss.shape == pi_loss.shape == is_contiguous.shape == alpha_loss.shape, \
f"loss shape mismatch: q={q_loss.shape} pi={pi_loss.shape} c={is_contiguous.shape} a={alpha_loss.shape}"
task_loss = ((q_loss + pi_loss + alpha_loss) * is_contiguous).mean() # Once it's recurrent, they all use TD
if conf.use_bootstrap_minibatch_nstep:
bootstrapped_lowerbound_loss = (bootstrapped_lowerbound_loss * is_contiguous.prod(0)).mean()
task_loss = task_loss + bootstrapped_lowerbound_loss
# Step 6: Write scalars to TensorBoard
if (self.step % self.conf.log_interval) == 0:
self.summary_writer.add_scalars("Trainer/RL_Loss", {"Critic": q_loss.mean().item(),
"Actor": pi_loss.mean().item(),
"Alpha": alpha_loss.mean().item(), },
self.step)
[self.summary_writer.add_scalar(f"Trainer/Critic_{k}", v, self.step) for k, v in q_summaries.items()]
[self.summary_writer.add_scalar(f"Trainer/Actor_{k}", v, self.step) for k, v in pi_summaries.items()]
self.step += 1
return task_loss / conf.temporal_len
@staticmethod
def _temporal_difference_shift(experience_dict: ExperienceDict_T) -> T.Tuple[ExperienceDict_T, ...]:
# Arrange every experience tensor into temporal-difference (current, next) pairs
curr_state, next_state = {}, {}
for key, val in experience_dict.items():
curr_state[key] = val[:-1]
next_state[key] = val[1:]
return curr_state, next_state
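# A minimal sketch (helper names illustrative only) of the semantics assumed for the
# soft_update / hard_update helpers imported from .utils.common at the top of this file;
# it only illustrates the Polyak averaging applied by update_targets() above.
def _soft_update_sketch(target: nn.Module, source: nn.Module, tau: float) -> None:
    # target <- tau * source + (1 - tau) * target, applied parameter-wise
    with torch.no_grad():
        for t, s in zip(target.parameters(), source.parameters()):
            t.mul_(1.0 - tau).add_(s, alpha=tau)
def _hard_update_sketch(target: nn.Module, source: nn.Module) -> None:
    # Copy the source parameters into the target verbatim (equivalent to tau == 1)
    target.load_state_dict(source.state_dict())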
|
server.py
|
import http.server
import json
import queue
import signal
import socketserver
import socket
import struct
import time
import threading
from io import BytesIO
running = True
pos = (0.0, 0.0, 0.0)
q = queue.Queue()
class HTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
return http.server.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
global pos
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
data = json.loads(body)
if data['action'] == 'SET' or data['action'] == 'GET':
q.put(data)
self.send_response(200)
self.end_headers()
response = BytesIO()
if data['action'] == 'GET':
response.write(json.dumps(pos).encode('utf-8'))
elif data['action'] == 'SET':
response.write(b'"Good"')
else:
response.write(b'"Bad"')
self.wfile.write(response.getvalue())
class client(threading.Thread):
def __init__(self):
super(client, self).__init__()
self.sock = None
self.connected = False
def getPos(self):
try:
self.sock.send(b'GET')
except:
print("ERROR: Send failed.")
self.sock.close()
self.connected = False
return None
try:
buf = self.sock.recv(1024)
return struct.unpack('ddd', buf)
except:
print("ERROR: Recv failed.")
self.sock.close()
self.connected = False
return None
def setPos(self, pos):
try:
self.sock.send(b'SET' + struct.pack('ddd', float(pos['lat']), float(pos['lng']), 8.0))
return True
except:
print('ERROR: Send failed')
self.sock.close()
self.connected = False
return None
def connect(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(('127.0.0.1', 5678))
except:
self.sock = None
print("ERROR: Unable to connect to server, reconnecting...")
def run(self):
global running
global pos
self.lastGet = time.time()
while running:
if not self.connected:
self.connect()
if self.sock is None:
continue
else:
self.connected = True
self.lastGet = time.time()
pos = self.getPos()
print("Connected.")
if time.time() - self.lastGet > 10:
pos = self.getPos()
self.lastGet = time.time()
while not q.empty() and self.connected and running:
msg = q.get()
if msg['action'] == 'SET':
self.setPos(msg['pos'])
elif msg['action'] == 'GET':
self.getPos()
time.sleep(0.1)
def signal_handler(signals, frame):
global running
print('Shutting down!')
running = False
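# A minimal sketch (helper names illustrative only) of the wire format assumed by
# client.getPos()/setPos() above: a position is three IEEE-754 doubles packed with
# struct ('ddd', 24 bytes), and SET frames prefix that payload with the ASCII command b'SET'.
def encode_set_frame(lat, lng, alt=8.0):
    return b'SET' + struct.pack('ddd', float(lat), float(lng), float(alt))
def decode_pos_payload(buf):
    # e.g. decode_pos_payload(encode_set_frame(51.5, -0.1)[3:]) == (51.5, -0.1, 8.0)
    return struct.unpack('ddd', buf)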
def main():
global running
signal.signal(signal.SIGINT, signal_handler)
running = True
clientThread = client()
clientThread.start()
print("Client thread started.")
handler = HTTPRequestHandler
httpd = socketserver.TCPServer(("", 8080), handler)
serverThread = threading.Thread(target=httpd.serve_forever)
serverThread.start()
print("HTTP server listening on port 8080")
while running:
time.sleep(0.1)
httpd.shutdown()
if __name__ == '__main__':
main()
|
channel_test.py
|
import itertools
import multiprocessing
import threading
import time
import pytest
from omnidiff import channel
from omnidiff.channel import QueueChannel as Channel
def test_put_get():
"""
Channel is a FIFO queue. Write with put() or put_many(), then read with
get().
"""
chnl = Channel()
chnl.put(1)
chnl.put(2)
assert chnl.get() == 1
chnl.put(3)
assert chnl.get() == 2
assert chnl.get() == 3
chnl.put_many([4, 5])
assert chnl.get() == 4
assert chnl.get() == 5
def test_iterate():
"""
You can read a Channel to the end by iterating it.
"""
chnl = Channel()
chnl.put_many(range(5))
chnl.end()
assert list(chnl) == [0, 1, 2, 3, 4]
def test_finished():
"""
Once you end a channel, get() raises Finished.
"""
chnl = Channel()
chnl.end()
with pytest.raises(channel.Finished):
chnl.get()
with pytest.raises(channel.Finished):
chnl.get()
chnl = Channel()
chnl.put_many([1, 2, 3]).end()
assert chnl.get() == 1
assert chnl.get() == 2
assert chnl.get() == 3
with pytest.raises(channel.Finished):
chnl.get()
with pytest.raises(channel.Finished):
chnl.get()
def test_cancelled():
"""
Once you cancel a Channel, get() raises Cancelled.
"""
chnl = Channel()
chnl.cancel()
with pytest.raises(channel.Cancelled):
chnl.get()
with pytest.raises(channel.Cancelled):
chnl.get()
def test_priority():
"""
FIFO behaviour can be changed to a priority queue.
"""
chnl = Channel(priority=True)
values = (3, 2, 1, 1, 2, 3, 7)
chnl.put_many(values).end()
assert tuple(chnl) == tuple(sorted(values))
# And this still works with the gets mixed into the puts
chnl = Channel(priority=True)
chnl.put(2)
chnl.put(1)
assert chnl.get() == 1
chnl.put(0)
assert chnl.get() == 0
chnl.put(3)
assert chnl.get() == 2
assert chnl.get() == 3
def test_priority_callable():
"""
Prioritised channel can have custom sort order.
"""
chnl = Channel(priority=lambda value: -value)
values = (3, 2, 1, 1, 2, 3, 7)
chnl.put_many(values).end()
assert tuple(chnl) == tuple(sorted(values, reverse=True))
# And this still works with the gets mixed into the puts
chnl = Channel(priority=lambda value: -value)
chnl.put(2)
chnl.put(1)
assert chnl.get() == 2
chnl.put(0)
assert chnl.get() == 1
chnl.put(3)
assert chnl.get() == 3
assert chnl.get() == 0
def test_cancel_context():
"""
cancel_context() auto-cancels channels on exit.
"""
with Channel().cancel_context() as chnl:
chnl.put_many([1, 2])
assert chnl.get() == 1
with pytest.raises(channel.Cancelled):
chnl.get()
def test_exception_base():
"""
The Channel exceptions have a common base class.
"""
assert issubclass(channel.Finished, channel.ChannelError)
assert issubclass(channel.Cancelled, channel.ChannelError)
def test_suppressed():
"""
The Channel exceptions both have a special suppress helper.
"""
chnl = Channel()
chnl.end()
with channel.Finished.suppress():
chnl.get()
chnl = Channel()
chnl.cancel()
with channel.Cancelled.suppress():
chnl.get()
def test_suppressed_decorator():
"""
Channel exception suppression also works as a decorator.
"""
@channel.Finished.suppress()
def get():
chnl.get()
chnl = Channel()
chnl.end()
get()
chnl = Channel()
chnl.cancel()
with pytest.raises(channel.Cancelled):
get()
channel.Cancelled.suppress()(get)()
def test_end_states():
"""
Once cancelled, a Channel stays that way. If ended, though,
it can be cancelled to prevent any remaining data being read.
"""
chnl = Channel()
chnl.cancel()
with pytest.raises(channel.Cancelled):
chnl.put(1)
with pytest.raises(channel.Cancelled):
chnl.get()
chnl.cancel()
with pytest.raises(channel.Cancelled):
chnl.get()
chnl.end()
with pytest.raises(channel.Cancelled):
chnl.get()
chnl = Channel()
chnl.end()
with pytest.raises(channel.Finished):
chnl.get()
with pytest.raises(channel.Finished):
chnl.put(1)
chnl.end()
with pytest.raises(channel.Finished):
chnl.get()
chnl.cancel()
with pytest.raises(channel.Cancelled):
chnl.get()
chnl = Channel()
chnl.put_many([1, 2, 3])
chnl.end()
assert chnl.get() == 1
chnl.cancel()
with pytest.raises(channel.Cancelled):
chnl.get()
def test_wrapped_channel():
"""
We can wrap a channel to modify what gets read or written.
"""
class TestChannel(channel.WrappedChannel):
def put(self, value):
if value % 2 == 1:
super().put(value)
def get(self):
while True:
raw = super().get()
if raw < 5:
break
return raw ** 2
chnl = TestChannel(Channel())
chnl.put_many([1, 2, 3, 4, 5]).end()
assert chnl.get() == 1
assert chnl.get() == 9
with pytest.raises(channel.Finished):
chnl.get()
with pytest.raises(channel.Finished):
chnl.check()
def test_wrapped_close():
"""
WrappedChannel has a method you can overload to be notified when either
end() or cancel() is called, instead of having to overload both.
"""
class TestChannel(channel.WrappedChannel):
def __init__(self, other):
super().__init__(other)
self.close_called = False
def close(self):
self.close_called = True
chnl = TestChannel(Channel())
chnl.put_many([1, 2, 3])
assert not chnl.close_called
chnl.end()
assert chnl.close_called
assert list(chnl) == [1, 2, 3]
chnl = TestChannel(Channel())
chnl.put_many([1, 2, 3])
assert not chnl.close_called
chnl.cancel()
assert chnl.close_called
assert list(chnl) == []
def test_thread_source():
"""
thread_source starts a thread which writes items to a Channel.
"""
# This is the basic behaviour.
chnl = channel.thread_source(range(10))
assert list(chnl) == list(range(10))
# Now test that it really is running in another thread.
def source():
yield threading.get_ident()
chnl = channel.thread_source(source())
assert chnl.get() != threading.get_ident()
# Test that cancelling the channel ends the thread.
success = threading.Event()
delta = 0.1
def source():
yield 1
try:
while True:
time.sleep(delta)
yield 2
except GeneratorExit:
# If we make it here, GeneratorExit was raised from yield,
# meaning the iterator was garbage collected, meaning the thread
# that was running it is finished.
success.set()
with channel.thread_source(source()).cancel_context() as chnl:
assert len(threading.enumerate()) == 2
assert chnl.get() == 1
# wait() returns False if it times out
assert success.wait(10 * delta)
assert len(threading.enumerate()) == 1
def test_raising_iterable():
"""
If the iterable for a thread_source raises, the Channel is cancelled.
"""
# We could actually use a Channel for the notification, but let's keep
# it clear what Channel we're testing by only having one.
ready = threading.Event()
def raiser():
yield 1
ready.wait()
raise Exception()
chnl = channel.thread_source(raiser())
assert chnl.get() == 1
ready.set()
with pytest.raises(channel.Cancelled):
chnl.get()
def test_thread_source_channel():
"""
Everything thread_source does, it can do when the output channel is
specified by the user instead of created by the call to thread_source()
"""
my_chnl = Channel()
my_chnl.put_many(range(5))
# This is the basic behaviour.
chnl = channel.thread_source(range(10), channel=my_chnl)
assert chnl is my_chnl
assert list(chnl) == list(range(5)) + list(range(10))
# TODO: we could build this test out to do more of what test_thread_source
# and test_raising_iterable do.
def run_daemon(): # pragma: no cover: runs in subprocess
def forever():
while True:
time.sleep(10)
yield 1
channel.thread_source(forever(), daemon=True)
raise SystemExit(23)
def test_thread_daemon():
"""
Daemon thread does not prevent process exit.
"""
proc = multiprocessing.Process(target=run_daemon, daemon=True)
proc.start()
proc.join(1)
assert proc.exitcode == 23
@pytest.mark.parametrize('count', [1, 5, 100])
def test_thread_crew(count):
"""
thread_crew() creates multiple worker threads, reading from one Channel
and writing to another.
"""
def worker(value, check, out_chnl):
out_chnl.put((value, threading.get_ident()))
if value == count - 1:
out_chnl.end()
# Now sleep until the work is finished, so that every value is run by a
# different thread, and hence we can test the number of threads.
while True:
out_chnl.check()
time.sleep(0.1)
requests, responses = channel.thread_crew(count, worker, mode='1:m')
requests.put_many(range(count)).end()
outputs = tuple(responses)
assert len(outputs) == count
assert {output[0] for output in outputs} == set(range(count))
assert len({output[1] for output in outputs}) == count
@pytest.mark.parametrize('count', [1, 5, 100])
def test_thread_crew_mode(count):
"""
The mode argument to thread_crew() can be any of '1:1', '1:m', 'm:m'. In 1:1 mode, the
worker function is simply called with a value read off one Channel, and
returns a value to be written to the other. In '1:m' mode, it is passed a
value plus the output Channel, and writes as many values as it likes. In
m:m mode it is passed both Channels, and has full control.
"""
def worker_1_1(value, check_cancelled):
return 2 * value
def worker_1_m(value, check_cancelled, out_chnl):
out_chnl.put(2 * value)
out_chnl.put(2 * value + 1)
def worker_m_m(in_chnl, out_chnl):
while True:
value = in_chnl.get()
out_chnl.put_many(range(3 * value, 3 * value + 3))
# The default is 1:1
requests, responses = channel.thread_crew(count, worker_1_1)
requests.put_many(range(1000)).end()
assert set(responses) == set(range(0, 2000, 2))
requests, responses = channel.thread_crew(count, worker_1_1, mode='1:1')
requests.put_many(range(1000)).end()
assert set(responses) == set(range(0, 2000, 2))
# Other workers produce different results
requests, responses = channel.thread_crew(count, worker_1_m, mode='1:m')
requests.put_many(range(1000)).end()
assert set(responses) == set(range(2000))
requests, responses = channel.thread_crew(count, worker_m_m, mode='m:m')
requests.put_many(range(1000)).end()
assert set(responses) == set(range(3000))
with pytest.raises(ValueError):
channel.thread_crew(count, worker_1_1, mode='bad_value')
@pytest.mark.parametrize('cancel_requests', [True, False])
def test_thread_crew_cancel(cancel_requests):
"""
Cancelling either channel cancels the Crew.
"""
def worker_1_1(value, check_cancelled):
while True:
check_cancelled()
time.sleep(0.1)
requests, responses = channel.thread_crew(100, worker_1_1)
requests.put_many(range(100))
def do_cancel():
if cancel_requests:
requests.cancel()
else:
responses.cancel()
def check_is_cancelled():
with pytest.raises(channel.Cancelled):
if cancel_requests:
# Blocks until the channel is cancelled by the crew.
responses.get()
else:
# 10 seconds should be plenty of time for the crew to respond.
for _ in range(100): # pragma: no branch
time.sleep(0.1)
requests.put(None)
do_cancel()
check_is_cancelled()
# The same test again in 1:m mode. This time we can write the output
# before going to sleep, which means we can test everything gets done.
def worker_1_m(value, check_cancelled, responses):
responses.put(value)
worker_1_1(value, check_cancelled)
requests, responses = channel.thread_crew(100, worker_1_m, mode='1:m')
requests.put_many(range(100))
assert set(itertools.islice(responses, 100)) == set(range(100))
do_cancel()
check_is_cancelled()
@pytest.mark.parametrize('count', [1, 5, 100])
def test_thread_crew_exception(count):
"""
As with thread_source(), if the thread raises then everything is cancelled.
"""
def worker_1_1(value, check_cancelled):
if value == 99:
raise ValueError(value)
requests, responses = channel.thread_crew(count, worker_1_1)
requests.put_many(range(100))
with pytest.raises(channel.Cancelled):
# We could get up to 99 values out, but by then it must be cancelled.
for _ in range(100): # pragma: no branch
responses.get()
with pytest.raises(channel.Cancelled):
# 10 seconds should be plenty of time for the crew to respond.
for _ in range(100): # pragma: no branch
time.sleep(0.1)
requests.put(None)
@pytest.mark.parametrize('count', [1, 5, 100])
def test_thread_crew_channel(count):
"""
Everything thread_crew does, it can do when the output channel and/or input
channel is specified by the user instead of created by the call to
thread_crew()
"""
# TODO as with thread_source, we just test the basic behaviour, but it
# might be worth extending this to cover more of the thread_crew tests.
def worker(value, check, out_chnl):
out_chnl.put((value, threading.get_ident()))
if value == count - 1:
out_chnl.end()
# Now sleep until the work is finished, so that every value is run by a
# different thread, and hence we can test the number of threads.
while True:
out_chnl.check()
time.sleep(0.1)
my_requests = Channel()
my_responses = Channel()
requests, responses = channel.thread_crew(count, worker, mode='1:m', requests=my_requests, responses=my_responses)
assert requests is my_requests
assert responses is my_responses
requests.put_many(range(count)).end()
outputs = tuple(responses)
assert len(outputs) == count
assert {output[0] for output in outputs} == set(range(count))
assert len({output[1] for output in outputs}) == count
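# A minimal usage sketch (helper name illustrative only), relying solely on the
# omnidiff.channel behaviour exercised by the tests above: thread_source() feeds a
# Channel from a worker thread, and cancel_context() guarantees cancellation on exit.
def _thread_source_usage_sketch():
    with channel.thread_source(range(3)).cancel_context() as chnl:
        return list(itertools.islice(chnl, 3))  # -> [0, 1, 2]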
|
userportalgtk.py
|
#!/usr/bin/python
import pygtk
import gtk
import gtk.glade
import gobject
import time
import sys
import os
import subprocess
from threading import Thread
import dispatcher
import urllib2
import tempfile
global dispatcher
dispatcher = dispatcher.OvirtApi()
VarSaida = True
class Client:
def Quit(*args, **kwargs):
global VarSaida
VarSaida = False
gtk.main_quit(*args, **kwargs)
sys.exit()
def Connect(self, button=None):
selected_vm = self._cmb_main_vms.get_active_text().split(" :: ")[1]
ticket, expiry = dispatcher.ticketVm(selected_vm)
port = "port="+str(self._port)+"&" if self._port else ""
sport = "tls-port="+str(self._sport)+"&" if self._sport else ""
uri = "spice://%s/?%s%spassword=%s" % (self._host,
port,
sport,
ticket)
cmd = ["spicy", "--uri", uri]
if self._ca_file is not None:
cmd.append("--spice-ca-file=%s" % self._ca_file)
print cmd
subprocess.Popen(cmd)
def Auth(self, button=None):
url = self._ent_auth_server.get_text()
cert_path = "/ca.crt"
username = self._ent_auth_user.get_text()
password = self._ent_auth_pass.get_text()
try:
cert = urllib2.urlopen(url+cert_path).read()
cert_file = tempfile.NamedTemporaryFile(delete=False)
cert_file.write(cert)
cert_file.close()
self._ca_file = cert_file.name
except:
self._sta_main.push(0, "CA certificate not found in %s%s" %
(url, cert_path))
self._ca_file = None
login, msg = dispatcher.login(url,
username,
password,
self._ca_file)
if login:
self._sta_main.push(0, "User %s logged in" % username)
self._window1.hide()
self._window2.show()
self.List()
t = Thread(target=self.Status)
t.start()
else:
self._sta_auth.push(0, msg)
def List(self, button=None):
self._liststore.clear()
for vm in dispatcher.getUserVms():
self._liststore.append([vm.name + " :: " + vm.id])
self._cmb_main_vms.set_active(0)
def Status(self, button=None):
global VarSaida
while VarSaida:
selected_vm = self._cmb_main_vms.get_active_text().split(" :: ")[1]
vm = dispatcher.getVmById(selected_vm)
state = vm.status.state
vcpus = vm.cpu.topology
memory = vm.memory
os = vm.os.type_
if vm.usb.enabled:
usb = "Enabled"
else:
usb = "Disabled"
display = vm.get_display()
self._port = display.get_port()
self._sport = display.get_secure_port()
self._host = display.get_address()
self._btn_main_refresh.set_sensitive(True)
self._lab_Smp.set_text(str(vcpus.cores * vcpus.sockets))
self._lab_Memory.set_text(str(memory / (1024*1024)))
self._lab_Display.set_text(vm.display.type_)
self._lab_Usb.set_text(usb)
self._lab_Status.set_text(state)
if "rhel" in os:
self._img_So.set_from_file(self._dir + "/images/rhel.png")
elif "sles" in os:
self._img_So.set_from_file(self._dir + "/images/sles.png")
elif "ubuntu" in os:
self._img_So.set_from_file(self._dir + "/images/ubuntu.png")
elif "other_linux" in os:
self._img_So.set_from_file(self._dir + "/images/linux.png")
elif "windows" in os:
self._img_So.set_from_file(self._dir + "/images/win.png")
else:
self._img_So.set_from_file(self._dir + "/images/ovirt.png")
if state == "up" or state == "powering_up":
self._checkbutton1.set_sensitive(True)
self._cmb_main_vms.set_sensitive(True)
self._btn_main_refresh.set_sensitive(True)
self._btn_main_start.set_sensitive(False)
self._btn_main_stop.set_sensitive(True)
self._btn_main_connect.set_sensitive(True)
else:
self._checkbutton1.set_sensitive(True)
self._cmb_main_vms.set_sensitive(True)
self._btn_main_refresh.set_sensitive(True)
self._btn_main_start.set_sensitive(True)
self._btn_main_stop.set_sensitive(False)
self._btn_main_connect.set_sensitive(False)
time.sleep(2)
def Start(self, button=None):
selected_vm = self._cmb_main_vms.get_active_text().split(" :: ")[1]
start, msg, details = dispatcher.startVm(selected_vm)
if start:
self._sta_main.push(0, "Success starting VM")
else:
self._sta_main.push(0, "%s: %s" % (msg, details))
def Stop(self, button=None):
selected_vm = self._cmb_main_vms.get_active_text().split(" :: ")[1]
stop, msg, details = dispatcher.stopVm(selected_vm)
if stop:
self._sta_main.push(0, "Success stopping VM")
else:
self._sta_main.push(0, "%s: %s" % (msg, details))
def __init__(self):
gtk.gdk.threads_init()
self._dir = os.path.dirname(os.path.abspath(__file__))
self._gladefile = "%s/%s" % (self._dir, "userportalgtk.glade")
self._wTree = gtk.glade.XML(self._gladefile)
self._window1 = self._wTree.get_widget("window1")
self._window2 = self._wTree.get_widget("window2")
if (self._window1):
self._window1.connect("destroy", self.Quit)
if (self._window2):
self._window2.connect("destroy", self.Quit)
self._btn_auth_ok = self._wTree.get_widget("button1")
self._btn_auth_cancel = self._wTree.get_widget("button2")
self._ent_auth_user = self._wTree.get_widget("entry1")
self._ent_auth_pass = self._wTree.get_widget("entry2")
self._ent_auth_server = self._wTree.get_widget("entry3")
self._sta_auth = self._wTree.get_widget("statusbar1")
self._sta_main = self._wTree.get_widget("statusbar2")
self._lab_Smp = self._wTree.get_widget("label7")
self._lab_Memory = self._wTree.get_widget("label9")
self._lab_Display = self._wTree.get_widget("label11")
self._lab_Usb = self._wTree.get_widget("label13")
self._lab_Status = self._wTree.get_widget("label15")
self._img_So = self._wTree.get_widget("image1")
self._btn_main_refresh = self._wTree.get_widget("button3")
self._btn_main_start = self._wTree.get_widget("button4")
self._btn_main_connect = self._wTree.get_widget("button5")
self._btn_main_stop = self._wTree.get_widget("button6")
self._checkbutton1 = self._wTree.get_widget("checkbutton1")
self._cmb_main_vms = self._wTree.get_widget("combobox1")
self._liststore = gtk.ListStore(gobject.TYPE_STRING)
self._cmb_main_vms.set_model(self._liststore)
cell = gtk.CellRendererText()
self._cmb_main_vms.pack_start(cell, True)
self._cmb_main_vms.add_attribute(cell, 'text', 0)
self._btn_main_refresh.connect("clicked", self.List)
self._btn_main_start.connect("clicked", self.Start)
self._btn_main_stop.connect("clicked", self.Stop)
self._btn_main_connect.connect("clicked", self.Connect)
self._btn_auth_ok.connect("clicked", self.Auth)
self._btn_auth_cancel.connect("clicked", self.Quit)
self._window1.show()
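# A minimal sketch (helper name illustrative only) of the spice:// URI assembled in
# Client.Connect() above; the parameters mirror the attributes filled in by
# Client.Status() (host, port, secure port) and the ticket returned by dispatcher.ticketVm().
def build_spice_uri(host, ticket, port=None, tls_port=None):
    port_part = "port=%s&" % port if port else ""
    tls_part = "tls-port=%s&" % tls_port if tls_port else ""
    return "spice://%s/?%s%spassword=%s" % (host, port_part, tls_part, ticket)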
if __name__ == "__main__":
hwg = Client()
gtk.main()
|
sge.py
|
#!/usr/bin/env python
# coding:utf-8
# Submit jobs, spreading them across the SGE cluster, and exit once all jobs have finished. Killing this main process also kills the qsub jobs it submitted; `kill -9` cannot be intercepted.
import os
import sys
import time
import signal
import getpass
import argparse
from shutil import rmtree
from threading import Thread
from datetime import datetime
from subprocess import call, PIPE
from .utils import *
from .cluster import *
class ParseSingal(Thread):
def __init__(self, name="", mode="sge", conf=None):
super(ParseSingal, self).__init__()
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
self.name = name
self.mode = mode
self.conf = conf
def run(self):
time.sleep(1)
def signal_handler(self, signum, frame):
user = getpass.getuser()
if self.mode == "sge":
call('qdel "%s*"' % self.name, shell=True, stderr=PIPE, stdout=PIPE)
elif self.mode == "batchcompute":
jobs = self.conf.jobqueue.queue
for jb in jobs:
jobname = jb.name
try:
jobid = self.conf.cloudjob.get(jobname, "")
j = self.conf.client.get_job(jobid)
except ClientError as e:
if e.status == 404:
self.conf.logger.info("Invalid JobId %s", jobid)
continue
except:
continue
if j.Name.startswith(user):
if j.State not in ["Stopped", "Failed", "Finished"]:
self.conf.client.stop_job(jobid)
self.conf.client.delete_job(jobid)
self.conf.logger.info("Delete job %s success", j.Name)
else:
self.conf.logger.info(
"Delete job error, you have no access to job %s", j.Name)
sys.exit(signum)
def not_empty(s):
return s and s.strip() and (not s.strip().startswith("#")) and s.strip().lower() != "wait"
def parserArg():
pid = os.getpid()
parser = argparse.ArgumentParser(
description="For running your shell script jobs in parallel on localhost or via qsub, please use `runsge` instead.")
parser.add_argument("-q", "--queue", type=str, help="the queue your jobs run in, default: all.q",
default=["all.q", ], nargs="*", metavar="<queue>")
parser.add_argument("-m", "--memory", type=int,
help="the memory used per command (GB), default: 1", default=1, metavar="<int>")
parser.add_argument("-c", "--cpu", type=int,
help="the number of CPUs each job uses, default: 1", default=1, metavar="<int>")
parser.add_argument("-wd", "--workdir", type=str, help="work dir, default: %s" %
os.path.abspath(os.getcwd()), default=os.path.abspath(os.getcwd()), metavar="<workdir>")
parser.add_argument("-N", "--jobname", type=str,
help="job name", metavar="<jobname>")
parser.add_argument("-o", "--logdir", type=str,
help='the output log dir, default: "qsub.out.*"', metavar="<logdir>")
parser.add_argument("-n", "--num", type=int,
help="the maximum number of jobs running at the same time. default: all jobs in your job file", metavar="<int>")
parser.add_argument("-s", "--startline", type=int,
help="which line number to use for the first job task. default: 1", metavar="<int>", default=1)
parser.add_argument("-e", "--endline", type=int,
help="which line number (inclusive) to use for the last job task. default: the last line in your job file", metavar="<int>")
parser.add_argument("-b", "--block", action="store_true", default=False,
help="if passed, block when submitting your jobs, default: off")
parser.add_argument("jobfile", type=str,
help="the input jobfile", metavar="<jobfile>")
progargs = parser.parse_args()
if progargs.logdir is None:
progargs.logdir = os.path.join(os.path.abspath(os.path.dirname(
progargs.jobfile)), "qsub.out."+os.path.basename(progargs.jobfile))
if progargs.jobname is None:
progargs.jobname = os.path.basename(progargs.jobfile) + "_" + str(pid) if not os.path.basename(
progargs.jobfile)[0].isdigit() else "job_" + os.path.basename(progargs.jobfile) + "_" + str(pid)
with open(progargs.jobfile) as fi:
allcmds = fi.readlines()
if progargs.endline is None:
progargs.endline = len(allcmds)
allcmds = allcmds[(progargs.startline-1):progargs.endline]
allcmds = list(filter(not_empty, allcmds))
progargs.alljobs = len(allcmds)
if progargs.alljobs == 0:
print("Error: No jobs to submit!")
sys.exit(os.EX_USAGE)
if progargs.num is None:
progargs.num = progargs.alljobs
return progargs
def qsubCheck(jobname, num, block, sec=1):
qs = 0
global wait
while True:
time.sleep(sec)  # check every `sec` seconds
if not q.full():
continue
qs = os.popen('qstat -xml | grep %s | wc -l' % jobname).read().strip()
qs = int(qs)
if block or wait:
if qs == 0:
while True:
if q.empty():
wait = False
break
q.get()
else:
continue
else:
if qs < num:
[q.get() for _ in range(num-qs)]
else:
continue
def checkAllSuccess(logdir):
logfile = [os.path.join(logdir, i) for i in os.listdir(logdir)]
stat = []
for f in logfile:
with open(f) as fi:
ctx = fi.readlines()[-1].strip().split()[-1]
if ctx == "SUCCESS":
stat.append(True)
else:
stat.append(False)
if len(stat) != 0 and all(stat):
return True
else:
return False
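# A minimal standalone sketch (toy workers instead of qsub, helper name illustrative only)
# of the bounded-Queue throttle used by qsubCheck() above and main() below: the producer
# blocks on q.put() once `num` jobs are in flight; here each toy job releases its own slot
# when it finishes, whereas the real script releases slots by polling qstat.
def _throttle_sketch(num=2, jobs=6):
    import queue
    import threading
    slots = queue.Queue(maxsize=num)
    finished = []
    def toy_job(i):
        time.sleep(0.01)  # stand-in for a running qsub task
        finished.append(i)
        slots.get()  # release a slot
    workers = []
    for i in range(jobs):
        slots.put(i, block=True)  # blocks once `num` jobs are in flight
        t = threading.Thread(target=toy_job, args=(i,))
        workers.append(t)
        t.start()
    for t in workers:
        t.join()
    return len(finished) == jobs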
def main():
args = parserArg()
jobfile = args.jobfile
args.num = max(1, args.num)
global q, jn, wait
wait = False
jn = args.jobname
q = Queue(maxsize=args.num)
p = Thread(target=qsubCheck, args=(args.jobname, args.num, args.block))
p.setDaemon(True)
p.start()
h = ParseSingal(name=jn)
h.start()
success = []
with open(jobfile) as fi:
if os.path.isdir(args.logdir):
oldlog = [os.path.join(args.logdir, i)
for i in os.listdir(args.logdir)]
for ol in oldlog:
with open(ol) as oll:
ocmd = oll.readline().strip()
ostat = oll.readlines()[-1].strip().split()[-1]
if ocmd and ostat == "SUCCESS":
success.append(ocmd)
else:
rmtree(args.logdir)
continue
os.makedirs(args.logdir)
for n, line in enumerate(fi):
line = line.strip().strip("& ")
if n+1 < args.startline or n+1 > args.endline:
continue
if line.lower() == "wait":  # on "wait", block until all previously submitted jobs have finished before submitting more
wait = True
while True:
if q.full():
break
q.put(n)
continue
if line and not line.startswith("#"):
if line in success:
continue
logfile = os.path.join(args.logdir, os.path.basename(
jobfile)+".line") + str(n+1) + ".log"
# timeout of 300 hours: 3600*300 = 1080000 seconds
q.put(n, block=True, timeout=1080000)
qsubline = line if line.endswith("ERROR") else line+RUNSTAT
qsubline = "echo [`date +'%F %T'`] RUNNING... && " + qsubline
qsubline = qsubline.replace('"', '\\"')
cmd = 'qsub -q %s -wd %s -N "%s" -o %s -j y -l vf=%dg,p=%d <<< "%s"' % (" -q ".join(args.queue),
args.workdir, args.jobname+"_" +
str(
n+1), logfile, args.memory, args.cpu, qsubline
)
logcmd = open(logfile, "w")
logcmd.write(line+"\n")
logcmd.write("[%s] " % datetime.today().strftime("%F %X"))
logcmd.flush()
call(cmd, shell=True, stdout=logcmd, stderr=logcmd)
logcmd.close()
while True:
time.sleep(2)  # check every 2 seconds
qs = os.popen('qstat -xml | grep %s | wc -l' %
args.jobname).read().strip()
qs = int(qs)
if qs == 0:
# Any post-processing that should run once all tasks have finished can be added here.
qsubstat = checkAllSuccess(args.logdir)
if qsubstat:
print("[%s] All tasks in file (%s) finished successfully." % (
datetime.today().isoformat(), os.path.abspath(args.jobfile)))
# rmtree(args.logdir)  # remove the log dir
else:
print("[%s] All tasks in file (%s) finished, but some tasks ended with ERROR." % (
datetime.today().isoformat(), os.path.abspath(args.jobfile)))
break
return
if __name__ == "__main__":
main()
|
test_pool.py
|
import collections
import random
import threading
import time
from unittest.mock import ANY
from unittest.mock import call
from unittest.mock import Mock
from unittest.mock import patch
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.pool.base import _AsyncConnDialect
from sqlalchemy.pool.base import _ConnDialect
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_warns_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_not_none
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI(): # noqa
def cursor():
return Mock()
def connect(*arg, **kw):
def close():
conn.closed = True
# mock seems like it might have an issue logging
# call_count correctly under threading, not sure.
# adding a side_effect for close seems to help.
conn = Mock(
cursor=Mock(side_effect=cursor),
close=Mock(side_effect=close),
closed=False,
)
return conn
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
)
return db
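# A minimal usage sketch (not one of the test cases below, helper name illustrative only)
# of the MockDBAPI() stand-in above: it behaves like a DBAPI module whose connect() hands
# out Mock connections, and shutdown(True) makes connect() raise to simulate an outage.
def _mockdbapi_usage_sketch():
    db = MockDBAPI()
    conn = db.connect("foo.db")
    conn.close()
    assert conn.closed
    db.shutdown(True)
    try:
        db.connect("foo.db")
    except Exception:
        return True
    return False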
class PoolTestBase(fixtures.TestBase):
def setup_test(self):
self._teardown_conns = []
def teardown_test(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
_is_asyncio = kw.pop("_is_asyncio", False)
p = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw)
if _is_asyncio:
p._is_asyncio = True
p._dialect = _AsyncConnDialect()
return dbapi, p
class PoolTest(PoolTestBase):
@testing.fails_on(
"+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
)
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select(1).compile(testing.db)))
expected = [(1,)]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (
pool.SingletonThreadPool,
pool.StaticPool,
pool.QueuePool,
pool.NullPool,
pool.AssertionPool,
):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.info)
c.invalidate()
c = p.connect()
self.assert_("foo" not in c.info)
c.info["foo2"] = "bar2"
c.detach()
self.assert_("foo2" in c.info)
c2 = p.connect()
is_not(c.dbapi_connection, c2.dbapi_connection)
assert not c2.info
assert "foo2" in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.record_info)
c.invalidate()
c = p.connect()
self.assert_("foo" in c.record_info)
c.record_info["foo2"] = "bar2"
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert "foo2" in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.dbapi_connection
c1 = r1.get_connection()
is_(c1, r1.dbapi_connection)
is_(c1, r1.connection)
is_(c1, r1.driver_connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1)
c1 = r1.dbapi_connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.dbapi_connection
eq_(c1.mock_calls, [call.close()])
c2 = r1.get_connection()
is_not(c1, c2)
is_(c2, r1.dbapi_connection)
eq_(c2.mock_calls, [])
@testing.combinations(
(
pool.QueuePool,
dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
),
(pool.QueuePool, {}),
(pool.NullPool, {}),
(pool.SingletonThreadPool, {}),
(pool.StaticPool, {}),
(pool.AssertionPool, {}),
)
def test_recreate_state(self, pool_cls, pool_args):
creator = object()
pool_args["pre_ping"] = True
pool_args["reset_on_return"] = "commit"
pool_args["recycle"] = 35
pool_args["logging_name"] = "somepool"
pool_args["dialect"] = default.DefaultDialect()
pool_args["echo"] = "debug"
p1 = pool_cls(creator=creator, **pool_args)
cls_keys = dir(pool_cls)
d1 = dict(p1.__dict__)
p2 = p1.recreate()
d2 = dict(p2.__dict__)
for k in cls_keys:
d1.pop(k, None)
d2.pop(k, None)
for k in (
"_invoke_creator",
"_pool",
"_overflow_lock",
"_fairy",
"_conn",
"logger",
):
if k in d2:
d2[k] = mock.ANY
eq_(d1, d2)
eq_(p1.echo, p2.echo)
is_(p1._dialect, p2._dialect)
if "use_lifo" in pool_args:
eq_(p1._pool.use_lifo, p2._pool.use_lifo)
@testing.combinations(
(pool.QueuePool, False),
(pool.AsyncAdaptedQueuePool, True),
(pool.FallbackAsyncAdaptedQueuePool, True),
(pool.NullPool, None),
(pool.SingletonThreadPool, False),
(pool.StaticPool, None),
(pool.AssertionPool, None),
)
def test_is_asyncio_from_dialect(self, pool_cls, is_async_kind):
p = pool_cls(creator=object())
for is_async in (True, False):
if is_async:
p._dialect = _AsyncConnDialect()
else:
p._dialect = _ConnDialect()
if is_async_kind is None:
eq_(p._is_asyncio, is_async)
else:
eq_(p._is_asyncio, is_async_kind)
@testing.combinations(
(pool.QueuePool, False),
(pool.AsyncAdaptedQueuePool, True),
(pool.FallbackAsyncAdaptedQueuePool, True),
(pool.NullPool, False),
(pool.SingletonThreadPool, False),
(pool.StaticPool, False),
(pool.AssertionPool, False),
)
def test_is_asyncio_from_dialect_cls(self, pool_cls, is_async):
eq_(pool_cls._is_asyncio, is_async)
def test_rec_fairy_default_dialect(self):
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
rec = pool._ConnectionRecord(p1)
is_not_none(rec.dbapi_connection)
is_(rec.connection, rec.dbapi_connection)
is_(rec.driver_connection, rec.dbapi_connection)
fairy = pool._ConnectionFairy(rec.dbapi_connection, rec, False)
is_not_none(fairy.dbapi_connection)
is_(fairy.connection, fairy.dbapi_connection)
is_(fairy.driver_connection, fairy.dbapi_connection)
is_(fairy.dbapi_connection, rec.dbapi_connection)
is_(fairy.driver_connection, rec.driver_connection)
def test_rec_fairy_adapted_dialect(self):
dbapi = MockDBAPI()
mock_dc = object()
class _AdaptedDialect(_ConnDialect):
def get_driver_connection(self, connection):
return mock_dc
p1 = pool.Pool(
creator=lambda: dbapi.connect("foo.db"), dialect=_AdaptedDialect()
)
rec = pool._ConnectionRecord(p1)
is_not_none(rec.dbapi_connection)
is_(rec.connection, rec.dbapi_connection)
is_(rec.driver_connection, mock_dc)
fairy = pool._ConnectionFairy(rec.dbapi_connection, rec, False)
is_not_none(fairy.dbapi_connection)
is_(fairy.connection, fairy.dbapi_connection)
is_(fairy.driver_connection, mock_dc)
is_(fairy.dbapi_connection, rec.dbapi_connection)
is_(fairy.driver_connection, mock_dc)
def test_connection_setter(self):
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
rec = pool._ConnectionRecord(p1)
is_not_none(rec.dbapi_connection)
is_(rec.connection, rec.dbapi_connection)
rec.connection = 42
is_(rec.connection, rec.dbapi_connection)
rec.dbapi_connection = 99
is_(rec.connection, rec.dbapi_connection)
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect:
is_async = False
def do_rollback(self, dbapi_connection):
canary.append("R")
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append("C")
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append("CL")
dbapi_connection.close()
def get_driver_connection(self, connection):
return connection
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ["R", "CL", "R"])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ["R", "CL", "R"])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])
def test_null_pool(self):
self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])
def test_static_pool(self):
self._do_test(pool.StaticPool, ["R", "CL", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self, _is_asyncio=False):
p = self._queuepool_fixture(_is_asyncio=_is_asyncio)
canary = []
@event.listens_for(p, "checkin")
def checkin(*arg, **kw):
canary.append("checkin")
@event.listens_for(p, "close_detached")
def close_detached(*arg, **kw):
canary.append("close_detached")
@event.listens_for(p, "detach")
def detach(*arg, **kw):
canary.append("detach")
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.dbapi_connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.dbapi_connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.dbapi_connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ["connect"])
def test_connect_insert_event(self):
p = self._queuepool_fixture()
canary = []
def connect_one(*arg, **kw):
canary.append("connect_one")
def connect_two(*arg, **kw):
canary.append("connect_two")
def connect_three(*arg, **kw):
canary.append("connect_three")
event.listen(p, "connect", connect_one)
event.listen(p, "connect", connect_two, insert=True)
event.listen(p, "connect", connect_three)
p.connect()
eq_(canary, ["connect_two", "connect_one", "connect_three"])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ["connect", "connect"])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ["checkout"])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["reset"])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.dbapi_connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.dbapi_connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.dbapi_connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.dbapi_connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
@testing.combinations((True,), (False,))
def test_checkin_event_gc(self, detach_gced):
p, canary = self._checkin_event_fixture(_is_asyncio=detach_gced)
c1 = p.connect()
dbapi_connection = weakref.ref(c1.dbapi_connection)
eq_(canary, [])
del c1
lazy_gc()
if detach_gced:
# "close_detached" is not called because for asyncio the
# connection is just lost.
eq_(canary, ["detach"])
else:
eq_(canary, ["checkin"])
gc_collect()
if detach_gced:
is_none(dbapi_connection())
else:
is_not_none(dbapi_connection())
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
c2.close()
eq_(canary, ["checkin", "checkin"])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, "connect", listen_one)
event.listen(engine.pool, "connect", listen_two)
event.listen(engine, "connect", listen_three)
event.listen(engine.__class__, "connect", listen_four)
with engine.connect() as conn:
conn.execute(select(1))
eq_(
canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, "connect", listen_one)
event.listen(pool.QueuePool, "connect", listen_two)
event.listen(pool.SingletonThreadPool, "connect", listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def test_connect_event_fails_invalidates(self):
fail = False
def listen_one(conn, rec):
if fail:
raise Exception("it failed")
def listen_two(conn, rec):
rec.info["important_flag"] = True
p1 = pool.QueuePool(
creator=MockDBAPI().connect, pool_size=1, max_overflow=0
)
event.listen(p1, "connect", listen_one)
event.listen(p1, "connect", listen_two)
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.invalidate()
conn.close()
fail = True
assert_raises(Exception, p1.connect)
fail = False
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.close()
def teardown_test(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
"""test for :ticket:`2964`, where the pool would not mutex the
initialization of the dialect.
Unfortunately, as discussed in :ticket:`6337`, this test suite did not
ensure that the ``Engine`` itself actually uses the "first_connect" event,
so when :ticket:`5497` came along, the "first_connect" event was no longer
used and no test detected the re-introduction of the exact same race
condition, which was now worse as the un-initialized dialect would now
pollute the SQL cache causing the application to not work at all.
A new suite has therefore been added in test/engine/test_execute.py->
OnConnectTest::test_initialize_connect_race to ensure that the engine
in total synchronizes the "first_connect" process, which now works
using a new events feature _exec_w_sync_on_first_run.
"""
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, "first_connect")
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, "connect")
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(0.02)
c1.close()
time.sleep(0.02)
threads = []
# what we're trying to do here is have concurrent use of
# all three pooled connections at once, and the thing we want
# to test is that first_connect() finishes completely before
# any of the connections get returned. so first_connect()
# sleeps for one second, then pings the mock. the threads should
# not have made it to the "checkout" event for that one second.
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
# there is a very unlikely condition observed in CI on windows
# where even though we have five threads above all calling upon the
# pool, we didn't get concurrent use of all three connections, two
# connections were enough. so here we purposely just check out
# all three at once just to get a consistent test result.
make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
for conn in make_sure_all_three_are_connected:
conn.close()
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect(),
],
)
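# For illustration, a minimal standalone sketch of the pattern the class above
# exercises: a "first_connect" listener performs one-time, pool-wide setup,
# while "connect" fires for every new DBAPI connection. The mock-based
# creator below is a stand-in for a real DBAPI connect function.
def _example_first_connect_sketch():
    from unittest import mock

    from sqlalchemy import event
    from sqlalchemy.pool import QueuePool

    def _example_creator():
        # pretend DBAPI connection; a real pool would call dbapi.connect() here
        return mock.Mock()

    p = QueuePool(creator=_example_creator, pool_size=2, max_overflow=0)

    @event.listens_for(p, "first_connect")
    def _one_time_setup(dbapi_connection, connection_record):
        # runs exactly once for the pool, before other checkouts proceed
        dbapi_connection.one_time_setup_done = True

    @event.listens_for(p, "connect")
    def _per_connection_setup(dbapi_connection, connection_record):
        # runs for every newly created DBAPI connection
        dbapi_connection.per_connection_setup_done = True

    c = p.connect()
    assert c.dbapi_connection.one_time_setup_done
    c.close()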
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
def status(pool):
return (
pool.size(),
pool.checkedin(),
pool.overflow(),
pool.checkedout(),
)
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
eq_(status(p), (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.timing_intensive
def test_timeout_subsecond_precision(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0, timeout=0.5)
c1 = p.connect() # noqa
with expect_raises(tsa.exc.TimeoutError):
now = time.time()
c2 = p.connect() # noqa
# Python timing is not very accurate, the time diff should be very
# close to 0.5s but we give 200ms of slack.
assert 0.3 <= time.time() - now <= 0.7, "Pool timeout not respected"
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
reaper = testing.engines.ConnectionKiller()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
reaper.add_pool(p)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
reaper.assert_all_closed()
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
with mutex:
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.dbapi_connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.dbapi_connection, None)
conn.close()
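# For illustration, a minimal standalone sketch of the use case the preceding
# test's docstring describes: a "checkout" listener raises DisconnectionError
# when the raw connection looks dead, and the pool then discards that record
# and transparently retries the checkout with a fresh connection.  The
# _connection_is_usable() probe is a hypothetical stand-in for a real
# liveness check.
def _example_checkout_ping_sketch(some_pool):
    from sqlalchemy import event, exc

    @event.listens_for(some_pool, "checkout")
    def _ping_connection(dbapi_connection, connection_record, connection_proxy):
        if not _connection_is_usable(dbapi_connection):
            raise exc.DisconnectionError()


def _connection_is_usable(dbapi_connection):
    # hypothetical probe; a real implementation might execute "SELECT 1"
    try:
        cursor = dbapi_connection.cursor()
        cursor.close()
        return True
    except Exception:
        return False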
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.dbapi_connection
c2_con = c2.dbapi_connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.dbapi_connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.dbapi_connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.dbapi_connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.dbapi_connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not(c3.dbapi_connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.dbapi_connection)
c1.close()
c2 = p.connect()
is_(c2.dbapi_connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.dbapi_connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not(c3.dbapi_connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.dbapi_connection)
c1.close()
c2 = p.connect()
is_(c2.dbapi_connection, c_ref())
c2_rec = c2._connection_record
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
c2.invalidate(soft=True)
is_(c2_rec.dbapi_connection, c2.dbapi_connection)
c2.close()
c3 = p.connect()
is_not(c3.dbapi_connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.dbapi_connection, c3.dbapi_connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises_context_ok(Exception, p.connect)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.dbapi_connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
)
@testing.requires.timing_intensive
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info["x"] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert "x" in conn_rec.info
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.dbapi_connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.combinations((True,), (False,))
def test_userspace_disconnectionerror_weakref_finalizer(self, detach_gced):
dbapi, pool = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, _is_asyncio=detach_gced
)
if detach_gced:
pool._dialect.is_async = True
@event.listens_for(pool, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if getattr(dbapi_con, "boom") == "yes":
raise tsa.exc.DisconnectionError()
conn = pool.connect()
old_dbapi_conn = conn.dbapi_connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.rollback()])
old_dbapi_conn.boom = "yes"
conn = pool.connect()
dbapi_conn = conn.dbapi_connection
del conn
gc_collect()
if detach_gced:
# new connection was detached + abandoned on return
eq_(dbapi_conn.mock_calls, [])
else:
# new connection reset and returned to pool
eq_(dbapi_conn.mock_calls, [call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(0.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
# return 7 of them back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(
Error(), "statement", {}, Mock(), Mock()
)
except tsa.exc.DBAPIError:
pass
# run an error + invalidate operation on all 15 connections
# (including those already returned to the pool)
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn,))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.dbapi_connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.dbapi_connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.dbapi_connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(
reset_on_return=None, pool_size=1, max_overflow=0
)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.dbapi_connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.dbapi_connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.dbapi_connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.dbapi_connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.dbapi_connection
c1.invalidate()
assert c1.dbapi_connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.dbapi_connection is not c1_con
c2_con = c2.dbapi_connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
p = self._queuepool_fixture(pool_size=1)
c1 = p.connect()
rec = c1._connection_record
c1.close()
assert_warns_message(
Warning, "Double checkin attempted on %s" % rec, rec.checkin
)
def test_lifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator, use_lifo=True)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
for i in range(5):
pc1 = p.connect()
is_(pc1.dbapi_connection, c3)
pc1.close()
pc1 = p.connect()
is_(pc1.dbapi_connection, c3)
pc2 = p.connect()
is_(pc2.dbapi_connection, c2)
pc2.close()
pc3 = p.connect()
is_(pc3.dbapi_connection, c2)
pc2 = p.connect()
is_(pc2.dbapi_connection, c1)
pc2.close()
pc3.close()
pc1.close()
def test_fifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
pc1 = p.connect()
is_(pc1.dbapi_connection, c1)
pc1.close()
pc1 = p.connect()
is_(pc1.dbapi_connection, c2)
pc2 = p.connect()
is_(pc2.dbapi_connection, c3)
pc2.close()
pc3 = p.connect()
is_(pc3.dbapi_connection, c1)
pc2 = p.connect()
is_(pc2.dbapi_connection, c3)
pc2.close()
pc3.close()
pc1.close()
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
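# For illustration, a minimal standalone sketch of what the fixtures above
# configure: reset_on_return decides whether a connection is rolled back,
# committed, or left untouched when it is returned to the pool.  The Mock
# below stands in for a real DBAPI module.
def _example_reset_on_return_sketch():
    from unittest import mock

    from sqlalchemy.pool import QueuePool

    dbapi = mock.Mock()
    # "rollback" (the default) issues connection.rollback() on check-in,
    # "commit" issues connection.commit(), and None skips the reset entirely.
    p = QueuePool(creator=lambda: dbapi.connect(), reset_on_return="commit")
    c = p.connect()
    c.close()
    assert dbapi.connect().commit.called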
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
# TODO: the SingletonThreadPool cleanup method
# has an unfixed race condition within the "cleanup" system that
# leads to this test being off by one connection under load; in any
# case, this connection will be closed once it is garbage collected.
# this pool is not a production-level pool and is only used for the
# SQLite "memory" connection, and is not very useful under actual
# multi-threaded conditions
# @testing.requires.threading_with_mock
# def test_cleanup_no_gc(self):
# self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.dbapi_connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(0.01)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
lp = len(p._all_conns)
is_true(3 <= lp <= 4)
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
def test_no_rollback_from_nested_connections(self):
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
c1 = p.connect()
mock_conn = c1.dbapi_connection
c2 = p.connect()
is_(c1, c2)
c2.close()
eq_(mock_conn.mock_calls, [])
c1.close()
eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[call("foo.db"), call("foo.db")], any_order=True
)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
def test_connect(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
c1 = p.connect()
conn = c1.dbapi_connection
c1.close()
c2 = p.connect()
is_(conn, c2.dbapi_connection)
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
openvpn.py
#!/usr/bin/python
# openvpn.py: library to handle starting and stopping openvpn instances
import logging
import os
import signal
import subprocess
import threading
import time
class OpenVPN:
connected_instances = []
def __init__(self, config_file=None, auth_file=None, crt_file=None,
tls_auth=None, key_direction=None, timeout=60):
self.started = False
self.stopped = False
self.error = False
self.notifications = ""
self.auth_file = auth_file
self.crt_file = crt_file
self.tls_auth = tls_auth
self.key_dir = key_direction
self.config_file = config_file
self.thread = threading.Thread(target=self._invoke_openvpn)
self.thread.daemon = True
self.timeout = timeout
def _invoke_openvpn(self):
cmd = ['sudo', 'openvpn', '--script-security', '2']
# --config must be the first parameter, so that any options passed
# after it can override conflicting (e.g. relative-path) options
# set in the config file
if self.config_file is not None:
cmd.extend(['--config', self.config_file])
if self.crt_file is not None:
cmd.extend(['--ca', self.crt_file])
if self.tls_auth is not None and self.key_dir is not None:
cmd.extend(['--tls-auth', self.tls_auth, self.key_dir])
if self.auth_file is not None:
cmd.extend(['--auth-user-pass', self.auth_file])
self.process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=os.setsid)
self.kill_switch = self.process.terminate
self.starting = True
while True:
line = self.process.stdout.readline().strip()
if not line:
break
self.output_callback(line, self.process.terminate)
def output_callback(self, line, kill_switch):
"""Set status of openvpn according to what we process"""
self.notifications += line + "\n"
if "Initialization Sequence Completed" in line:
self.started = True
if "ERROR:" in line or "Cannot resolve host address:" in line:
self.error = True
if "process exiting" in line:
self.stopped = True
def start(self, timeout=None):
"""
Start OpenVPN and block until the connection is opened or there is
an error
:param timeout: time in seconds to wait for process to start
:return:
"""
if not timeout:
timeout = self.timeout
self.thread.start()
start_time = time.time()
while start_time + timeout > time.time():
self.thread.join(1)
if self.error or self.started:
break
if self.started:
logging.info("OpenVPN connected")
# append instance to connected list
OpenVPN.connected_instances.append(self)
else:
logging.warn("OpenVPN not started")
for line in self.notifications.split('\n'):
logging.warn("OpenVPN output:\t\t%s" % line)
def stop(self, timeout=None):
"""
Stop OpenVPN process group
:param timeout: time in seconds to wait for process to stop
:return:
"""
if not timeout:
timeout = self.timeout
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
self.thread.join(timeout)
if self.stopped:
logging.info("OpenVPN stopped")
if self in OpenVPN.connected_instances:
OpenVPN.connected_instances.remove(self)
else:
logging.error("Cannot stop OpenVPN!")
for line in self.notifications.split('\n'):
logging.warn("OpenVPN output:\t\t%s" % line)
word2vec_optimized.py
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov, et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
"train_data", None,
"Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "Analogy questions. "
"https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
"Numbers of training examples each step processes "
"(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy('france', 'paris', 'russia') and "
"model.nearby(['proton', 'elephant', 'maxwell'])")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def build_graph(self):
"""Build the model graph."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, current_epoch, total_words_processed,
examples, labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
# Declare all variables we need.
# Input words embedding: [vocab_size, emb_dim]
w_in = tf.Variable(
tf.random_uniform(
[opts.vocab_size,
opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
name="w_in")
# Output words embedding (softmax weights): [vocab_size, emb_dim]
w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
# Global step: []
global_step = tf.Variable(0, name="global_step")
# Linear learning rate decay.
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001,
1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
# Training nodes.
inc = global_step.assign_add(1)
with tf.control_dependencies([inc]):
train = word2vec.neg_train(w_in,
w_out,
examples,
labels,
lr,
vocab_count=opts.vocab_counts.tolist(),
num_negative_samples=opts.num_samples)
self._w_in = w_in
self._examples = examples
self._labels = labels
self._lr = lr
self._train = train
self.step = global_step
self._epoch = current_epoch
self._words = total_words_processed
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
f.write("%s %d\n" % (tf.compat.as_text(opts.vocab_words[i]),
opts.vocab_counts[i]))
def build_eval_graph(self):
"""Build the evaluation graph."""
# Eval graph
opts = self._options
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._w_in, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vector on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, opts.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time = initial_words, time.time()
while True:
time.sleep(5)  # Report our progress once in a while.
(epoch, step, words,
lr) = self._session.run([self._epoch, self.step, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
lr, rate),
end="")
sys.stdout.flush()
if epoch != initial_epoch:
break
for t in workers:
t.join()
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
total = self._analogy_questions.shape[0]
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The best candidate outside the question words is wrong; no credit
# at precision@1.
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
global_step=model.step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy('france', 'paris', 'russia')
# [1]: model.nearby(['proton', 'elephant', 'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
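# For illustration, a small NumPy sketch of the analogy arithmetic that
# build_eval_graph() expresses with TensorFlow ops: embeddings are
# L2-normalized, the target is c + (b - a), and candidates are ranked by
# cosine similarity (a plain dot product once rows are unit length).  The
# 4x3 embedding matrix is made-up toy data.
def _analogy_arithmetic_sketch():
  import numpy as np
  emb = np.array([[1.0, 0.0, 0.0],   # word id 0: "a"
                  [1.0, 1.0, 0.0],   # word id 1: "b"
                  [0.0, 0.0, 1.0],   # word id 2: "c"
                  [0.0, 1.0, 1.0]])  # word id 3: the expected "d"
  nemb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
  a, b, c = nemb[0], nemb[1], nemb[2]
  target = c + (b - a)
  dist = nemb.dot(target)           # cosine scores, shape [vocab_size]
  pred_idx = np.argsort(-dist)[:4]  # top 4 ids, as with tf.nn.top_k(dist, 4)
  assert pred_idx[0] == 3
  return pred_idx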
actor_definition.py
import contextlib
import logging
import os
import pkgutil
import sys
from io import UnsupportedOperation
from multiprocessing import Process, Queue
import leapp.libraries.actor
from leapp.actors import get_actors, get_actor_metadata
from leapp.exceptions import ActorInspectionFailedError, MultipleActorsError, UnsupportedDefinitionKindError,\
LeappRuntimeError
from leapp.repository import DefinitionKind
from leapp.repository.loader import library_loader
def inspect_actor(definition, result_queue):
"""
Retrieves the actor information in a child process and returns the results back through `result_queue`.
:param definition: the actor definition to load
:type definition: :py:class:`ActorDefinition`
:param result_queue: queue to pass results back to the calling process
:type result_queue: :py:class:`multiprocessing.Queue`
"""
definition.load()
result = [get_actor_metadata(actor) for actor in get_actors()]
result = [entry for entry in result if entry['path'] in definition.full_path]
result_queue.put(result)
class ActorCallContext(object):
"""
Wraps the actor execution into child process.
"""
def __init__(self, definition, logger, messaging):
"""
:param definition: Actor definition
:type definition: :py:class:`leapp.repository.actor_definition.ActorDefinition`
:param logger: Logger
:type logger: :py:class:`logging.Logger`
:param messaging: Leapp Messaging
:type messaging: :py:class:`leapp.messaging.BaseMessaging`
"""
self.definition = definition
self.logger = logger
self.messaging = messaging
@staticmethod
def _do_run(stdin, logger, messaging, definition, args, kwargs):
if stdin is not None:
sys.stdin = os.fdopen(stdin)
definition.load()
with definition.injected_context():
target_actor = [actor for actor in get_actors() if actor.name == definition.name][0]
target_actor(logger=logger, messaging=messaging).run(*args, **kwargs)
def run(self, *args, **kwargs):
"""
Performs the actor execution in the child process.
"""
try:
stdin = sys.stdin.fileno()
except UnsupportedOperation:
stdin = None
p = Process(target=self._do_run, args=(stdin, self.logger, self.messaging, self.definition, args, kwargs))
p.start()
p.join()
if p.exitcode != 0:
raise LeappRuntimeError(
'Actor {actorname} unexpectedly terminated with exit code: {exitcode}'
.format(actorname=self.definition.name, exitcode=p.exitcode))
class ActorDefinition(object):
"""
Defines actor resources.
"""
def __init__(self, directory, repo_dir, log=None):
"""
:param log: Logger
:type log: :py:class:`logging.Logger`
:param directory: Actor directory
:type directory: str
:param repo_dir: Repository directory
:type repo_dir: str
"""
self.log = log or logging.getLogger('leapp.actor')
self._directory = directory
self._repo_dir = repo_dir
self._definitions = {}
self._module = None
self._discovery = None
@property
def full_path(self):
return os.path.realpath(os.path.join(self._repo_dir, self._directory))
def add(self, kind, path):
"""
Adds any kind of actor resource to the Definition
:param kind: kind of resource added
:type kind: str
:param path: path to the added resource
:type path: str
"""
if kind not in DefinitionKind.ACTOR_WHITELIST:
self.log.error("Attempt to add item type %s to actor that is not supported", kind.name)
raise UnsupportedDefinitionKindError('Actors do not support {kind}.'.format(kind=kind.name))
self._definitions.setdefault(kind, []).append(path)
def dump(self):
"""
:return: dump of actor resources (path, name, tools, files, libraries, tests)
"""
return {
'path': self.directory,
'name': self.name,
'tools': self.tools,
'files': self.files,
'libraries': self.libraries,
'tests': self.tests
}
def load(self):
"""
Loads the actor module to be introspectable.
"""
if not self._module:
with self.injected_context():
path = os.path.abspath(os.path.join(self._repo_dir, self.directory))
for importer, name, is_pkg in pkgutil.iter_modules((path,)):
if not is_pkg:
self._module = importer.find_module(name).load_module(name)
break
def discover(self):
"""
Performs introspection through a subprocess.
:return: Dictionary with discovered items.
"""
if not self._discovery:
self.log.debug("Starting actor discovery in %s", self.directory)
q = Queue(1)
p = Process(target=inspect_actor, args=(self, q))
p.start()
p.join()
if p.exitcode != 0:
self.log.error("Process inspecting actor in %s failed with %d", self.directory, p.exitcode)
raise ActorInspectionFailedError('Inspection of actor in {path} failed'.format(path=self.directory))
result = q.get()
if not result:
self.log.error("Process inspecting actor in %s returned no result", self.directory)
raise ActorInspectionFailedError(
'Inspection of actor in {path} produced no results'.format(path=self.directory))
if len(result) > 1:
self.log.error("Actor in %s returned multiple actors", self.directory)
raise MultipleActorsError(self.directory)
self._discovery = result[0]
for tag in self._discovery['tags']:
if self not in tag.actors:
tag.actors += (self,)
return self._discovery
def __call__(self, messaging=None, logger=None):
return ActorCallContext(definition=self, messaging=messaging, logger=logger)
@property
def dialogs(self):
"""
:return: Tuple of defined dialogs
"""
return self.discover()['dialogs']
@property
def consumes(self):
"""
:return: Tuple of consumed models
"""
return self.discover()['consumes']
@property
def produces(self):
"""
:return: Tuple of produced models
"""
return self.discover()['produces']
@property
def tags(self):
"""
:return: Tuple of tags assigned to the actor
"""
return self.discover()['tags']
@property
def class_name(self):
"""
:return: Actor class name
"""
return self.discover()['class_name']
@property
def name(self):
"""
:return: Actor internal name
"""
return self.discover()['name']
@property
def description(self):
"""
:return: Actor description
"""
return self.discover()['description']
@contextlib.contextmanager
def injected_context(self):
"""
Prepares the actor environment for running the actor.
This includes injecting actor private libraries into :py:mod:`leapp.libraries.actor`
and setting environment variables for private tools and files.
:note: Use with caution.
"""
# Backup of the path variable
path_backup = os.environ.get('PATH', '')
os.environ['PATH'] = ':'.join(path_backup.split(':') +
list(os.path.join(self._repo_dir, self._directory, path) for path in self.tools))
files_backup = os.environ.get('LEAPP_FILES', None)
if self.files:
os.environ['LEAPP_FILES'] = os.path.join(self._repo_dir, self._directory, self.files[0])
# We make a snapshot of the symbols in the module
before = leapp.libraries.actor.__dict__.keys()
# Now we are loading all modules and packages and injecting them at the same time into the modules at hand
to_add = library_loader(leapp.libraries.actor, 'leapp.libraries.actor',
map(lambda x: os.path.join(self._repo_dir, self.directory, x), self.libraries))
backup = {}
# Now we are injecting them into the global sys.modules dictionary and keep a backup of existing ones
# The backup shouldn't be necessary, but just in case
for name, mod in to_add:
if name in sys.modules:
backup[name] = sys.modules[name]
sys.modules[name] = mod
previous_path = os.getcwd()
os.chdir(os.path.join(self._repo_dir, self._directory))
try:
yield
finally:
os.chdir(previous_path)
# Restoration of the PATH environment variable
os.environ['PATH'] = path_backup
# Restoration of the LEAPP_FILES environment variable
if files_backup is not None:
os.environ['LEAPP_FILES'] = files_backup
# Remove all symbols that were added to the actor library during the execution
current = leapp.libraries.actor.__dict__.keys()
added = set(current).difference(before)
for symbol in added:
leapp.libraries.actor.__dict__.pop(symbol)
# Remove all modules from the sys.modules dict or restore from backup if it was there
for name, _ in to_add:
if name in backup:
sys.modules[name] = backup[name]
else:
sys.modules.pop(name)
@property
def directory(self):
"""
:return: The folder path of the actor
"""
return self._directory
@property
def tools(self):
"""
:return: Tuple with path to the tools folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.TOOLS, ()))
@property
def libraries(self):
"""
:return: Tuple with path to the libraries folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.LIBRARIES, ()))
@property
def files(self):
"""
:return: Tuple with path to the files folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.FILES, ()))
@property
def tests(self):
"""
:return: Tuple with path to the tests folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.TESTS, ()))
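# Illustrative usage sketch (hypothetical variable name; it relies only on the
# properties and context manager shown above):
#
#   actor_def = ...  # an actor definition discovered from a repository
#   print(actor_def.name, actor_def.class_name)
#   with actor_def.injected_context():
#       # Inside this block the actor's private libraries are importable via
#       # leapp.libraries.actor, and PATH/LEAPP_FILES point at its tools/files.
#       pass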
|
zPushTest.py
|
#!/usr/bin/python
import time
import json
import urllib
import urllib2
import base64
import random
from multiprocessing import Process
PUSHD_SERVER = 'http://54.64.230.59:5566'
PUSHD_SERVER_WITHOUT_AUTH = 'http://54.64.230.59:5566'
PUSHD_AUTHORIZATION = 'Basic %s' % base64.encodestring('admin:admin').strip()  # strip the trailing newline encodestring appends
TOKEN_HTTP = 'http://localhost:5001/log'
class RepeatingMessage:
def __init__(self, event, messagesPerMinute):
self.event = event
self.messagesPerMinute = messagesPerMinute
self.pushCount = 0
def push(self):
print 'Pushing message to ' + self.event
self.pushCount += 1
msg = self.generate_message()
urllib.urlopen(PUSHD_SERVER + '/event/' + self.event, msg).read()
def generate_message(self):
return 'title=performance test&msg=%s' % self.generate_body()
def generate_body(self):
t = time.time()
readable = time.strftime('%Y-%m-%d %I:%M:%S', time.localtime(t))
message = {'timestamp': t,
'readable_timestamp': readable,
'event': self.event}
return json.dumps(message)
class Subscriber:
def __init__(self, token, proto):
self.token = token
self.proto = proto
self.subscriberId = None
self.registerSubscriber()
def registerSubscriber(self):
print 'Registering subscriber %s' % self.token
data = 'proto=%s&token=%s&lang=fi&badge=0' % (self.proto, self.token)
response = urllib.urlopen(PUSHD_SERVER + '/subscribers', data).read()
parsedResponse = json.loads(response)
if 'id' not in parsedResponse:
raise RuntimeError('No id in the response')
self.subscriberId = parsedResponse['id']
def subscribe(self, event):
print 'User (token %s) subscribing to %s' % (self.token, event)
url = PUSHD_SERVER + '/subscriber/%s/subscriptions/%s' % \
(self.subscriberId, event)
data = 'ignore_message=0'
urllib.urlopen(url, data).read()
def unregister(self):
print 'Unregistering user %s' % self.token
url = PUSHD_SERVER_WITHOUT_AUTH + '/subscriber/%s' % self.subscriberId
request = urllib2.Request(url, data='')
request.add_header('Authorization', PUSHD_AUTHORIZATION)
request.get_method = lambda: 'DELETE'
opener = urllib2.build_opener(urllib2.HTTPHandler)
opener.open(request).read()
def pusherProcessMain(repeatingMessage):
try:
while True:
repeatingMessage.push()
time.sleep(60./repeatingMessage.messagesPerMinute)
except KeyboardInterrupt:
pass
print '%d messages pushed to %s' % \
(repeatingMessage.pushCount, repeatingMessage.event)
def generateRandomHTTPSubscribers(event, count):
subscribers = []
print 'Creating %d subscribers for %s' % (count, event)
for i in xrange(count):
subscriber = Subscriber(randomHTTPToken(), 'http')
subscriber.subscribe(event)
subscribers.append(subscriber)
return subscribers
def randomHTTPToken():
r = ''.join([random.choice('0123456789ABCDEF') for x in xrange(10)])
return TOKEN_HTTP + '/' + r
def startPushProcesses(targets):
print 'Starting %d push processes' % len(targets)
processes = []
for message in targets:
p = Process(target=pusherProcessMain, args=(message,))
p.daemon = True
p.start()
processes.append(p)
print 'All processes started'
return processes
def settings():
# events and notification frequencies
##push_targets = [RepeatingMessage('performancetest1', 1),
## RepeatingMessage('performancetest2', 1)]
##subscribers = [generateRandomHTTPSubscribers(push_targets[0].event, 10),
## generateRandomHTTPSubscribers(push_targets[1].event, 5)]
push_targets = [RepeatingMessage('performancetest1', 2)]
subscribers = [generateRandomHTTPSubscribers('performancetest1',10)]
return push_targets, subscribers
def main():
push_targets, subscribers = settings()
processes = startPushProcesses(push_targets)
try:
while True:
time.sleep(100)
except KeyboardInterrupt:
print 'Quitting...'
for p in processes:
p.terminate()
p.join()
print 'All processes joined'
for subscribersForMessage in subscribers:
for subscriber in subscribersForMessage:
subscriber.unregister()
if __name__ == '__main__':
main()
|
runner.py
|
#!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python3 tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from enum import Enum
from functools import wraps
from subprocess import PIPE, STDOUT
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import shlex
import shutil
import string
import subprocess
import stat
import sys
import tempfile
import time
import unittest
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import clang_native
import jsrun
import parallel_testsuite
from jsrun import NON_ZERO
from tools.config import EM_CONFIG
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, try_delete
from tools.shared import asbytes, Settings, config
from tools.utils import MACOS, WINDOWS
from tools import shared, line_endings, building
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger("runner")
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using a browser command line other than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
# TODO(sbc): Remove this check for the legacy name once it's been around for a while.
assert 'EM_SAVE_DIR' not in os.environ, "Please use EMTEST_SAVE_DIR instead of EM_SAVE_DIR"
EMTEST_SAVE_DIR = int(os.getenv('EMTEST_SAVE_DIR', '0'))
# Generally JS engines are equivalent, so testing one is enough. Set this
# to force testing on all JS engines; useful for finding JS engine bugs.
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_LACKS_NATIVE_CLANG = os.getenv('EMTEST_LACKS_NATIVE_CLANG')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0')) or shared.DEBUG
if EMTEST_VERBOSE:
logging.root.setLevel(logging.DEBUG)
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
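# Illustrative sketch of how skip_if can back a condition-based decorator
# (hypothetical helper name; is_wasm() is a method defined on RunnerCore below):
#
#   def no_wasm(note=''):
#       assert not callable(note)
#       return lambda f: skip_if(f, 'is_wasm', note)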
def needs_dlfcn(func):
assert callable(func)
@wraps(func)
def decorated(self):
self.check_dlfcn()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def disabled(note=''):
assert not callable(note)
return unittest.skip(note)
def no_mac(note=''):
assert not callable(note)
if MACOS:
return unittest.skip(note)
return lambda f: f
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
def decorated(self):
self.set_setting('USE_PTHREADS', 1)
if '-fsanitize=address' in self.emcc_args:
self.skipTest('asan ends up using atomics that are not yet supported in node 12')
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
f(self)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
# Setting a value to None clears the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
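# Usage sketch for env_modify / with_env_modify (the variable names below are
# illustrative, not requirements of the helpers):
#
#   @with_env_modify({'EMCC_DEBUG': '1', 'EMCC_CORES': None})
#   def test_something(self):
#       # Runs with EMCC_DEBUG=1 exported and EMCC_CORES removed; the previous
#       # environment is restored once the test returns.
#       ...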
def ensure_dir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
lines = string.splitlines()
for i, line in enumerate(lines):
if len(line) > max_line:
lines[i] = line[:max_line] + '[..]'
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines) + '\n'
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_test_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
def make_executable(name):
os.chmod(name, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
# The core test modes
core_test_modes = [
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
'strict',
'wasm2js0',
'wasm2js1',
'wasm2js2',
'wasm2js3',
'wasm2jss',
'wasm2jsz',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
'benchmark',
'asan',
'lsan',
'wasm2ss',
'posixtest',
'posixtest_browser',
]
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which also goes to stderr, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def check_dlfcn(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dlfcn with memory growth (without wasm)')
if not self.get_setting('WASM'):
self.skipTest('no dynamic library support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic library support in asan yet')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or \
(self.get_setting('WASM') and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
self.node_args = []
self.v8_args = []
self.env = {}
self.temp_files_before_run = []
self.uses_es6 = False
self.js_engines = list(config.JS_ENGINES)
self.wasm_engines = list(config.WASM_ENGINES)
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
# Even when EMTEST_SAVE_DIR is set we still try to start with an empty directory, as many tests
# expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key):
if key in self.settings_mods:
return self.settings_mods[key]
return Settings[key]
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
elif type(value) == str:
ret += ['-s', f'{key}={value}']
else:
ret += ['-s', f'{key}={json.dumps(value)}']
return ret
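# For example (illustrative values), settings_mods of
#   {'EXIT_RUNTIME': 1, 'EXPORT_NAME': 'Foo', 'INITIAL_MEMORY': 65536}
# serializes to
#   ['-s', 'EXIT_RUNTIME', '-s', 'EXPORT_NAME=Foo', '-s', 'INITIAL_MEMORY=65536']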
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_test_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_test_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_test_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
# Returns the full list of arguments to pass to emcc.
# @param main_file: whether this is the main file of the test. Some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example.
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
def verify_es5(self, filename):
es_check = shared.get_npm_cmd('es-check')
# use --quiet once it's available
# See: https://github.com/dollarshaveclub/es-check/pull/126/
try:
shared.run_process(es_check + ['es5', os.path.abspath(filename)], stderr=PIPE)
except subprocess.CalledProcessError as e:
print(e.stderr)
self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False,
post_build=None, js_outfile=True):
suffix = '.js' if js_outfile else '.wasm'
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
compiler = [EMXX]
else:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# We link with C++ stdlibs even when linking with emcc, for historical reasons. We can remove
# this if that issue is fixed.
compiler = [EMCC, '-nostdlib++']
dirname, basename = os.path.split(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + \
['-I.', '-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
libraries
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and not self.uses_es6:
self.verify_es5(output)
if post_build:
post_build(output)
if js_outfile and self.uses_memory_init_file():
src = open(output).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_js(self, filename, engine=None, args=[], output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
error = None
if not engine:
engine = config.JS_ENGINES[0]
if engine == config.NODE_JS:
engine = engine + self.node_args
if engine == config.V8_ENGINE:
engine = engine + self.v8_args
if EMTEST_VERBOSE:
print(f"Running '{filename}' under '{shared.shlex_join(engine)}'")
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
if error or EMTEST_VERBOSE:
ret = limit_size(ret)
print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertIdenticalUrlEncoded(self, expected, actual, **kwargs):
"""URL decodes the `actual` parameter before checking for equality."""
self.assertIdentical(expected, unquote(actual), **kwargs)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
values = list(map(asstr, values))
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(open(file1, 'rb').read(),
open(file2, 'rb').read())
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init={}, cache_name_extra='', native=False):
if make_args is None:
make_args = ['-j', str(building.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
return build_library(name, build_dir, output_dir, generated_libs, configure,
configure_args, make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
for name in os.listdir(self.get_dir()):
try_delete(os.path.join(self.get_dir(), name))
if EMSCRIPTEN_TEMP_DIR:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than erroring.
# In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
self.fail('subprocess exited with non-zero return code(%d): `%s`' %
(e.returncode, shared.shlex_join(cmd)))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
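# Illustrative call (hypothetical source file name; the exact error text is an
# assumption, so only a generic substring is checked):
#
#   err = self.expect_fail([EMCC, 'does_not_exist.c'])
#   self.assertContained('error', err)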
# Exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
# when run under the browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_test_file('libb.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
}
void bfunc() {
afunc("b");
}
''')
create_test_file('libc.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
}
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.clear_setting('RUNTIME_LINKED_LIBS')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so] + self.get_emcc_args()
cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE', 1)
self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
do_run(r'''
extern "C" {
void bfunc();
void cfunc();
}
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.set_setting('RUNTIME_LINKED_LIBS', [])
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = self.js_engines
for engine in js_engines:
assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output, force_c=False, **kwargs):
if 'no_build' in kwargs:
filename = src
else:
if force_c:
filename = 'src.c'
else:
filename = 'src.cpp'
with open(filename, 'w') as f:
f.write(src)
self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
self._build_and_run(filename, open(expected_output_filename).read(), **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
srcfile = path_from_root(*path)
outfile = shared.unsuffixed(srcfile) + '.out'
expected = open(outfile).read()
self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
no_build=False,
js_engines=None, post_build=None, libraries=[],
includes=[],
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True, force_c=False):
logger.debug(f'_build_and_run: {filename}')
if no_build:
js_file = filename
else:
self.build(filename, libraries=libraries, includes=includes, post_build=post_build,
force_c=force_c)
js_file = shared.unsuffixed(os.path.basename(filename)) + '.js'
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
if len(engines) > 1 and not self.use_all_engines:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
if not self.wasm_engines:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += self.wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.unsuffixed(js_file) + '.wasm.c'
executable = shared.unsuffixed(js_file) + '.exe'
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all:
for o in expected_output:
self.assertContained(o, js_output)
else:
self.assertContained(expected_output, js_output)
if check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + path_from_root('tests', 'third_party', 'freetype', 'include'),
'-I' + path_from_root('tests', 'third_party', 'poppler', 'include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=[path_from_root('emconfigure.bat')],
configure_args=['cmake', '.'],
make=['mingw32-make'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url)
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(self, code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class Reporting(Enum):
"""When running browser tests we normally automatically include support
code for reporting results back to the browser. This enum allows tests
to decide what type of support code they need/want.
"""
NONE = 0
# Include the JS helpers for reporting results
JS_ONLY = 1
# Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
FULL = 2
class BrowserCore(RunnerCore):
# Note how many tests hang / do not send output. If many of these
# happen, something is likely broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
asbytes('http://localhost:%s/%s' % (self.port, html_file)),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
try:
self.assertIdenticalUrlEncoded(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open('reftest.js', 'w') as out:
with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
# Inject support code for reporting results. This adds an include of a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-s', 'IN_TEST_HARNESS=1']
if reporting != Reporting.NONE:
# For basic reporting we inject JS helper functions to report results back to the server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', path_from_root('tests', 'browser_reporting.js')]
if reporting == Reporting.FULL:
# If C reporting (i.e. REPORT_RESULT macro) is required
# also compile in report_result.cpp and force-include report_result.h
args += ['-I', path_from_root('tests'),
'-include', path_from_root('tests', 'report_result.h'),
path_from_root('tests', 'report_result.cpp')]
self.run_process([EMCC] + self.get_emcc_args() + args)
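# Illustrative call (hypothetical file names): build a browser testcase that
# only reports results via the injected JS helpers, without the REPORT_RESULT
# C macro:
#
#   self.compile_btest(['page.cpp', '-o', 'page.html'], reporting=Reporting.JS_ONLY)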
def btest_exit(self, filename, expected, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
with a given result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%s' % expected
return self.btest(filename, *args, **kwargs)
def btest(self, filename, expected=None, reference=None, force_c=False,
reference_slack=0, manual_reference=False, post_build=None,
args=None, message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False, extra_tries=1,
reporting=Reporting.FULL):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
if args is None:
args = []
# if we are provided the source and not a path, use that
filename_is_src = '\n' in filename
src = filename if filename_is_src else ''
original_args = args[:]
if filename_is_src:
filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
with open(filepath, 'w') as f:
f.write(src)
else:
filepath = path_from_root('tests', filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args += ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
outfile = 'test.html'
args = [filepath, '-o', outfile] + args
# print('all args:', args)
try_delete(outfile)
self.compile_btest(args, reporting=reporting)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in original_args and (also_asmjs or self.also_asmjs):
print('WASM=0')
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['-s', 'WASM=0'], message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
configure_args=[],
make=['make'],
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False,
cflags=[]):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers). This cache is just during the test
runner. There is a different concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = path_from_root('tests', name.replace('_native', ''))
temp_dir = build_dir
project_dir = os.path.join(temp_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
shutil.copytree(source_dir, project_dir)  # Sometimes useful for debugging: comment this out, along with the two lines above
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = building.get_building_env(cflags=cflags)
for k, v in env_init.items():
env[k] = v
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
building.configure(configure + configure_args, env=env,
stdout=stdout,
stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
with open(os.path.join(project_dir, 'configure_out')) as f:
print('-- configure stdout --')
print(f.read())
print('-- end configure stdout --')
with open(os.path.join(project_dir, 'configure_err')) as f:
print('-- configure stderr --')
print(f.read())
print('-- end configure stderr --')
raise
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
building.make(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
return generated_libs
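# Hedged usage sketch (library and directory names are illustrative only): the
# function itself only *fills* the cache; it is the caller that checks the dict
# first and skips the configure/make work on a hit, e.g.
#
#   library_cache = {}
#   if 'zlib' not in library_cache:
#     build_library('zlib', build_dir, output_dir, ['libz.a'],
#                   cache=library_cache, cache_name='zlib')
#   name_and_bytes = library_cache['zlib']  # list of (basename, file contents)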
def check_js_engines():
working_engines = list(filter(jsrun.check_engine, config.JS_ENGINES))
if len(working_engines) < len(config.JS_ENGINES):
print('Not all of the JS engines in JS_ENGINES appear to work.')
sys.exit(1)
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
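# Worked example (test names are illustrative): given
#   args = ['browser.test_pthread_*', 'skip:core0.test_hello*']
# the first pattern expands to every matching 'browser.' test name from
# all_tests, while the second expands to the matching names each prefixed with
# 'skip:', which skip_requested_tests() below then turns into skipped tests.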
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
skipped = False
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
suite = getattr(m, suite_name, None)
if suite:
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
skipped = True
break
assert skipped, "Not able to skip test " + test
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
def get_random_test_parameters(arg):
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
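# Worked example of the estimate above: with num_tests = 100,
# std = 0.5 / math.sqrt(100) = 0.05, so the printed bound is
# expected = 100.0 * (1.0 - 0.05) = 95.00% of the test suite.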
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other', 'test_posixtest')
if not EMTEST_SAVE_DIR:
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_testsuite.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_testsuite.ParallelTestSuite(len(tests))
return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('tests', nargs='*')
return parser.parse_args()
def main(args):
options = parse_args(args)
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
dices.py
|
import os
import telebot
import time
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import traceback
import json
import requests
import http.cookiejar as cookielib
import urllib
import urllib.request as urllib2
CJ = cookielib.LWPCookieJar()
from requests.exceptions import HTTPError
client=MongoClient(os.environ['database'])
db=client.dices
users=db.users
chats = db.chats
OPENER = urllib2.build_opener(urllib2.HTTPCookieProcessor(CJ))
bot = 'https://api.telegram.org/bot'+os.environ['dicebot']+'/'
for url in ['https://api.github.com', 'https://api.github.com/invalid']:
try:
response = requests.get(url)
response.raise_for_status()
except HTTPError as http_err:
print('HTTP error occurred: '+str(http_err))
except Exception as err:
print('Other error occurred: '+str(err))
else:
print('Success!')
u_id = 0
ems = ['🎲', '🏀', '🎯', '⚽️']
def createchat(chat):
return {
'id':chat['id'],
'results':True
}
def createuser(user):
return {
'id':user['id'],
'name':user['first_name'],
'results':{
'ball':{
'score_sum':0,
'score_amount':0,
'1':0,
'2':0,
'3':0,
'4':0,
'5':0
},
'darts':{
'score_sum':0,
'score_amount':0,
'1':0,
'2':0,
'3':0,
'4':0,
'5':0,
'6':0
},
'cube':{
'score_sum':0,
'score_amount':0,
'1':0,
'2':0,
'3':0,
'4':0,
'5':0,
'6':0
},
'football':{
'score_sum':0,
'score_amount':0,
'1':0,
'2':0,
'3':0,
'4':0,
'5':0,
'6':0
}
}
}
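# Hypothetical helper (not used elsewhere in this script) showing how the stats
# stored by createuser() are meant to be read back: the average for one emoji is
# simply score_sum / score_amount, guarding against users with no throws yet.
def average_score(user_doc, result_key):
    stats = user_doc['results'][result_key]
    if stats['score_amount'] == 0:
        return 0
    return round(stats['score_sum'] / stats['score_amount'], 3)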
try:
users.find_one({'id':441399484})['results']['football']
except:
users.update_many({},{'$set':{'results.football':{
'score_sum':0,
'score_amount':0,
'1':0,
'2':0,
'3':0,
'4':0,
'5':0,
'6':0
}}})
#if users.find_one({'id':'bot'}) == None:
# users.insert_one(createuser({'id':'bot', 'first_name': 'Dices'}))
def massreklama(message):
i = 0
for ids in chats.find({}):
try:
req = requests.get(bot+'forwardMessage?chat_id='+str(ids['id'])+'&message_id='+str(message['reply_to_message']['message_id'])+'&from_chat_id='+str(message['chat']['id']))
i+=1
if i%1000 == 0:
try:
req = requests.get(bot+'sendMessage?chat_id='+str(441399484)+'&text=Сообщение получило '+str(i)+' чатов!')
except:
pass
except:
pass
req = requests.get(bot+'sendMessage?chat_id='+str(441399484)+'&text=Сообщение получило '+str(i)+' чатов!')
def testreklama(message):
try:
req = requests.get(bot+'forwardMessage?chat_id='+str(368543755)+'&message_id='+str(message['reply_to_message']['message_id'])+'&from_chat_id='+str(message['chat']['id']))
req = requests.get(bot+'forwardMessage?chat_id='+str(441399484)+'&message_id='+str(message['reply_to_message']['message_id'])+'&from_chat_id='+str(message['chat']['id']))
except:
print(traceback.format_exc())
print(message)
def new_msg(result):
try:
try:
user = users.find_one({'id':result['message']['from']['id']})
message = result['message']
except:
user = users.find_one({'id':result['result']['from']['id']})
#result = result['result']
message = result['result']
chat = chats.find_one({'id':message['chat']['id']})
if chat == None:
chats.insert_one(createchat(message['chat']))
chat = chats.find_one({'id':message['chat']['id']})
if 'reply_to_message' in message and 'text' in message and message['from']['id'] == 441399484 and message['text'].lower()[:8] == '/reklama':
massreklama(message)
if 'reply_to_message' in message and 'text' in message and message['from']['id'] == 441399484 and message['text'].lower()[:12] == '/testreklama':
testreklama(message)
if message['from']['id'] == 441399484 and 'text' in message:
text = message['text']
if text.lower()[:8] == '/reklama':
users.update_one({'id':message['from']['id']},{'$set':{'reklama':True}})
req = requests.get(bot+'sendMessage?chat_id='+str(441399484)+'&text=Режим рекламы активирован! Отправьте форвард.')
return
if message['from']['id'] == 1255836783:
user = users.find_one({'id':'bot'})
if user == None:
users.insert_one(createuser(message['from']))
user = users.find_one({'id':message['from']['id']})
amount = 0
for ids in users.find({}):
amount += 1
req = requests.get(bot+'sendMessage?chat_id='+str(441399484)+'&text=Новый юзер: '+user['name']+'. ID: '+str(user['id'])+'. Всего юзеров: '+str(amount))
#print('MESSAGE!')
#print(message)
if 'dice' in message:
if 'forward_from' in message:
return
try:
print('DICE!')
print(message)
number = message['dice']['value']
em = message['dice']['emoji']
if em == '🎯':
x = 2.5
rs = 'darts'
doptxt = 'дротик'
elif em == '🎲':
x = 3.3
rs = 'cube'
doptxt = 'кубик'
elif em == '🏀':
x = 4
rs = 'ball'
doptxt = 'мяч'
elif em == '⚽️':
x = 4
rs = 'football'
doptxt = 'футбольный мяч'
#req = urllib2.Request(bot+'sendMessage?chat_id='+str(result['message']['chat']['id'])+'&text="Брошен кубик!"')
time.sleep(x)
if user['id'] != 'bot':
if chat['results'] == True:
req = requests.get(bot+'sendMessage?chat_id='+str(message['chat']['id'])+'&text=Брошен '+doptxt+'! Результат: '+str(number)+'&reply_to_message_id='+str(message['message_id']))
users.update_one({'id':user['id']},{'$inc':{'results.'+rs+'.score_sum':number, 'results.'+rs+'.score_amount':1, 'results.'+rs+'.'+str(number):1}})  # also bump the per-value counter defined in createuser()
except:
pass
else:
if 'text' in message:
text = message['text']
if text.lower()[:5] == '/dice' or text.lower()[:20] == '/dice@dice_saver_bot':
try:
em = text.split(' ')[1]
except:
em = random.choice(ems)
try:
item = text.split(' ')[1]
if item.lower() in ['darts', 'дартс', 'дротик']:
em = '🎯'
if item.lower() in ['basketball', 'баскетбол', 'мяч', 'мячик', 'корзина']:
em = '🏀'
if item.lower() in ['cube', 'куб', 'кубик', 'кости']:
em = '🎲'
if item.lower() in ['футбол', 'football', '⚽️']:
em = '⚽️'
except:
pass
if em not in ems:
em = random.choice(ems)
try:
req = requests.get(bot+'sendDice?chat_id='+str(message['chat']['id'])+'&emoji='+em+'&reply_to_message_id='+str(message['message_id']))
#content = OPENER.open(req).read()
msg = json.loads(req.text)
print(msg)
new_msg(msg)
except:
pass
elif text.lower()[:9] == '/my_dices' or text.lower()[:24] == '/my_dices@dice_saver_bot':
txt = ''
txt += 'Статистика бросков '+user['name']+':\n\n'
txt += '🎲:\n'
txt += ' Количество бросков: '+str(user['results']['cube']['score_amount'])+'\n'
try:
txt += ' Средний балл: '+str(round(user['results']['cube']['score_sum']/user['results']['cube']['score_amount'], 3))+'\n'
except:
txt += ' Средний балл: 0\n'
txt += '\n'
txt += '🎯:\n'
txt += ' Количество бросков: '+str(user['results']['darts']['score_amount'])+'\n'
try:
txt += ' Средний балл: '+str(round(user['results']['darts']['score_sum']/user['results']['darts']['score_amount'], 3))+'\n'
except:
txt += ' Средний балл: 0\n'
txt += '\n'
txt += '🏀:\n'
txt += ' Количество бросков: '+str(user['results']['ball']['score_amount'])+'\n'
try:
txt += ' Средний балл: '+str(round(user['results']['ball']['score_sum']/user['results']['ball']['score_amount'], 3))+'\n'
except:
txt += ' Средний балл: 0\n'
txt += '\n'
txt += '⚽️:\n'
txt += ' Количество бросков: '+str(user['results']['football']['score_amount'])+'\n'
try:
txt += '    Средний балл: '+str(round(user['results']['football']['score_sum']/user['results']['football']['score_amount'], 3))+'\n'
except:
txt += ' Средний балл: 0\n'
req = requests.get(bot+'sendMessage?chat_id='+str(message['chat']['id'])+'&text='+txt+'&reply_to_message_id='+str(message['message_id']))
elif text.lower()[:10] == '/bot_dices' or text.lower()[:25] == '/bot_dices@dice_saver_bot':
user = users.find_one({'id':'bot'})
txt = ''
txt += 'Статистика бросков бота '+user['name']+':\n\n'
txt += '🎲:\n'
txt += ' Количество бросков: '+str(user['results']['cube']['score_amount'])+'\n'
try:
txt += ' Средний балл: '+str(round(user['results']['cube']['score_sum']/user['results']['cube']['score_amount'], 3))+'\n'
except:
txt += ' Средний балл: 0\n'
txt += '\n'
txt += '🎯:\n'
txt += ' Количество бросков: '+str(user['results']['darts']['score_amount'])+'\n'
try:
txt += ' Средний балл: '+str(round(user['results']['darts']['score_sum']/user['results']['darts']['score_amount'], 3))+'\n'
except:
txt += ' Средний балл: 0\n'
txt += '\n'
txt += '🏀:\n'
txt += ' Количество бросков: '+str(user['results']['ball']['score_amount'])+'\n'
try:
txt += ' Средний балл: '+str(round(user['results']['ball']['score_sum']/user['results']['ball']['score_amount'], 3))+'\n'
except:
txt += ' Средний балл: 0\n'
txt += '\n'
txt += '⚽️:\n'
txt += ' Количество бросков: '+str(user['results']['football']['score_amount'])+'\n'
try:
txt += '    Средний балл: '+str(round(user['results']['football']['score_sum']/user['results']['football']['score_amount'], 3))+'\n'
except:
txt += ' Средний балл: 0\n'
req = requests.get(bot+'sendMessage?chat_id='+str(message['chat']['id'])+'&text='+txt+'&reply_to_message_id='+str(message['message_id']))
elif text.lower()[:6] == '/start' and message['chat']['type'] == 'private':
req = requests.get(bot+'sendMessage?chat_id='+str(message['chat']['id'])+'&text='+'Я могу сохранять результаты бросков кубика/дротика/мяча. Если добавить меня в группу, то я буду записывать статистику бросков и там.')
elif text.lower()[:5] == '/help' or text.lower()[:20] == '/help@dice_saver_bot':
tt = ''
tt += 'Дополнительные функции бота:\n\n1. Имеется возможность после команды /dice написать, какой именно бросок сделать. Все возможные варианты:\n'+\
'/dice куб/кубик/кости/cube/🎲\n'+\
'/dice мяч/мячик/баскетбол/корзина/basketball/🏀\n'+\
'/dice дротик/дартс/darts/🎯\n'+\
'/dice футбол/football/⚽️'
tt += '\n\n'
tt += '2. Когда вы используете /dice, этот бросок засчитывается боту. Увидеть статистику можно по команде /bot_dices.'
req = requests.get(bot+'sendMessage?chat_id='+str(message['chat']['id'])+'&text='+tt)
elif text.lower()[:11] == '/off_result' or text.lower()[:26] == '/off_result@dice_saver_bot':
chatu = requests.get(bot+'getChatMember?chat_id='+str(message['chat']['id'])+'&user_id='+str(user['id']))
msgg = json.loads(chatu.text)
print(msgg)
if message['chat']['type'] != 'private':
if msgg['result']['status'] not in ['creator', 'administrator']:
req = requests.get(bot+'sendMessage?chat_id='+str(message['chat']['id'])+'&text='+'Только администратор чата может делать это!')
return
if chat['results'] == True:
chats.update_one({'id':chat['id']},{'$set':{'results':False}})
req = requests.get(bot+'sendMessage?chat_id='+str(message['chat']['id'])+'&text='+'Вывод результатов броска отключен!')
if chat['results'] == False:
chats.update_one({'id':chat['id']},{'$set':{'results':True}})
req = requests.get(bot+'sendMessage?chat_id='+str(message['chat']['id'])+'&text='+'Вывод результатов броска включен!')
except:
pass
def polling():
global u_id
while True:
try:
#rq = 'https://api.telegram.org/bot'+os.environ['TELEGRAM_TOKEN']+'/getUpdates'
req = urllib2.Request(bot+'getUpdates?offset='+str(u_id))
content = OPENER.open(req).read()
for result in json.loads(content)['result']:
u_id = result['update_id']+1
#if(result['message']['text'] == 'привет'):
# url = BASE_URL + 'sendMessage'
# req = urllib2.Request(url)
# req.add_header("Accept","application/json")
# req.add_header('User-agent',USER_AGENT)
# req.add_data(urllib.urlencode({'chat_id':result['message']['chat']['id'],'text':'Эй Привет чувак!'}))
# OPENER.open(req).read()
threading.Thread(target = new_msg, args = [result]).start()
except:
pass
time.sleep(5)
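# Note on the long-polling loop above: Telegram's getUpdates returns each update
# with an update_id, and sending offset = last update_id + 1 on the next call
# acknowledges the earlier updates so they are not delivered again. A sketch of
# the same call using requests (already imported) instead of hand-built URLs:
#
#   resp = requests.get(bot + 'getUpdates', params={'offset': u_id}, timeout=30)
#   for result in resp.json().get('result', []):
#       ...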
def send_message():
i = 'https://api.telegram.org/bot123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11/sendMessage?chatid=chatid'
|
log_handler.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Beam fn API log handler."""
# pytype: skip-file
# mypy: disallow-untyped-defs
from __future__ import absolute_import
from __future__ import print_function
import logging
import math
import queue
import sys
import threading
import time
import traceback
from typing import TYPE_CHECKING
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Union
from typing import cast
import grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
from apache_beam.portability.api import endpoints_pb2
# This module is experimental. No backwards-compatibility guarantees.
class FnApiLogRecordHandler(logging.Handler):
"""A handler that writes log records to the fn API."""
# Maximum number of log entries in a single stream request.
_MAX_BATCH_SIZE = 1000
# Used to indicate the end of stream.
_FINISHED = Sentinel.sentinel
# Size of the queue used to buffer messages. Once full, messages will be
# dropped. If the average log size is 1KB this may use up to 10MB of memory.
_QUEUE_SIZE = 10000
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_fn_api_pb2.LogEntry.Severity.CRITICAL,
logging.ERROR: beam_fn_api_pb2.LogEntry.Severity.ERROR,
logging.WARNING: beam_fn_api_pb2.LogEntry.Severity.WARN,
logging.INFO: beam_fn_api_pb2.LogEntry.Severity.INFO,
logging.DEBUG: beam_fn_api_pb2.LogEntry.Severity.DEBUG,
-float('inf'): beam_fn_api_pb2.LogEntry.Severity.DEBUG,
}
def __init__(self, log_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> None
super(FnApiLogRecordHandler, self).__init__()
self._alive = True
self._dropped_logs = 0
self._log_entry_queue = queue.Queue(
maxsize=self._QUEUE_SIZE
) # type: queue.Queue[Union[beam_fn_api_pb2.LogEntry, Sentinel]]
ch = GRPCChannelFactory.insecure_channel(log_service_descriptor.url)
# Make sure the channel is ready to avoid [BEAM-4649]
grpc.channel_ready_future(ch).result(timeout=60)
self._log_channel = grpc.intercept_channel(ch, WorkerIdInterceptor())
self._reader = threading.Thread(
target=lambda: self._read_log_control_messages(),
name='read_log_control_messages')
self._reader.daemon = True
self._reader.start()
def connect(self):
# type: () -> Iterable
if hasattr(self, '_logging_stub'):
del self._logging_stub # type: ignore[has-type]
self._logging_stub = beam_fn_api_pb2_grpc.BeamFnLoggingStub(
self._log_channel)
return self._logging_stub.Logging(self._write_log_entries())
def map_log_level(self, level):
# type: (int) -> beam_fn_api_pb2.LogEntry.Severity.Enum
try:
return self.LOG_LEVEL_MAP[level]
except KeyError:
return max(
beam_level for python_level,
beam_level in self.LOG_LEVEL_MAP.items() if python_level <= level)
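# Worked example of the fallback above: a custom level such as logging.INFO + 5
# (25) is not a key in LOG_LEVEL_MAP, so the except branch considers the entries
# whose Python level is <= 25 (INFO, DEBUG and the -inf default) and returns the
# largest of their Beam severity values, i.e. INFO here, since the proto enum
# increases with severity.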
def emit(self, record):
# type: (logging.LogRecord) -> None
log_entry = beam_fn_api_pb2.LogEntry()
log_entry.severity = self.map_log_level(record.levelno)
log_entry.message = self.format(record)
log_entry.thread = record.threadName
log_entry.log_location = '%s:%s' % (
record.pathname or record.module, record.lineno or record.funcName)
(fraction, seconds) = math.modf(record.created)
nanoseconds = 1e9 * fraction
log_entry.timestamp.seconds = int(seconds)
log_entry.timestamp.nanos = int(nanoseconds)
if record.exc_info:
log_entry.trace = ''.join(traceback.format_exception(*record.exc_info))
instruction_id = statesampler.get_current_instruction_id()
if instruction_id:
log_entry.instruction_id = instruction_id
tracker = statesampler.get_current_tracker()
if tracker:
current_state = tracker.current_state()
if (current_state and current_state.name_context and
current_state.name_context.transform_id):
log_entry.transform_id = current_state.name_context.transform_id
try:
self._log_entry_queue.put(log_entry, block=False)
except queue.Full:
self._dropped_logs += 1
def close(self):
# type: () -> None
"""Flush out all existing log entries and unregister this handler."""
try:
self._alive = False
# Acquiring the handler lock ensures ``emit`` is not run until the lock is
# released.
self.acquire()
self._log_entry_queue.put(self._FINISHED, timeout=5)
# wait on server to close.
self._reader.join()
self.release()
# Unregister this handler.
super(FnApiLogRecordHandler, self).close()
except Exception:
# Log rather than raising exceptions, to avoid clobbering
# underlying errors that may have caused this to close
# prematurely.
logging.error("Error closing the logging channel.", exc_info=True)
def _write_log_entries(self):
# type: () -> Iterator[beam_fn_api_pb2.LogEntry.List]
done = False
while not done:
log_entries = [self._log_entry_queue.get()]
try:
for _ in range(self._MAX_BATCH_SIZE):
log_entries.append(self._log_entry_queue.get_nowait())
except queue.Empty:
pass
if log_entries[-1] is self._FINISHED:
done = True
log_entries.pop()
if log_entries:
# typing: log_entries was initialized as List[Union[..., Sentinel]],
# but now that we've popped the sentinel out (above) we can safely cast
yield beam_fn_api_pb2.LogEntry.List(
log_entries=cast(List[beam_fn_api_pb2.LogEntry], log_entries))
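# Batching note for _write_log_entries above: the first get() blocks until at
# least one entry (or the _FINISHED sentinel) is available, then up to
# _MAX_BATCH_SIZE further entries are drained without blocking, so each yielded
# LogEntry.List carries between 1 and _MAX_BATCH_SIZE + 1 records per request.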
def _read_log_control_messages(self):
# type: () -> None
# Only reconnect when we are alive.
# We may drop some logs in the unlikely event that the logging connection is
# dropped (not closed) during termination while logs are still pending. That
# case is rare, and the chance of reconnecting and successfully transmitting
# the remaining logs while the process is terminating is small, so it is
# intentionally left unhandled to avoid unnecessary code complexity.
alive = True # Force at least one connection attempt.
while alive:
# Loop for reconnection.
log_control_iterator = self.connect()
if self._dropped_logs > 0:
logging.warning(
"Dropped %d logs while logging client disconnected",
self._dropped_logs)
self._dropped_logs = 0
try:
for _ in log_control_iterator:
# Loop for consuming messages from server.
# TODO(vikasrk): Handle control messages.
pass
# iterator is closed
return
except Exception as ex:
print(
"Logging client failed: {}... resetting".format(ex),
file=sys.stderr)
# Wait a bit before trying a reconnect
time.sleep(0.5) # 0.5 seconds
alive = self._alive
|
CTRLdeck.py
|
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from inc.getCOM import serial_ports
from pystray import MenuItem as item
import pystray
from PIL import Image, ImageTk
import inc.serialValuetoVolume as serialValuetoVolume
import threading
import pythoncom
import logging
from time import sleep
from numpy import interp
# Global variable for the Arduino port (possibly no longer needed).
chosenPort = str()
# Create list variable to hold information in buffer file. It must hold these variables so that we don't reference empty indices
global lineList
lineList = ["1", "\n2", "\n3", "\n4", "\n5"] # Default value to maintain the correct number of indicies.
macroList = ["1", "\n2", "\n3", "\n4", "\n5", "\n6", "\n7", "\n8", "\n9", "\n10", "\n11", "\n12"]
# Variable for systray icon
global icon
# List to which we append threads
threads = []
# Create log file
logging.basicConfig(filename='ctrldeck.log', filemode= 'w', level=logging.ERROR)
#------------------------------------------------------------------
# Create Functions for getting user chosen port and
# using it to open the serial port
#------------------------------------------------------------------
# Get chosen COM port from drop down menu and open serial port
def savePortChoice(event):
global chosenPort
chosenPort = str(portsVar.get())
portFile = open("COMport", "w")
lineList[0] = (chosenPort)
portFile.writelines(lineList)
portFile.close()
#------------------------------------------------------------------
# Create Functions for getting user chosen AudioSession and
# using it to create AudioController object
#------------------------------------------------------------------
# get chosen sessionID from drop down menu and set session volume to slider sliderNum value
def saveSlider(sliderNum):
slider = sliders[sliderNum - 1]
label = labels[sliderNum - 1]
# Checks for user input choice and runs filedialog function chooseFile()
process_Name = str(slider.get())
if process_Name == "Choose a file:":
process_Name = chooseFile()
if len(process_Name) > 2:
sessionOptions[sliderNum] = process_Name
else:
pass
logging.info('Process ' + process_Name + ' successfully added to Slider ' + str(sliderNum))
else:
pass
# Opens the temp file and stores the chosen process name
label.insert(END, process_Name)
global lineList
listSize = label.size()
sliderStr = ''
sliderList = list(label.get(0, listSize))
for item in sliderList:
sliderStr += str(item) + ","
lineList[sliderNum] = ("\n" + sliderStr)
try:
portFile = open("COMport", "w")
portFile.writelines(lineList)
portFile.close()
logging.info(lineList[sliderNum] + ' added to Slider ' + str(sliderNum))
serialValuetoVolume.init()
except:
logging.debug('Process was not added to Slider ' + str(sliderNum))
# Opens filedialog and allows user to choose .exe file to which they wish to assign slider
def chooseFile():
filetypes = (
('Executables', '*.exe'),
)
filename = filedialog.askopenfilename(
title='Choose a file:',
initialdir='/',
filetypes=filetypes)
# Strip file location and pull just the filename
filename = filename.split('/')
# Return filename.exe
return(str(filename[-1]))
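# Worked example: picking 'C:/Program Files/Spotify/Spotify.exe' in the dialog
# splits on '/' and returns just 'Spotify.exe', the bare executable name format
# used by the preset session options below (e.g. 'chrome.exe').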
# Create dropdown to choose arduino port
def show():
portLabel.config( textvariable = portsVar.get() )
# Function to delete items from the ListBox and remove the processes from the sliders
def onselect(evt, labelNum):
global lineList
label = labels[labelNum - 1]
print(len(lineList[labelNum]))
# Access storage of processes and create widget that triggers on select event in ListBox
w = evt.widget
try:
index = int(w.curselection()[0]) # Get index of currently selected process in Listbox
value = w.get(index) # Get the name of the process to remove
start = int(lineList[labelNum].find(value)) # Get index of the first letter of the process name
length= int(len(value)) # Get length of the process name
stop = int(length + start + 1) # Create ending index of process name
value1 = (lineList[labelNum][:start] + lineList[labelNum][stop:-1]) # Take lineList and build a new string with the currently selected process removed
lineList[labelNum] = value1 # Substitute new string into lineList
label.delete(index) # Remove the process from the label
print(len(lineList[labelNum]))
# Prevent the remove command from emptying the indices of lineList. If the number of indices changes, the rest of the program misreads the buffer file.
if len(lineList[labelNum]) < 3:
lineList[labelNum] += str(labelNum + 1) # Stick in default value for lineList to keep the right number of indices
else:
pass
# Open file and write new lineList
portFile = open("COMport", "w")
portFile.writelines(lineList)
portFile.close()
except IndexError:
pass
# This runs the functions that get serial data, convert to windows accepted values, and assign volumes
def sliderRun():
pythoncom.CoInitialize() # Necessary to run this function in another thread
try: # Attempt to close the program first to make sure it isn't already running
serialValuetoVolume.stop_program()
print("program stopped")
logging.info('Program was stopped before starting again')
except: # If the program throws an exception we assume it's because it's not currently running
pass
serialValuetoVolume.init()
serialValuetoVolume.connectSerial()
serialValuetoVolume.getValues()
def start_clicked():
try:
serialValuetoVolume.stop_program()
logging.info('SerialtoVolume stopped before running')
except:
logging.warning('SerialtoVolume could not stop')
pass
# Creates thread and appends it to thread list
global t
t = threading.Thread(target=sliderRun) # Sets target function that should run in this thread
threads.append(t)
t.start() # Starting thread runs the target function
global startButton
startButton = ttk.Button(frm, text="Restart CTRLdeck", command=start_clicked).place(x=26, y=632) # Rename the 'start' button to 'restart'
# This is the actual closing function which ends the program and its associated threads. Only accessed by 'Quit' in the taskbar
def on_closing(icon, item):
serialValuetoVolume.stop_program() # serialValuetoVolume loop must be stopped before thread can be exited
logging.warning('Serial to Volume stopped')
# Reset temp file so that the number of entries in list stays the same for next execute. Might be redundant.
portFile = open("COMport", "w")
lineList = ["1", "\n2", "\n3", "\n4", "\n5"]
portFile.writelines(lineList)
portFile.close()
logging.debug('File reset')
try: # Attempt to close thread. This only works if getValues() loop has stopped.
t.join()
t2.join()
logging.debug('Thread for volume control ended')
except: # If this throws an exception we assume it's because it is not running. Could be made more robust.
logging.warning('Could not end thread')
pass
icon.stop() # Destroys the system tray icon
logging.debug('Icon destroyed')
root.destroy() # Destroys the window
logging.debug('Window destroyed')
# Recreates the window from the system tray icon
def open_window(icon, item):
root.lift() # Brings window to the front
root.after( 0 , root.deiconify) # Restores (deiconifies) the window
logging.debug('System tray icon was destroyed so the window could open')
icon.stop() # Necessary to destroy system tray icon but I don't know why
# Hide the window and show on the system taskbar
def hide_window():
# Store processes assigned to sliders to display in the icon menu
sliderProcesses = []
try:
for i in range(numSliders):
sliderProcesses.append(str(serialValuetoVolume.sliderProcesses[i]))
except TypeError:
pass
global icon
root.withdraw() # Hides GUI Window
logging.debug('Window hidden')
image=Image.open("fader.ico")
logging.debug('Icon created')
try:
menu=(item('Slider 1: ' + sliderProcesses[0], 0), item('Slider 2: ' + sliderProcesses[1], 0), item('Slider 3: ' + sliderProcesses[2], 0),
item('Slider 4: ' + sliderProcesses[3], 0), item('Restart', start_clicked), item('Show', open_window) , item('Quit', on_closing)) # Creates the right-click menu and its options in the system tray icon
icon=pystray.Icon("name", image, "CTRLDeck", menu) # Creates click options on system tray icon
icon.run() # Start system tray icon
logging.debug('System tray icon running')
except IndexError:
menu=(item('You have no processes chosen.', open_window), item('Restart', start_clicked), item('Show', open_window) , item('Quit', on_closing)) # Creates the right-click menu and its options in the system tray icon
icon=pystray.Icon("name", image, "CTRLDeck", menu) # Creates click options on system tray icon
icon.run() # Start system tray icon
logging.debug('System tray icon running')
def updateSliderYPos():
pythoncom.CoInitialize()
faderKnobYPosPrev = [0,0,0,0]
faderKnobYPos = [400,400,400,400]
global fader_labels
while True:
faderKnobYPos = serialValuetoVolume.faders.copy()
# faderKnobYPosPrev = faderKnobYPos.copy()
for i in range (len(faderKnobYPos)):
if faderKnobYPos[i] != faderKnobYPosPrev[i]:
faderKnobYPos[i] = interp(faderKnobYPos[i], [0.0,1.0], [511,233])
fader_labels[i].place(x=faderKnobXPos[i], y=faderKnobYPos[i])
faderKnobYPosPrev[i] = faderKnobYPos[i]
sleep(.001)
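# Worked example of the interp() mapping above: a fader value of 0.5 becomes
# interp(0.5, [0.0, 1.0], [511, 233]) = 372.0, i.e. the knob image is placed
# halfway between the bottom (y=511) and top (y=233) of the on-screen track.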
def startSliderYPos():
global t2
t2 = threading.Thread(target=updateSliderYPos) # Sets target function that should run in this thread
threads.append(t2)
t2.start() # Starting thread runs the target function
#------------------------------------------------------------------
# Create GUI
# -----------------------------------------------------------------
### Create Window
root = Tk()
root.title("CTRLdeck")
root.geometry('1024x702')
# Create background image
bg = PhotoImage(file = "./assets/12x4deck-bkgrd.png")
# Create a child frame from root
frm = ttk.Frame(root, padding = 0)
# Generate grid for alignment purposes
frm.grid()
labelbg = Label(frm, image = bg, width = bg.width(), height = bg.height())
labelbg.grid(column = 0, row = 0)
faderImg = ImageTk.PhotoImage(Image.open("./assets/fader_knob.png"))
### Set COM Port GUI elements
# Call available COM ports and put in a list
portOptions = (serial_ports())
# Set default value for menu
portsVar = StringVar()
portsVar.set("Choose your port:")
# Create port dropdown menu
portDrop = OptionMenu(frm, portsVar, *portOptions, command=savePortChoice).place(x = 867, y = 130)
# Create label
portLabel = Label( frm , textvariable = " " )
### Create slider GUI elements
# Define number of sliders
numSliders = 4
#----------------------------------------------------------------------------
# - Call list of Audio Sessions volume_by_process.py
# - Create dropdown list with a 'clicked' action
# - Display dropdown list in frame
# - Send chosen value to saveSlider()
#----------------------------------------------------------------------------
# Create list of common audio sessions
sessionOptions = ["master", "chrome.exe", "firefox.exe", "discord.exe", "microphone", "unmapped", "Choose a file:" ]
# Store audio sessions for 4 sliders
SliderDropdownsXPositions = [575, 680, 785, 890]
SliderDropdownsYPosition = 613
faderKnobXPos = [596, 693, 797, 901]
sliders = []
for i in range (numSliders):
slider = StringVar()
slider.set("Slider " + str(i+1))
OptionMenu(frm, slider, *sessionOptions, command=lambda event, sliderNum=i+1: saveSlider(sliderNum)).place(x=SliderDropdownsXPositions[i], y=SliderDropdownsYPosition)
sliders.append(slider)
# Create sessionLabels for processes currently controlled by sliders
SliderLabelsXPosition = [575, 680, 785, 890]
SliderLabelsYPositions = 650
labels = []
for i in range (numSliders):
label = Listbox( frm, width=13, bd=0, height=2, selectmode="single", borderwidth=0, )
label.place(x=SliderLabelsXPosition[i], y=SliderLabelsYPositions)
label.bind('<<ListboxSelect>>', lambda evt, labelNum=i+1 : onselect(evt, labelNum))
labels.append(label)
fader_labels = []
for i in faderKnobXPos:
fl = Label(frm, image = faderImg, borderwidth = 0, relief="flat")
fader_labels.append(fl)
# Creates start button that runs the clicked which kicks off the actual program
startButton = ttk.Button(frm, text="Start CTRLdeck", command=start_clicked).place(x=26, y=632)
# Loops the window processes
startSliderYPos()
root.protocol("WM_DELETE_WINDOW", hide_window)
root.mainloop()
|
ServiceUbuntu.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
import re
import time
import urllib
import lxml
import threading
import time
import requests
import base64
import json
import http.cookiejar as cookielib
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import config
import codecs
class Application():
# This class implements the concrete event-handling callbacks; the UI-generation code lives in Application_ui.
def __init__(self, master=None):
self.InitRuntime()
d = threading.Thread(target=self.InitDriver)
d.start()
print("__init__end")
def InitRuntime(self, event=None):
print("Initing >>>>>>")
self.InitDirs = []
self.InitFiles = []
self.CheckFiles=[]
self.driver = None
self.path = os.path.dirname(os.path.realpath(sys.argv[0]))
self.sys_path = os.path.dirname(os.path.realpath(sys.argv[0]))
print("Current Path:%s" % (self.path))
self.configFilepath = self.path + "/config.ini"
self.InitFiles.append(str(self.configFilepath))
print("ConfigFileCheck Path:%s" % (self.configFilepath))
self.driverFile = self.path + "/driver"
self.CheckFiles.append(str(self.driverFile))
print("DriverFile:%s" % (self.driverFile))
self.tmpDir = self.path + "/tmp/"
self.InitDirs.append(str(self.tmpDir))
print("TmpPath:%s" % (str(self.tmpDir)))
self.configPath = "默认设置"
self.data_path = config.read_config(
self.configFilepath, "默认设置", "save_path")
self.userData_path = config.read_config(
self.configFilepath, "默认设置", "userData_path")
self.InitDirs.append(str(self.data_path))
self.InitDirs.append(str(self.userData_path))
self.initFileSystem()
self.readConfigs()
def initFileSystem(self, event=None):
if self.InitDirs:
for initdir_path in self.InitDirs:
if initdir_path == None:
print("-----None:%s"%(initdir_path))
continue
elif initdir_path == "":
continue
elif not os.path.exists(initdir_path):
try:
print("%s is missing creat..." % (initdir_path))
print(initdir_path)
os.makedirs(initdir_path)
print("%s created!"%(initdir_path))
print("%s inited!" % (initdir_path))
except Exception as e:
print(e)
if self.InitFiles:
for initFile_path in self.InitFiles:
if initFile_path == None:
print("-----None:%s"%(initFile_path))
continue
elif not os.path.exists(initFile_path):
try:
with open(initFile_path, 'w+', encoding='utf-8-sig') as f:
print("%s File Missing CreatingFile..."%(initFile_path))
f.flush()
f.close()
except Exception as e:
print(e)
if self.CheckFiles:
for checkFile_path in self.CheckFiles:
if checkFile_path == None:
print("-----None:%s"%(checkFile_path))
continue
elif not os.path.exists(checkFile_path):
print("EEEEEEERRR:%s"% (checkFile_path))
if (self.userData_path == None):
self.userData_path = ""
if os.path.exists(self.userData_path):
print("userData_path inited")
else:
user_data_dir = self.userData_path
if user_data_dir == None or user_data_dir == "" or not os.path.exists(user_data_dir):
self.userData_path = self.tmpDir+"/" + \
str(time.time()).replace(".", "")+"/"
user_data_dir = self.userData_path
os.makedirs(user_data_dir)
def readConfigs(self, event=None):
self.defaultIndexLink=config.read_config_init(
self.configFilepath, "默认设置", "default_index_link", "https://shangoue.meituan.com/")
self.defaultHomepageLink=config.read_config_init(
self.configFilepath, "默认设置", "default_homepage_link","https://shangoue.meituan.com/#menu#0")
self.defaultMsg= config.read_config_init(
self.configFilepath, "默认设置", "default_msg", "店家正在飞速赶来,请稍候~")
self.repeatedlyMsg= config.read_config_init(
self.configFilepath, "默认设置", "repeatedly_msg", "商家可能在忙,如未能及时回复请致电888-888888")
def InitDriver(self, event=None):
if not os.path.exists(self.driverFile):
return
if self.driver != None:
print("Already exist")
self.loadCookie()
print("cookie Loaded")
self.driver.get(self.defaultHomepageLink)
time.sleep(3)
return
self.driver = None
try:
chrome_options = Options()
user_data_dir = self.userData_path
if user_data_dir == None or user_data_dir == "" or not os.path.exists(user_data_dir):
self.userData_path = self.tmpDir+"/" + \
str(time.time()).replace(".", "")+"/"
user_data_dir = self.userData_path
chrome_options.add_argument('--user-data-dir='+user_data_dir)
chrome_options.add_argument('--disable-extensions')
chrome_options.add_argument('--profile-directory=Default')
chrome_options.add_argument("--incognito")
chrome_options.add_argument("--disable-plugins-discovery")
chrome_options.add_argument("--start-maximized")
chrome_options.add_argument('--ignore-certificate-errors')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--enable-javascript')
chrome_options.add_argument('--log-level=3')
chrome_options.add_argument('--disable-popup-blocking')
chrome_options.add_argument('--single-process')
chrome_options.add_argument('--ignore-ssl-errors')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
# chrome_options.binary_location = '/opt/google/chrome/chrome'
self.driver = webdriver.Chrome(
options=chrome_options, executable_path=self.driverFile)
self.driver.set_page_load_timeout(8000)
try:
print("HTTPS-GET: %s"%(self.defaultIndexLink))
self.driver.get(self.defaultIndexLink)
except Exception as ee1:
print("EER in HTTPS-GET: %s"%(self.defaultIndexLink))
print(ee1)
self.InitDriver()
except Exception as e:
if 'already in use' in str(e):
self.userData_path = self.tmpDir+"/" + \
str(time.time()).replace(".", "")+"/"
if self.driver != None:
self.driver.quit()
time.sleep(5)
self.InitDriver()
print("in getBrowser")
print(e)
return None
pass
def CheckNewMSG(self, event=None):
try:
pageSource = BeautifulSoup(self.driver.page_source, "lxml")
try:
sg_im_div=pageSource.find("div", id="sg-im")
sg_im_entrance_div=sg_im_div.find("div", class_="wm-im-entrance")
msg_num_b=sg_im_entrance_div.find("b")
self.newMsgNum=msg_num_b.text
return True
except Exception as e:
return False
time.sleep(3)
return True
except Exception as e:
return False
def replyNewMsg(self,event=None):
try:
if not self.imOpened():
self.openIM()
try:
pageSource = BeautifulSoup(self.driver.page_source, "lxml")
sgImList=pageSource.find("div", id="sgImList")
unreadList = sgImList.findAll("div", class_="wm-im-item-wrapper cursor unread-item")
if len(unreadList) == 0:
print("lastRplyUserMsg")
textareapath="//*[@id=\"sg-im-dialog\"]/div[2]/div[3]/div[3]/textarea"
self.driver.find_element_by_xpath(textareapath).send_keys(self.repeatedlyMsg)
sendbtnpath="//*[@id=\"sg-im-dialog\"]/div[2]/div[3]/div[4]/button"
sendbtn = self.driver.find_element_by_xpath(sendbtnpath)
webdriver.ActionChains(self.driver).move_to_element(sendbtn).click(sendbtn).perform()
self.closeIM()
return
for unReadmsgDiv in unreadList:
unReadmsgDivId=unReadmsgDiv.attrs['data-uid']
print("unread msg id :%s"%(unReadmsgDivId))
if "selected" not in unReadmsgDiv.attrs['class']:
msg=self.defaultMsg
unreadpath="//*[@data-uid=\"%s\"]"%(str(unReadmsgDivId))
msgtab = self.driver.find_element_by_xpath(unreadpath)
webdriver.ActionChains(self.driver).move_to_element(
msgtab).click(msgtab).perform()
print("clicked ")
textareapath="//*[@id=\"sg-im-dialog\"]/div[2]/div[3]/div[3]/textarea"
self.driver.find_element_by_xpath(textareapath).send_keys(self.defaultMsg)
sendbtnpath="//*[@id=\"sg-im-dialog\"]/div[2]/div[3]/div[4]/button"
sendbtn = self.driver.find_element_by_xpath(sendbtnpath)
webdriver.ActionChains(self.driver).move_to_element(sendbtn).click(sendbtn).perform()
else:
print("already select")
time.sleep(1)
self.closeIM()
except Exception as e2:
print(e2)
print("err close Im now")
self.closeIM()
except Exception as e:
print(e)
def openIM(self,event=None):
try:
# wm-im-entrance
imlinkxpath="//*[@id=\"sg-im\"]/div/div[1]/div[1]"
imimg = self.driver.find_element_by_xpath(
imlinkxpath)
webdriver.ActionChains(self.driver).move_to_element(
imimg).click(imimg).perform()
except Exception as e:
print(e)
def closeIM(self,event=None):
try:
# wm-im-entrance
imlinkxpath="//*[@id=\"sg-im-dialog\"]/div[2]/div[1]/i[1]"
imimg = self.driver.find_element_by_xpath(
imlinkxpath)
webdriver.ActionChains(self.driver).move_to_element(
imimg).click(imimg).perform()
except Exception as e:
print(e)
def imOpened(self,event=None):
openstatus=False
try:
pageSource = BeautifulSoup(self.driver.page_source, "lxml")
try:
sg_im_div=pageSource.find("div", id="sg-im-dialog")
if sg_im_div.attrs['style']==None:
openstatus=True
else:
if "none" in sg_im_div.attrs['style']:
openstatus=False
else:
openstatus=True
except Exception as e:
print(e)
except Exception as e2:
print(e2)
msg="关闭"
if openstatus:
msg="打开"
print("IM 状态:%s"%(msg))
return openstatus
def loadCookie(self, event=None):
print("尝试读取登录历史->登录网站")
cookieFileName = self.sys_path+"/"+'cookies.json'
if not os.path.exists(cookieFileName):
print("登录文件不存在,取消历史登录")
with open(cookieFileName, 'w+', encoding='utf-8-sig') as f:
print("Cookie File Missing CreatingFile...")
f.flush()
f.close()
return False
else:
print("登录文件:[%s],尝试历史登录" % (cookieFileName))
with open(cookieFileName, 'r+', encoding='utf-8-sig') as f:
data = f.read()
if len(data) < 3 or data == "":
print("JSON ERR:[%s]" % (data))
return False
listCookies = {}
try:
listCookies = json.loads(data)
for cookie in listCookies:
try:
self.driver.add_cookie(cookie)
print("cookie already load")
except Exception as ed:
print(ed)
print(cookie)
pass
except Exception as e:
print("in listCookies")
print(e)
return False
return True
def mainloop(self,event=None):
print("start process")
while self.RunningProcess:
hasMsg=self.CheckNewMSG()
if hasMsg:
print("有%s条新消息啦!"%(self.newMsgNum))
self.replyNewMsg()
else:
print("等待新消息~~~")
time.sleep(2)
def str2Int(str):
if str == None or str == "":
return 0
return int(str)
def getNoSpacestr(oldStr):
newStr = ""
firstSpace = True
for c in oldStr:
if c == " " and firstSpace:
pass
else:
firstSpace=False
newStr += c
if str(newStr).endswith(" "):
endStr = ""
for d in newStr:
endStr = d+endStr
endStr = getNoSpacestr(endStr)
newStr = ""
for e in endStr:
newStr = e+newStr
return newStr
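# Note: getNoSpacestr() strips leading spaces and then, via the double reversal,
# trailing spaces as well, so for ordinary strings it behaves like
# "  hello world  ".strip(' ') == "hello world". A built-in one-line sketch:
#
#   def get_no_space_str(old_str):
#       return old_str.strip(' ')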
if __name__ == "__main__":
app = Application()
app.RunningProcess = True
d = threading.Thread(target=app.InitDriver)
e= threading.Thread(target=app.mainloop)
d.start()
time.sleep(15)
e.start()
pass
|
data.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import logging
import sys
import numbers
import math
import sklearn
import datetime
import numpy as np
import cv2
import mxnet as mx
from mxnet import ndarray as nd
#from . import _ndarray_internal as _internal
#from mxnet._ndarray_internal import _cvimresize as imresize
#from ._ndarray_internal import _cvcopyMakeBorder as copyMakeBorder
from mxnet import io
from mxnet import recordio
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
import face_preprocess
import multiprocessing
logger = logging.getLogger()
def pick_triplets_impl(q_in, q_out):
more = True
while more:
deq = q_in.get()
if deq is None:
more = False
else:
embeddings, emb_start_idx, nrof_images, alpha = deq
print('running', emb_start_idx, nrof_images, os.getpid())
for j in xrange(1,nrof_images):
a_idx = emb_start_idx + j - 1
neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
for pair in xrange(j, nrof_images): # For every possible positive pair.
p_idx = emb_start_idx + pair
pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection
#all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selection
nrof_random_negs = all_neg.shape[0]
if nrof_random_negs>0:
rnd_idx = np.random.randint(nrof_random_negs)
n_idx = all_neg[rnd_idx]
#triplets.append( (a_idx, p_idx, n_idx) )
q_out.put( (a_idx, p_idx, n_idx) )
#emb_start_idx += nrof_images
print('exit',os.getpid())
class FaceImageIter(io.DataIter):
def __init__(self, batch_size, data_shape,
path_imgrec = None,
shuffle=False, aug_list=None, mean = None,
rand_mirror = False,
c2c_threshold = 0.0, output_c2c = 0, c2c_mode = -10,
ctx_num = 0, images_per_identity = 0, data_extra = None, hard_mining = False,
triplet_params = None, coco_mode = False,
mx_model = None,
data_name='data', label_name='softmax_label', **kwargs):
super(FaceImageIter, self).__init__()
assert path_imgrec
if path_imgrec:
logging.info('loading recordio %s...',
path_imgrec)
path_imgidx = path_imgrec[0:-4]+".idx"
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
s = self.imgrec.read_idx(0)
header, _ = recordio.unpack(s)
self.idx2cos = {}
self.idx2flag = {}
self.idx2meancos = {}
self.c2c_auto = False
if output_c2c or c2c_threshold>0.0 or c2c_mode>=-5:
path_c2c = os.path.join(os.path.dirname(path_imgrec), 'c2c')
print(path_c2c)
if os.path.exists(path_c2c):
for line in open(path_c2c, 'r'):
vec = line.strip().split(',')
idx = int(vec[0])
self.idx2cos[idx] = float(vec[1])
self.idx2flag[idx] = 1
if len(vec)>2:
self.idx2flag[idx] = int(vec[2])
else:
self.c2c_auto = True
self.c2c_step = 10000
if header.flag>0:
print('header0 label', header.label)
self.header0 = (int(header.label[0]), int(header.label[1]))
#assert(header.flag==1)
self.imgidx = range(1, int(header.label[0]))
if c2c_mode==0:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f!=1:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==1:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==2 and c>=0.05:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==2:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==2 and c>=0.1:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==-1:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==2:
continue
if c<0.7:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==-2:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==2:
continue
if c<0.73:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_threshold>0.0:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if c<c2c_threshold:
continue
imgidx2.append(idx)
print(len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
self.id2range = {}
self.seq_identity = range(int(header.label[0]), int(header.label[1]))
for identity in self.seq_identity:
s = self.imgrec.read_idx(identity)
header, _ = recordio.unpack(s)
a,b = int(header.label[0]), int(header.label[1])
#print('flag', header.flag)
#print(header.label)
#assert(header.flag==2)
self.id2range[identity] = (a,b)
if len(self.idx2cos)>0:
m = 0.0
for ii in xrange(a,b):
m+=self.idx2cos[ii]
m/=(b-a)
for ii in xrange(a,b):
self.idx2meancos[ii] = m
#self.idx2meancos[identity] = m
print('id2range', len(self.id2range))
print(len(self.idx2cos), len(self.idx2meancos))
else:
self.imgidx = list(self.imgrec.keys)
if shuffle:
self.seq = self.imgidx
self.oseq = self.imgidx
print(len(self.seq))
else:
self.seq = None
self.mean = mean
self.nd_mean = None
if self.mean:
self.mean = np.array(self.mean, dtype=np.float32).reshape(1,1,3)
self.nd_mean = mx.nd.array(self.mean).reshape((1,1,3))
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
self.batch_size = batch_size
self.data_shape = data_shape
self.shuffle = shuffle
self.image_size = '%d,%d'%(data_shape[1],data_shape[2])
self.rand_mirror = rand_mirror
print('rand_mirror', rand_mirror)
#self.cast_aug = mx.image.CastAug()
#self.color_aug = mx.image.ColorJitterAug(0.4, 0.4, 0.4)
self.ctx_num = ctx_num
self.c2c_threshold = c2c_threshold
self.output_c2c = output_c2c
self.per_batch_size = int(self.batch_size/self.ctx_num)
self.images_per_identity = images_per_identity
if self.images_per_identity>0:
self.identities = int(self.per_batch_size/self.images_per_identity)
self.per_identities = self.identities
self.repeat = 3000000.0/(self.images_per_identity*len(self.id2range))
self.repeat = int(self.repeat)
print(self.images_per_identity, self.identities, self.repeat)
self.data_extra = None
if data_extra is not None:
self.data_extra = nd.array(data_extra)
self.provide_data = [(data_name, (batch_size,) + data_shape), ('extra', data_extra.shape)]
self.hard_mining = hard_mining
self.mx_model = mx_model
if self.hard_mining:
assert self.images_per_identity>0
assert self.mx_model is not None
self.triplet_params = triplet_params
self.triplet_mode = False
self.coco_mode = coco_mode
if len(label_name)>0:
if output_c2c:
self.provide_label = [(label_name, (batch_size,2))]
else:
self.provide_label = [(label_name, (batch_size,))]
else:
self.provide_label = []
if self.provide_label:
    print(self.provide_label[0][1])
if self.coco_mode:
assert self.triplet_params is None
assert self.images_per_identity>0
if self.triplet_params is not None:
assert self.images_per_identity>0
assert self.mx_model is not None
self.triplet_bag_size = self.triplet_params[0]
self.triplet_alpha = self.triplet_params[1]
self.triplet_max_ap = self.triplet_params[2]
assert self.triplet_bag_size>0
assert self.triplet_alpha>=0.0
assert self.triplet_alpha<=1.0
self.triplet_mode = True
self.triplet_oseq_cur = 0
self.triplet_oseq_reset()
self.seq_min_size = self.batch_size*2
self.cur = 0
self.nbatch = 0
self.is_init = False
self.times = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#self.reset()
def ____pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
people_per_batch = len(nrof_images_per_class)
nrof_threads = 8
q_in = multiprocessing.Queue()
q_out = multiprocessing.Queue()
processes = [multiprocessing.Process(target=pick_triplets_impl, args=(q_in, q_out)) \
for i in range(nrof_threads)]
for p in processes:
p.start()
# VGG Face: Choosing good triplets is crucial and should strike a balance between
# selecting informative (i.e. challenging) examples and swamping training with examples that
# are too hard. This is achieved by extending each pair (a, p) to a triplet (a, p, n) by sampling
# the image n at random, but only between the ones that violate the triplet loss margin. The
# latter is a form of hard-negative mining, but it is not as aggressive (and much cheaper) than
# choosing the maximally violating example, as often done in structured output learning.
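# Added illustration (not part of the original pipeline): the FaceNet-style selection used in
# pick_triplets below keeps a negative n for anchor a and positive p whenever
#     pos_dist_sqr < neg_dist_sqr   and   neg_dist_sqr - pos_dist_sqr < alpha
# e.g. with pos_dist_sqr = 0.36 and alpha = 0.2, a negative at squared distance 0.49 is kept
# (0.49 - 0.36 = 0.13 < 0.2), while one at 0.81 is rejected (difference 0.45 >= alpha).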
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
job = (embeddings, emb_start_idx, nrof_images, self.triplet_alpha)
emb_start_idx+=nrof_images
q_in.put(job)
for i in xrange(nrof_threads):
q_in.put(None)
print('joining')
for p in processes:
p.join()
print('joined')
q_out.put(None)
triplets = []
more = True
while more:
triplet = q_out.get()
if triplet is None:
more = False
else:
triplets.append(triplet)
np.random.shuffle(triplets)
return triplets
#calculate pairwise squared distances on a single gpu
def _pairwise_dists(self, embeddings):
nd_embedding = mx.nd.array(embeddings, mx.gpu(0))
pdists = []
for idx in xrange(embeddings.shape[0]):
a_embedding = nd_embedding[idx]
body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
body = body*body
body = mx.nd.sum_axis(body, axis=1)
ret = body.asnumpy()
#print(ret.shape)
pdists.append(ret)
return pdists
def pairwise_dists(self, embeddings):
nd_embedding_list = []
for i in xrange(self.ctx_num):
nd_embedding = mx.nd.array(embeddings, mx.gpu(i))
nd_embedding_list.append(nd_embedding)
nd_pdists = []
pdists = []
for idx in xrange(embeddings.shape[0]):
emb_idx = idx%self.ctx_num
nd_embedding = nd_embedding_list[emb_idx]
a_embedding = nd_embedding[idx]
body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
body = body*body
body = mx.nd.sum_axis(body, axis=1)
nd_pdists.append(body)
if len(nd_pdists)==self.ctx_num or idx==embeddings.shape[0]-1:
for x in nd_pdists:
pdists.append(x.asnumpy())
nd_pdists = []
return pdists
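# Added note (a minimal sketch, not part of the original code): the loops above compute squared
# Euclidean distances row by row on GPU. For L2-normalized embeddings (as produced in
# select_triplets below) the same quantity follows from ||a - b||^2 = 2 - 2 * a.b, e.g. in NumPy:
#   sq_dists = 2.0 - 2.0 * np.dot(E, E.T)   # E: (N, D) row-normalized; sq_dists[i] matches pdists[i]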
def pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
triplets = []
people_per_batch = len(nrof_images_per_class)
#self.time_reset()
pdists = self.pairwise_dists(embeddings)
#self.times[3] += self.time_elapsed()
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
for j in xrange(1,nrof_images):
#self.time_reset()
a_idx = emb_start_idx + j - 1
#neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
neg_dists_sqr = pdists[a_idx]
#self.times[3] += self.time_elapsed()
for pair in xrange(j, nrof_images): # For every possible positive pair.
p_idx = emb_start_idx + pair
#self.time_reset()
pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
#self.times[4] += self.time_elapsed()
#self.time_reset()
neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
if self.triplet_max_ap>0.0:
if pos_dist_sqr>self.triplet_max_ap:
continue
all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<self.triplet_alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection
#self.times[5] += self.time_elapsed()
#self.time_reset()
#all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selection
nrof_random_negs = all_neg.shape[0]
if nrof_random_negs>0:
rnd_idx = np.random.randint(nrof_random_negs)
n_idx = all_neg[rnd_idx]
triplets.append( (a_idx, p_idx, n_idx) )
emb_start_idx += nrof_images
np.random.shuffle(triplets)
return triplets
def __pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
triplets = []
people_per_batch = len(nrof_images_per_class)
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
if nrof_images<2:
continue
for j in xrange(1,nrof_images):
a_idx = emb_start_idx + j - 1
pcount = nrof_images-1
dists_a2all = np.sum(np.square(embeddings[a_idx] - embeddings), 1) #(N,)
#print(a_idx, dists_a2all.shape)
ba = emb_start_idx
bb = emb_start_idx+nrof_images
sorted_idx = np.argsort(dists_a2all)
#print('assert', sorted_idx[0], a_idx)
#assert sorted_idx[0]==a_idx
#for idx in sorted_idx:
# print(idx, dists_a2all[idx])
p2n_map = {}
pfound = 0
for idx in sorted_idx:
if idx==a_idx: #is anchor
continue
if idx<bb and idx>=ba: #is pos
p2n_map[idx] = [dists_a2all[idx], []] #ap, [neg_list]
pfound+=1
else: # is neg
an = dists_a2all[idx]
if pfound==pcount and len(p2n_map)==0:
break
to_del = []
for p_idx in p2n_map:
v = p2n_map[p_idx]
an_ap = an - v[0]
if an_ap<self.triplet_alpha:
v[1].append(idx)
else:
#output
if len(v[1])>0:
n_idx = random.choice(v[1])
triplets.append( (a_idx, p_idx, n_idx) )
to_del.append(p_idx)
for _del in to_del:
del p2n_map[_del]
for p_idx,v in p2n_map.iteritems():
if len(v[1])>0:
n_idx = random.choice(v[1])
triplets.append( (a_idx, p_idx, n_idx) )
emb_start_idx += nrof_images
np.random.shuffle(triplets)
return triplets
def triplet_oseq_reset(self):
#rebuild self.oseq: shuffle identities, then take up to images_per_identity samples from each
self.triplet_oseq_cur = 0
ids = []
for k in self.id2range:
ids.append(k)
random.shuffle(ids)
self.oseq = []
for _id in ids:
v = self.id2range[_id]
_list = range(*v)
random.shuffle(_list)
if len(_list)>self.images_per_identity:
_list = _list[0:self.images_per_identity]
self.oseq += _list
print('oseq', len(self.oseq))
def time_reset(self):
self.time_now = datetime.datetime.now()
def time_elapsed(self):
time_now = datetime.datetime.now()
diff = time_now - self.time_now
return diff.total_seconds()
def select_triplets(self):
self.seq = []
while len(self.seq)<self.seq_min_size:
self.time_reset()
embeddings = None
bag_size = self.triplet_bag_size
batch_size = self.batch_size
#data = np.zeros( (bag_size,)+self.data_shape )
#label = np.zeros( (bag_size,) )
tag = []
#idx = np.zeros( (bag_size,) )
print('eval %d images..'%bag_size, self.triplet_oseq_cur)
print('triplet time stat', self.times)
if self.triplet_oseq_cur+bag_size>len(self.oseq):
self.triplet_oseq_reset()
print('eval %d images..'%bag_size, self.triplet_oseq_cur)
self.times[0] += self.time_elapsed()
self.time_reset()
#print(data.shape)
data = nd.zeros( self.provide_data[0][1] )
label = nd.zeros( self.provide_label[0][1] )
ba = 0
while True:
bb = min(ba+batch_size, bag_size)
if ba>=bb:
break
#_batch = self.data_iter.next()
#_data = _batch.data[0].asnumpy()
#print(_data.shape)
#_label = _batch.label[0].asnumpy()
#data[ba:bb,:,:,:] = _data
#label[ba:bb] = _label
for i in xrange(ba, bb):
_idx = self.oseq[i+self.triplet_oseq_cur]
s = self.imgrec.read_idx(_idx)
header, img = recordio.unpack(s)
img = self.imdecode(img)
data[i-ba][:] = self.postprocess_data(img)
label[i-ba][:] = header.label
tag.append( ( int(header.label), _idx) )
#idx[i] = _idx
db = mx.io.DataBatch(data=(data,), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
#print('eval for selecting triplets',ba,bb)
#print(net_out)
#print(len(net_out))
#print(net_out[0].asnumpy())
net_out = net_out[0].asnumpy()
#print(net_out)
#print('net_out', net_out.shape)
if embeddings is None:
embeddings = np.zeros( (bag_size, net_out.shape[1]))
embeddings[ba:bb,:] = net_out
ba = bb
assert len(tag)==bag_size
self.triplet_oseq_cur+=bag_size
embeddings = sklearn.preprocessing.normalize(embeddings)
self.times[1] += self.time_elapsed()
self.time_reset()
nrof_images_per_class = [1]
for i in xrange(1, bag_size):
if tag[i][0]==tag[i-1][0]:
nrof_images_per_class[-1]+=1
else:
nrof_images_per_class.append(1)
triplets = self.pick_triplets(embeddings, nrof_images_per_class) # shape=(T,3)
print('found triplets', len(triplets))
ba = 0
while True:
bb = ba+self.per_batch_size//3
if bb>len(triplets):
break
_triplets = triplets[ba:bb]
for i in xrange(3):
for triplet in _triplets:
_pos = triplet[i]
_idx = tag[_pos][1]
self.seq.append(_idx)
ba = bb
self.times[2] += self.time_elapsed()
def triplet_reset(self):
self.select_triplets()
def hard_mining_reset(self):
#import faiss
from annoy import AnnoyIndex
data = nd.zeros( self.provide_data[0][1] )
label = nd.zeros( self.provide_label[0][1] )
#label = np.zeros( self.provide_label[0][1] )
X = None
ba = 0
batch_num = 0
while ba<len(self.oseq):
batch_num+=1
if batch_num%10==0:
print('loading batch',batch_num, ba)
bb = min(ba+self.batch_size, len(self.oseq))
_count = bb-ba
for i in xrange(_count):
idx = self.oseq[i+ba]
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
img = self.imdecode(img)
data[i][:] = self.postprocess_data(img)
label[i][:] = header.label
db = mx.io.DataBatch(data=(data,self.data_extra), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
embedding = net_out[0].asnumpy()
nembedding = sklearn.preprocessing.normalize(embedding)
if _count<self.batch_size:
nembedding = nembedding[0:_count,:]
if X is None:
X = np.zeros( (len(self.id2range), nembedding.shape[1]), dtype=np.float32 )
nplabel = label.asnumpy()
for i in xrange(_count):
ilabel = int(nplabel[i])
#print(ilabel, ilabel.__class__)
X[ilabel] += nembedding[i]
ba = bb
X = sklearn.preprocessing.normalize(X)
d = X.shape[1]
t = AnnoyIndex(d, metric='euclidean')
for i in xrange(X.shape[0]):
t.add_item(i, X[i])
print('start to build index')
t.build(20)
print(X.shape)
k = self.per_identities
self.seq = []
for i in xrange(X.shape[0]):
nnlist = t.get_nns_by_item(i, k)
assert nnlist[0]==i
for _label in nnlist:
assert _label<len(self.id2range)
_id = self.header0[0]+_label
v = self.id2range[_id]
_list = range(*v)
if len(_list)<self.images_per_identity:
random.shuffle(_list)
else:
_list = np.random.choice(_list, self.images_per_identity, replace=False)
for ii in xrange(self.images_per_identity):
_idx = _list[ii%len(_list)]
self.seq.append(_idx)
#faiss_params = [20,5]
#quantizer = faiss.IndexFlatL2(d) # the other index
#index = faiss.IndexIVFFlat(quantizer, d, faiss_params[0], faiss.METRIC_L2)
#assert not index.is_trained
#index.train(X)
#index.add(X)
#assert index.is_trained
#print('trained')
#index.nprobe = faiss_params[1]
#D, I = index.search(X, k) # actual search
#print(I.shape)
#self.seq = []
#for i in xrange(I.shape[0]):
# #assert I[i][0]==i
# for j in xrange(k):
# _label = I[i][j]
# assert _label<len(self.id2range)
# _id = self.header0[0]+_label
# v = self.id2range[_id]
# _list = range(*v)
# if len(_list)<self.images_per_identity:
# random.shuffle(_list)
# else:
# _list = np.random.choice(_list, self.images_per_identity, replace=False)
# for i in xrange(self.images_per_identity):
# _idx = _list[i%len(_list)]
# self.seq.append(_idx)
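# Added note: the Annoy index used above and the commented-out faiss variant serve the same
# purpose: an approximate nearest-neighbour search over the per-identity mean embeddings
# (centroids), so that each identity is batched together with its k most similar identities,
# which makes the sampled batches harder and the mining more effective.
# Minimal Annoy sketch mirroring the code above (the dimension 512 is illustrative):
#   t = AnnoyIndex(512, metric='euclidean'); t.add_item(0, vec0); t.build(20)
#   t.get_nns_by_item(0, 16)   # indices of the 16 identities closest to identity 0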
def reset_c2c(self):
self.select_triplets()
for identity,v in self.id2range.iteritems():
_list = range(*v)
ocontents = []
for idx in _list:
s = self.imgrec.read_idx(idx)
ocontents.append(s)
embeddings = None
#print(len(ocontents))
ba = 0
while True:
bb = min(ba+self.batch_size, len(ocontents))
if ba>=bb:
break
_batch_size = bb-ba
_batch_size2 = max(_batch_size, self.ctx_num)
data = nd.zeros( (_batch_size2,) + self.data_shape )
label = nd.zeros( (_batch_size2,) )
count = bb-ba
ii=0
for i in xrange(ba, bb):
header, img = mx.recordio.unpack(ocontents[i])
img = mx.image.imdecode(img)
img = nd.transpose(img, axes=(2, 0, 1))
data[ii][:] = img
label[ii][:] = header.label
ii+=1
while ii<_batch_size2:
data[ii][:] = data[0][:]
label[ii][:] = label[0][:]
ii+=1
db = mx.io.DataBatch(data=(data,), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
net_out = net_out[0].asnumpy()
if embeddings is None:
embeddings = np.zeros( (len(ocontents), net_out.shape[1]))
embeddings[ba:bb,:] = net_out[0:_batch_size,:]
ba = bb
embeddings = sklearn.preprocessing.normalize(embeddings)
embedding = np.mean(embeddings, axis=0, keepdims=True)
embedding = sklearn.preprocessing.normalize(embedding)
sims = np.dot(embeddings, embedding.T).flatten()
assert len(sims)==len(_list)
for i in xrange(len(_list)):
_idx = _list[i]
self.idx2cos[_idx] = sims[i]
def reset(self):
"""Resets the iterator to the beginning of the data."""
print('call reset()')
if self.c2c_auto:
self.reset_c2c()
self.cur = 0
if self.images_per_identity>0:
if self.triplet_mode:
self.triplet_reset()
elif not self.hard_mining:
self.seq = []
idlist = []
for _id,v in self.id2range.iteritems():
idlist.append((_id,range(*v)))
for r in xrange(self.repeat):
if r%10==0:
print('repeat', r)
if self.shuffle:
random.shuffle(idlist)
for item in idlist:
_id = item[0]
_list = item[1]
#random.shuffle(_list)
if len(_list)<self.images_per_identity:
random.shuffle(_list)
else:
_list = np.random.choice(_list, self.images_per_identity, replace=False)
for i in xrange(self.images_per_identity):
_idx = _list[i%len(_list)]
self.seq.append(_idx)
else:
self.hard_mining_reset()
print('seq len', len(self.seq))
else:
if self.shuffle:
random.shuffle(self.seq)
if self.seq is None and self.imgrec is not None:
self.imgrec.reset()
def num_samples(self):
return len(self.seq)
def next_sample(self):
"""Helper function for reading in next sample."""
#set the total batch size, for example 1800, and the maximum number of images per person, for example 45
if self.seq is not None:
while True:
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
label = header.label
if self.output_c2c:
meancos = self.idx2meancos[idx]
label = [label, meancos]
else:
if not isinstance(label, numbers.Number):
label = label[0]
return label, img, None, None
else:
label, fname, bbox, landmark = self.imglist[idx]
return label, self.read_image(fname), bbox, landmark
else:
s = self.imgrec.read()
if s is None:
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img, None, None
def brightness_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
src *= alpha
return src
def contrast_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
src *= alpha
src += gray
return src
def saturation_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = np.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
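# Added note: the [0.299, 0.587, 0.114] coefficients above are the standard ITU-R BT.601 luma
# weights, i.e. the grayscale value used as the blending target for contrast/saturation jitter.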
def color_aug(self, img, x):
augs = [self.brightness_aug, self.contrast_aug, self.saturation_aug]
random.shuffle(augs)
for aug in augs:
#print(img.shape)
img = aug(img, x)
#print(img.shape)
return img
def mirror_aug(self, img):
_rd = random.randint(0,1)
if _rd==1:
for c in xrange(img.shape[2]):
img[:,:,c] = np.fliplr(img[:,:,c])
return img
def next(self):
"""Returns the next batch of data."""
if not self.is_init:
self.reset()
self.is_init = True
#print('in next', self.cur, self.labelcur)
self.nbatch+=1
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.empty((batch_size, c, h, w))
if self.provide_label is not None:
batch_label = nd.empty(self.provide_label[0][1])
i = 0
try:
while i < batch_size:
label, s, bbox, landmark = self.next_sample()
_data = self.imdecode(s)
if self.rand_mirror:
_rd = random.randint(0,1)
if _rd==1:
_data = mx.ndarray.flip(data=_data, axis=1)
if self.nd_mean is not None:
_data = _data.astype('float32')
_data -= self.nd_mean
_data *= 0.0078125
#_npdata = _data.asnumpy()
#if landmark is not None:
# _npdata = face_preprocess.preprocess(_npdata, bbox = bbox, landmark=landmark, image_size=self.image_size)
#if self.rand_mirror:
# _npdata = self.mirror_aug(_npdata)
#if self.mean is not None:
# _npdata = _npdata.astype(np.float32)
# _npdata -= self.mean
# _npdata *= 0.0078125
#nimg = np.zeros(_npdata.shape, dtype=np.float32)
#nimg[self.patch[1]:self.patch[3],self.patch[0]:self.patch[2],:] = _npdata[self.patch[1]:self.patch[3], self.patch[0]:self.patch[2], :]
#_data = mx.nd.array(nimg)
data = [_data]
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
#print('aa',data[0].shape)
#data = self.augmentation_transform(data)
#print('bb',data[0].shape)
for datum in data:
assert i < batch_size, 'Batch size must be a multiple of augmenter output length'
#print(datum.shape)
batch_data[i][:] = self.postprocess_data(datum)
if self.provide_label is not None:
if not self.coco_mode:
if len(batch_label.shape)==1:
batch_label[i][:] = label
else:
for ll in xrange(batch_label.shape[1]):
v = label[ll]
if ll>0:
c2c = v
#m = min(0.55, max(0.3,math.log(c2c+1)*4-1.85))
#v = math.cos(m)
#v = v*v
#_param = [0.5, 0.3, 0.85, 0.7]
_param = [0.5, 0.4, 0.85, 0.75]
#_param = [0.55, 0.4, 0.9, 0.75]
_a = (_param[1]-_param[0])/(_param[3]-_param[2])
m = _param[1]+_a*(c2c-_param[3])
m = min(_param[0], max(_param[1],m))
#m = 0.5
#if c2c<0.77:
# m = 0.3
#elif c2c<0.82:
# m = 0.4
#elif c2c>0.88:
# m = 0.55
v = math.cos(m)
v = v*v
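# Added note: with _param = [0.5, 0.4, 0.85, 0.75] the block above maps the per-identity mean
# cosine c2c linearly from [0.75, 0.85] onto a margin m in [0.4, 0.5] (clamped at both ends),
# then stores v = cos(m)^2 as the second label entry; e.g. c2c = 0.80 -> m = 0.45 -> v ~= 0.81.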
#print('c2c', i,c2c,m,v)
batch_label[i][ll] = v
else:
batch_label[i][:] = (i%self.per_batch_size)//self.images_per_identity
i += 1
except StopIteration:
if i<batch_size:
raise StopIteration
#print('next end', batch_size, i)
_label = None
if self.provide_label is not None:
_label = [batch_label]
if self.data_extra is not None:
return io.DataBatch([batch_data, self.data_extra], _label, batch_size - i)
else:
return io.DataBatch([batch_data], _label, batch_size - i)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
img = mx.image.imdecode(s) #mx.ndarray
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns raw image bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = [ret for src in data for ret in aug(src)]
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
class FaceImageIterList(io.DataIter):
def __init__(self, iter_list):
assert len(iter_list)>0
self.provide_data = iter_list[0].provide_data
self.provide_label = iter_list[0].provide_label
self.iter_list = iter_list
self.cur_iter = None
def reset(self):
self.cur_iter.reset()
def next(self):
self.cur_iter = random.choice(self.iter_list)
while True:
try:
ret = self.cur_iter.next()
except StopIteration:
self.cur_iter.reset()
continue
return ret
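# Usage sketch (added for illustration; iter_a and iter_b are hypothetical FaceImageIter
# instances built elsewhere with identical provide_data / provide_label):
#   train_iter = FaceImageIterList([iter_a, iter_b])
#   batch = train_iter.next()   # each call draws a batch from a randomly chosen underlying iterator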
|
extraVMmetrics.py
|
__author__="mcanuto"
__date__ ="$Feb 13, 2014 6:11:42 PM$"
import sys
from time import sleep
import os
import gmetric
from domain_info import domainsVM, VMobject
from countersMetrics import CountersMetrics
from rawCountersMetrics import RawCountersMetrics
from threading import Thread
from gmetric import GmetricConf
from logging import handlers
import threading
import guestfs
import errno
import Queue
import time
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create a file handler
handler = handlers.RotatingFileHandler('extraMetrics.log', maxBytes=1024*1024)
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
check_vm_interval = 2
MAX_COUNT = 6
vm_pid_path_prefix = '/var/run/libvirt/qemu/'
class ExtraVMmetrics:
def __init__(self, vm_file_path, vm_metrics_interval, mconf, gconf, counters_directory, counters_interval, counters_list, raw_counters_list, get_vm_metrics, get_vm_counters, get_vm_raw_counters):
self.vm_file_path = vm_file_path
self.vm_metrics_interval = vm_metrics_interval
self.mconf = mconf
self.gconf = gconf
self.counters_interval = counters_interval
self.counters_list = counters_list
self.raw_counters_list = raw_counters_list
self.get_vm_metrics = get_vm_metrics
self.get_vm_counters = get_vm_counters
self.get_vm_raw_counters = get_vm_raw_counters
self.counters_directory = counters_directory
self._stopevent = threading.Event()
def stopThreads(self):
logger.info("stopping threads...")
self._stopevent.set()
def collectVMmetrics(self):
domains = domainsVM()
try:
while not self._stopevent.isSet():
#while True:
#for i in range(1,6):
#print "...getting Domains..."
dom_list = domains.getDomains()
#print "Number of VMs:", len(dom_list)
for key in dom_list:
if dom_list[key].firstTime:
vm_name = dom_list[key].name
if self.get_vm_metrics is True:
# for each NEW VM start a thread for collecting and sending VM metrics
logger.info("New VM detected: starting thread for %s", vm_name)
thread_VMmetric = Thread(target = self.readVMmetrics, args=(vm_name, ))
thread_VMmetric.daemon = True
thread_VMmetric.start()
if self.get_vm_counters or self.get_vm_raw_counters:
# get pid of vm
path = vm_pid_path_prefix + vm_name + '.pid'
with open(path, 'r') as f:
vm_pid = f.read()
if self.get_vm_counters is True and self.counters_list:
# for each NEW VM start a thread for collecting and sending counters metrics
logger.info("New VM detected: starting collecting VM Counters - thread for %s", vm_name)
counters_metric = CountersMetrics(self.counters_directory, self.counters_interval, self.counters_list, self.mconf, self.gconf, vm_pid, vm_name)
thread_VMCountersMetric = Thread(target = counters_metric.collectCountersMetrics)
thread_VMCountersMetric.daemon = True
thread_VMCountersMetric.start()
#send metrics
if self.get_vm_raw_counters is True and self.raw_counters_list:
# for each NEW VM start a thread for collecting and sending counters metrics
logger.info("New VM detected: starting collecting VM Raw Counters - thread for %s", vm_name)
raw_counters_metric = RawCountersMetrics(self.counters_directory, self.counters_interval, self.raw_counters_list, self.mconf, self.gconf, vm_pid, vm_name)
thread_VMRawCountersMetric = Thread(target = raw_counters_metric.collectCountersMetrics)
thread_VMRawCountersMetric.daemon = True
thread_VMRawCountersMetric.start()
sleep(check_vm_interval)
logger.info("All VM threads terminated")
except (KeyboardInterrupt, SystemExit):
self.stopThreads()
self.exit = True
sys.exit(0)
def readVMmetrics(self, vm_name):
gmetric_obj = gmetric.Gmetric(self.gconf.host, self.gconf.port, self.gconf.protocol)
list_metrics = {}
exec_time = 0
while not self._stopevent.isSet():
#check if vm is still alive
domains = domainsVM()
dom_list = domains.getDomains()
if vm_name not in dom_list:
logger.info("%s has been destroyed", vm_name)
break
try:
start_time = time.time()
g = guestfs.GuestFS ()
# Attach the disk image read-only to libguestfs.
g.add_domain(vm_name, readonly=1)
# Run the libguestfs back-end.
g.launch()
# Ask libguestfs to inspect for operating systems.
roots = g.inspect_os ()
if len (roots) == 0:
logger.error("no operating systems found")
break
if len (roots) > 1:
logger.error("dual/multi-boot images are not supported")
break
root = roots[0]
# Mount up the disks, like guestfish -i.
mps = g.inspect_get_mountpoints (root)
for device in mps:
try:
g.mount(device[1], device[0])
except RuntimeError as msg:
logger.error("%s (ignored)",msg)
try:
lines = g.read_lines(self.vm_file_path)
for l in lines:
if len(l.strip()) > 0:
token = l.split('|')
n = token[0].strip()
v = token[1].strip()
list_metrics[n] = v
#send metrics
self.sendVMmetrics(list_metrics, gmetric_obj, vm_name)
except RuntimeError, io:
logger.warning("%s %s %s", threading.currentThread().name, vm_name, io)
g.umount_all()
g.close()
exec_time = time.time() - start_time
except (KeyboardInterrupt, SystemExit):
self.stopThreads()
sys.exit(0)
except Exception, e:
logger.error("%s",e)
sleep_time = float(self.vm_metrics_interval) - float(exec_time)
if sleep_time < 0:
sleep_time = float(self.vm_metrics_interval)
#print "sleep:", sleep_time
sleep(sleep_time)
logger.info("Terminating %s", threading.currentThread().name)
def sendVMmetrics(self, list_metrics, gmetric_obj, vm_name):
logger.info("%s: sending metrics for %s", threading.currentThread().name, vm_name)
#send metric
for key,value in list_metrics.items():
n = vm_name+".v"+key
if key in self.mconf:
#gmetric_obj.send("TESTGROUP", "812344", 'float', "kb", "both", 50, 500, 'vm memory', "127.0.0.1:minerva-21")
if self.mconf[key]["spoof"].lower() == "yes":
gmetric_obj.send(n , value, self.mconf[key]["type"], self.mconf[key]["units"], self.gconf.slope, self.mconf[key]["tmax"], self.mconf[key]["dmax"], self.mconf[key]["group"], self.gconf.spoof)
else:
gmetric_obj.send(n , value, self.mconf[key]["type"], self.mconf[key]["units"], self.gconf.slope, self.mconf[key]["tmax"], self.mconf[key]["dmax"], self.mconf[key]["group"])
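# Expected format of the in-guest metrics file read by readVMmetrics() (illustrative example;
# the metric names must have matching entries in the metrics configuration mconf):
#   cpu_load | 0.42
#   mem_used | 1048576
# Each non-empty line is split on '|' into a name and a value, and the value is then sent via
# gmetric as <vm_name>.v<metric_name>.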
|
PySC2_A3C_old.py
|
"""
PySC2_A3C_old.py
A script for training and running an A3C agent on the PySC2 environment, with reference to DeepMind's paper:
[1] Vinyals, Oriol, et al. "Starcraft II: A new challenge for reinforcement learning." arXiv preprint arXiv:1708.04782 (2017).
Advantage estimation uses generalized advantage estimation from:
[2] Schulman, John, et al. "High-dimensional continuous control using generalized advantage estimation." arXiv preprint arXiv:1506.02438 (2015).
Credit goes to Arthur Juliani for providing for reference an implementation of A3C for the VizDoom environment
https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-8-asynchronous-actor-critic-agents-a3c-c88f72a5e9f2
https://github.com/awjuliani/DeepRL-Agents
Note:
Currently only works on the DefeatRoaches mini-game; work is in progress to generalize the script to all mini-games
"""
import threading
import psutil
import numpy as np
import tensorflow as tf
import scipy.signal
from time import sleep
import os
from pysc2.env import sc2_env
from pysc2.env import environment
from pysc2.lib import actions
"""
Use the following command to launch Tensorboard:
tensorboard --logdir=worker_0:'./train_0',worker_1:'./train_1',worker_2:'./train_2',worker_3:'./train_3'
"""
## HELPER FUNCTIONS
# Copies one set of variables to another.
# Used to set worker network parameters to those of global network.
def update_target_graph(from_scope,to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, to_scope)
op_holder = []
for from_var,to_var in zip(from_vars,to_vars):
op_holder.append(to_var.assign(from_var))
return op_holder
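# Example (as used further below in Worker.__init__ and Worker.work):
#   update_local_ops = update_target_graph('global', 'worker_0')
#   sess.run(update_local_ops)   # overwrite the worker's weights with the global ones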
# Processes PySC2 observations
def process_observation(observation):
nonspatial_size = 727
screen_channels = 7
multi_select_max = 100
# is episode over?
episode_end = observation.step_type == environment.StepType.LAST
# reward
reward = observation.reward
# features
features = observation.observation
# nonspatial features
# TimeStep.observation['control_groups'](10,2)
# TimeStep.observation['single_select'](1,7)
# TimeStep.observation['multi_select'](n,7)
nonspatial_stack = features['control_groups'].reshape(-1)
nonspatial_stack = np.concatenate((nonspatial_stack, features['single_select'].reshape(-1)))
multi_select = features['multi_select'].reshape(-1)
# if multi_select has less than multi_select_max units, pad with zeros
if len(multi_select) < multi_select_max * 7:
multi_select = np.concatenate((multi_select, np.zeros(multi_select_max * 7 - len(multi_select))))
nonspatial_stack = np.concatenate((nonspatial_stack, multi_select))
# spatial_minimap features
# not used for DefeatRoaches since no camera movement is required
minimap_stack = None
# spatial_screen features
# TimeStep.observation['screen'][5] (player_relative)
# TimeStep.observation['screen'][6] (unit_type)
# TimeStep.observation['screen'][7] (selected)
# TimeStep.observation['screen'][8] (unit_hit_points)
# TimeStep.observation['screen'][9] (unit_hit_points_ratio)
# TimeStep.observation['screen'][14] (unit_density)
# TimeStep.observation['screen'][15] (unit_density_aa)
screen_stack = np.stack((features['screen'][5], features['screen'][6], features['screen'][7], features['screen'][8], features['screen'][9], features['screen'][14], features['screen'][15]), axis=2)
return reward, nonspatial_stack.reshape([-1,nonspatial_size]), minimap_stack, screen_stack.reshape([-1,64,64,screen_channels]), episode_end
# Discounting function used to calculate discounted returns.
def discount(x, gamma):
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
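# Worked example (added for clarity): with gamma = 0.99,
#   discount([1., 1., 1.], 0.99) -> [2.9701, 1.99, 1.0]
# i.e. each entry equals r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + ...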
# Used to initialize weights for policy and value output layers
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
# Sample from distribution of arguments
def sample_dist(dist):
sample = np.random.choice(dist[0],p=dist[0])
sample = np.argmax(dist == sample)
return sample
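# Added note: sample_dist draws an index from a categorical distribution given as a (1, n) array
# of probabilities, e.g. sample_dist(np.array([[0.2, 0.5, 0.3]])) returns 1 about half the time.
# The argmax-on-equality trick assumes the probability values in the row are distinct; exact ties
# would always resolve to the first matching index.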
## ACTOR-CRITIC NETWORK
class AC_Network():
def __init__(self,scope,trainer):
with tf.variable_scope(scope):
# Architecture here follows Atari-net Agent described in [1] Section 4.3
nonspatial_size = 727
screen_channels = 7
self.inputs_nonspatial = tf.placeholder(shape=[None,nonspatial_size], dtype=tf.float32)
self.inputs_spatial_screen_reshaped = tf.placeholder(shape=[None,64,64,screen_channels], dtype=tf.float32)
self.nonspatial_dense = tf.layers.dense(
inputs=self.inputs_nonspatial,
units=32,
activation=tf.tanh)
self.screen_conv1 = tf.layers.conv2d(
inputs=self.inputs_spatial_screen_reshaped,
filters=16,
kernel_size=[8,8],
strides=[4,4],
padding='valid',
activation=tf.nn.relu)
self.screen_conv2 = tf.layers.conv2d(
inputs=self.screen_conv1,
filters=32,
kernel_size=[4,4],
strides=[2,2],
padding='valid',
activation=tf.nn.relu)
# According to [1]: "The results are concatenated and sent through a linear layer with a ReLU activation."
self.latent_vector = tf.layers.dense(
inputs=tf.concat([self.nonspatial_dense, tf.reshape(self.screen_conv2,shape=[-1,6*6*32])], axis=1),
units=256,
activation=tf.nn.relu)
# Output layers for policy and value estimations
# 12 policy networks for base actions and arguments
# - All modeled independently
# - Spatial arguments have the x and y values modeled independently as well
# 1 value network
self.policy_base_actions = tf.layers.dense(
inputs=self.latent_vector,
units=17,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_select_add = tf.layers.dense(
inputs=self.latent_vector,
units=2,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(1.0))
self.policy_arg_queued = tf.layers.dense(
inputs=self.latent_vector,
units=2,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(1.0))
self.policy_arg_select_point_act = tf.layers.dense(
inputs=self.latent_vector,
units=4,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_select_unit_act = tf.layers.dense(
inputs=self.latent_vector,
units=4,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_control_group_act = tf.layers.dense(
inputs=self.latent_vector,
units=5,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_control_group_id = tf.layers.dense(
inputs=self.latent_vector,
units=10,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_select_unit_id = tf.layers.dense(
inputs=self.latent_vector,
units=500,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_screen_x = tf.layers.dense(
inputs=self.latent_vector,
units=64,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_screen_y = tf.layers.dense(
inputs=self.latent_vector,
units=64,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_screen2_x = tf.layers.dense(
inputs=self.latent_vector,
units=64,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.policy_arg_screen2_y = tf.layers.dense(
inputs=self.latent_vector,
units=64,
activation=tf.nn.softmax,
kernel_initializer=normalized_columns_initializer(0.01))
self.value = tf.layers.dense(
inputs=self.latent_vector,
units=1,
kernel_initializer=normalized_columns_initializer(1.0))
# Only the worker networks need ops for loss functions and gradient updates.
# calculates the losses
# self.gradients - gradients of loss wrt local_vars
# applies the gradients to update the global network
if scope != 'global':
self.actions_base = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_base = tf.one_hot(self.actions_base,17,dtype=tf.float32)
self.actions_arg_screen_x = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_screen_x = tf.one_hot(self.actions_arg_screen_x,64,dtype=tf.float32)
self.actions_arg_screen_y = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_screen_y = tf.one_hot(self.actions_arg_screen_y,64,dtype=tf.float32)
self.actions_arg_screen2_x = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_screen2_x = tf.one_hot(self.actions_arg_screen2_x,64,dtype=tf.float32)
self.actions_arg_screen2_y = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_screen2_y = tf.one_hot(self.actions_arg_screen2_y,64,dtype=tf.float32)
self.actions_arg_select_point_act = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_select_point_act = tf.one_hot(self.actions_arg_select_point_act,4,dtype=tf.float32)
self.actions_arg_select_add = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_select_add = tf.one_hot(self.actions_arg_select_add,2,dtype=tf.float32)
self.actions_arg_control_group_act = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_control_group_act = tf.one_hot(self.actions_arg_control_group_act,5,dtype=tf.float32)
self.actions_arg_control_group_id = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_control_group_id = tf.one_hot(self.actions_arg_control_group_id,10,dtype=tf.float32)
self.actions_arg_select_unit_id = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_select_unit_id = tf.one_hot(self.actions_arg_select_unit_id,500,dtype=tf.float32)
self.actions_arg_select_unit_act = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_select_unit_act = tf.one_hot(self.actions_arg_select_unit_act,4,dtype=tf.float32)
self.actions_arg_queued = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot_arg_queued = tf.one_hot(self.actions_arg_queued,2,dtype=tf.float32)
self.target_v = tf.placeholder(shape=[None],dtype=tf.float32)
self.advantages = tf.placeholder(shape=[None],dtype=tf.float32)
self.responsible_outputs_base = tf.reduce_sum(self.policy_base_actions * self.actions_onehot_base, [1])
self.responsible_outputs_arg_screen_x = tf.reduce_sum(self.policy_arg_screen_x * self.actions_onehot_arg_screen_x, [1])
self.responsible_outputs_arg_screen_y = tf.reduce_sum(self.policy_arg_screen_y * self.actions_onehot_arg_screen_y, [1])
self.responsible_outputs_arg_screen2_x = tf.reduce_sum(self.policy_arg_screen2_x * self.actions_onehot_arg_screen2_x, [1])
self.responsible_outputs_arg_screen2_y = tf.reduce_sum(self.policy_arg_screen2_y * self.actions_onehot_arg_screen2_y, [1])
self.responsible_outputs_arg_select_point_act = tf.reduce_sum(self.policy_arg_select_point_act * self.actions_onehot_arg_select_point_act, [1])
self.responsible_outputs_arg_select_add = tf.reduce_sum(self.policy_arg_select_add * self.actions_onehot_arg_select_add, [1])
self.responsible_outputs_arg_control_group_act = tf.reduce_sum(self.policy_arg_control_group_act * self.actions_onehot_arg_control_group_act, [1])
self.responsible_outputs_arg_control_group_id = tf.reduce_sum(self.policy_arg_control_group_id * self.actions_onehot_arg_control_group_id, [1])
self.responsible_outputs_arg_select_unit_id = tf.reduce_sum(self.policy_arg_select_unit_id * self.actions_onehot_arg_select_unit_id, [1])
self.responsible_outputs_arg_select_unit_act = tf.reduce_sum(self.policy_arg_select_unit_act * self.actions_onehot_arg_select_unit_act, [1])
self.responsible_outputs_arg_queued = tf.reduce_sum(self.policy_arg_queued * self.actions_onehot_arg_queued, [1])
# Loss functions
self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value,[-1])))
self.log_policy_base_actions = tf.log(tf.clip_by_value(self.policy_base_actions, 1e-20, 1.0)) # avoid NaN with clipping when value in policy becomes zero
self.entropy_base = - tf.reduce_sum(self.policy_base_actions * self.log_policy_base_actions)
self.entropy_arg_screen_x = - tf.reduce_sum(self.policy_arg_screen_x * tf.log(tf.clip_by_value(self.policy_arg_screen_x, 1e-20, 1.0)))
self.entropy_arg_screen_y = - tf.reduce_sum(self.policy_arg_screen_y * tf.log(tf.clip_by_value(self.policy_arg_screen_y, 1e-20, 1.0)))
self.entropy_arg_screen2_x = - tf.reduce_sum(self.policy_arg_screen2_x * tf.log(tf.clip_by_value(self.policy_arg_screen2_x, 1e-20, 1.0)))
self.entropy_arg_screen2_y = - tf.reduce_sum(self.policy_arg_screen2_y * tf.log(tf.clip_by_value(self.policy_arg_screen2_y, 1e-20, 1.0)))
self.entropy_arg_select_point_act = - tf.reduce_sum(self.policy_arg_select_point_act * tf.log(tf.clip_by_value(self.policy_arg_select_point_act, 1e-20, 1.0)))
self.entropy_arg_select_add = - tf.reduce_sum(self.policy_arg_select_add * tf.log(tf.clip_by_value(self.policy_arg_select_add, 1e-20, 1.0)))
self.entropy_arg_control_group_act = - tf.reduce_sum(self.policy_arg_control_group_act * tf.log(tf.clip_by_value(self.policy_arg_control_group_act, 1e-20, 1.0)))
self.entropy_arg_control_group_id = - tf.reduce_sum(self.policy_arg_control_group_id * tf.log(tf.clip_by_value(self.policy_arg_control_group_id, 1e-20, 1.0)))
self.entropy_arg_select_unit_id = - tf.reduce_sum(self.policy_arg_select_unit_id * tf.log(tf.clip_by_value(self.policy_arg_select_unit_id, 1e-20, 1.0)))
self.entropy_arg_select_unit_act = - tf.reduce_sum(self.policy_arg_select_unit_act * tf.log(tf.clip_by_value(self.policy_arg_select_unit_act, 1e-20, 1.0)))
self.entropy_arg_queued = - tf.reduce_sum(self.policy_arg_queued * tf.log(tf.clip_by_value(self.policy_arg_queued, 1e-20, 1.0)))
self.entropy = self.entropy_base + self.entropy_arg_screen_x + self.entropy_arg_screen_y + self.entropy_arg_screen2_x + self.entropy_arg_screen2_y + self.entropy_arg_select_point_act + self.entropy_arg_select_add + self.entropy_arg_control_group_act + self.entropy_arg_control_group_id + self.entropy_arg_select_unit_id + self.entropy_arg_select_unit_act + self.entropy_arg_queued
self.policy_loss_base = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_base, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_screen_x = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_screen_x, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_screen_y = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_screen_y, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_screen2_x = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_screen2_x, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_screen2_y = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_screen2_y, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_select_point_act = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_select_point_act, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_select_add = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_select_add, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_control_group_act = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_control_group_act, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_control_group_id = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_control_group_id, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_select_unit_id = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_select_unit_id, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_select_unit_act = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_select_unit_act, 1e-20, 1.0))*self.advantages)
self.policy_loss_arg_queued = - tf.reduce_sum(tf.log(tf.clip_by_value(self.responsible_outputs_arg_queued, 1e-20, 1.0))*self.advantages)
self.policy_loss = self.policy_loss_base + self.policy_loss_arg_screen_x + self.policy_loss_arg_screen_y + self.policy_loss_arg_screen2_x + self.policy_loss_arg_screen2_y + self.policy_loss_arg_select_point_act + self.policy_loss_arg_select_add + self.policy_loss_arg_control_group_act + self.policy_loss_arg_control_group_id + self.policy_loss_arg_select_unit_id + self.policy_loss_arg_select_unit_act + self.policy_loss_arg_queued
self.loss = 0.5 * self.value_loss + self.policy_loss - self.entropy * 0.01
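# Added note: the line above combines the losses as 0.5 * value_loss + policy_loss - 0.01 * entropy;
# the entropy term acts as a bonus (subtracted from the loss) that discourages premature collapse
# to a deterministic policy, as in the standard A3C objective.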
# Get gradients from local network using local losses
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
self.gradients = tf.gradients(self.loss,local_vars)
self.var_norms = tf.global_norm(local_vars)
grads,self.grad_norms = tf.clip_by_global_norm(self.gradients,40.0)
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
self.apply_grads = trainer.apply_gradients(zip(grads,global_vars))
## WORKER AGENT
class Worker():
def __init__(self,name,trainer,model_path,global_episodes):
self.name = "worker_" + str(name)
self.number = name
self.model_path = model_path
self.trainer = trainer
self.global_episodes = global_episodes
self.increment = self.global_episodes.assign_add(1)
self.episode_rewards = []
self.episode_lengths = []
self.episode_mean_values = []
self.summary_writer = tf.summary.FileWriter("train_"+str(self.number))
#Create the local copy of the network and the tensorflow op to copy global parameters to the local network
self.local_AC = AC_Network(self.name,trainer)
self.update_local_ops = update_target_graph('global',self.name)
self.env = sc2_env.SC2Env(map_name="DefeatRoaches")
def train(self,rollout,sess,gamma,bootstrap_value):
rollout = np.array(rollout)
obs_screen = rollout[:,0]
obs_nonspatial = rollout[:,1]
actions_base = rollout[:,2]
actions_arg_screen_x = rollout[:,3]
actions_arg_screen_y = rollout[:,4]
actions_arg_screen2_x = rollout[:,5]
actions_arg_screen2_y = rollout[:,6]
actions_arg_select_point_act = rollout[:,7]
actions_arg_select_add = rollout[:,8]
actions_arg_control_group_act = rollout[:,9]
actions_arg_control_group_id = rollout[:,10]
actions_arg_select_unit_id = rollout[:,11]
actions_arg_select_unit_act = rollout[:,12]
actions_arg_queued = rollout[:,13]
rewards = rollout[:,14]
next_obs_screen = rollout[:,15]
next_obs_nonspatial = rollout[:,16]
values = rollout[:,18]
# Here we take the rewards and values from the rollout, and use them to calculate the advantage and discounted returns.
# The advantage function uses generalized advantage estimation from [2]
self.rewards_plus = np.asarray(rewards.tolist() + [bootstrap_value])
discounted_rewards = discount(self.rewards_plus,gamma)[:-1]
self.value_plus = np.asarray(values.tolist() + [bootstrap_value])
advantages = rewards + gamma * self.value_plus[1:] - self.value_plus[:-1]
advantages = discount(advantages,gamma)
# Update the global network using gradients from loss
# Generate network statistics to periodically save
feed_dict = {self.local_AC.target_v:discounted_rewards,
self.local_AC.inputs_spatial_screen_reshaped:np.stack(obs_screen).reshape(-1,64,64,7),
self.local_AC.inputs_nonspatial:np.stack(obs_nonspatial).reshape(-1,727),
self.local_AC.actions_base:actions_base,
self.local_AC.actions_arg_screen_x:actions_arg_screen_x,
self.local_AC.actions_arg_screen_y:actions_arg_screen_y,
self.local_AC.actions_arg_screen2_x:actions_arg_screen2_x,
self.local_AC.actions_arg_screen2_y:actions_arg_screen2_y,
self.local_AC.actions_arg_select_point_act:actions_arg_select_point_act,
self.local_AC.actions_arg_select_add:actions_arg_select_add,
self.local_AC.actions_arg_control_group_act:actions_arg_control_group_act,
self.local_AC.actions_arg_control_group_id:actions_arg_control_group_id,
self.local_AC.actions_arg_select_unit_id:actions_arg_select_unit_id,
self.local_AC.actions_arg_select_unit_act:actions_arg_select_unit_act,
self.local_AC.actions_arg_queued:actions_arg_queued,
self.local_AC.advantages:advantages}
v_l,p_l,e_l,g_n,v_n, _ = sess.run([self.local_AC.value_loss,
self.local_AC.policy_loss,
self.local_AC.entropy,
self.local_AC.grad_norms,
self.local_AC.var_norms,
self.local_AC.apply_grads],
feed_dict=feed_dict)
return v_l / len(rollout),p_l / len(rollout),e_l / len(rollout), g_n,v_n
def work(self,max_episode_length,gamma,sess,coord,saver):
episode_count = sess.run(self.global_episodes)
total_steps = 0
print ("Starting worker " + str(self.number))
with sess.as_default(), sess.graph.as_default():
while not coord.should_stop():
# Download copy of parameters from global network
sess.run(self.update_local_ops)
episode_buffer = []
episode_values = []
episode_frames = []
episode_reward = 0
episode_step_count = 0
d = False
# Start new episode
obs = self.env.reset()
episode_frames.append(obs[0])
reward, nonspatial_stack, minimap_stack, screen_stack, episode_end = process_observation(obs[0])
s_screen = screen_stack
s_nonspatial = nonspatial_stack
while not episode_end:
# Take an action using distributions from policy networks' outputs.
base_action_dist, screen_x_dist, screen_y_dist, screen2_x_dist, screen2_y_dist, select_point_act_dist,select_add_dist,control_group_act_dist,control_group_id_dist,select_unit_id_dist,select_unit_act_dist,queued_dist,v = sess.run([
self.local_AC.policy_base_actions,
self.local_AC.policy_arg_screen_x,
self.local_AC.policy_arg_screen_y,
self.local_AC.policy_arg_screen2_x,
self.local_AC.policy_arg_screen2_y,
self.local_AC.policy_arg_select_point_act,
self.local_AC.policy_arg_select_add,
self.local_AC.policy_arg_control_group_act,
self.local_AC.policy_arg_control_group_id,
self.local_AC.policy_arg_select_unit_id,
self.local_AC.policy_arg_select_unit_act,
self.local_AC.policy_arg_queued,
self.local_AC.value],
feed_dict={self.local_AC.inputs_spatial_screen_reshaped: screen_stack,
self.local_AC.inputs_nonspatial: nonspatial_stack})
# Apply filter to remove unavailable actions and then renormalize
index2action_id = {0:0, 1:1, 2:2, 3:3, 4:4, 5:5, 6:7, 7:12, 8:13, 9:274, 10:331, 11:332, 12:333, 13:334, 14:451, 15:452, 16:453}
for index, action in enumerate(base_action_dist[0]):
action_id = index2action_id[index]
if action_id not in obs[0].observation['available_actions']:
base_action_dist[0][index] = 0
if np.sum(base_action_dist[0]) != 1:
current_sum = np.sum(base_action_dist[0])
base_action_dist[0] /= current_sum
base_action = sample_dist(base_action_dist)
arg_screen_x = sample_dist(screen_x_dist)
arg_screen_y = sample_dist(screen_y_dist)
arg_screen2_x = sample_dist(screen2_x_dist)
arg_screen2_y = sample_dist(screen2_y_dist)
arg_select_point_act = sample_dist(select_point_act_dist)
arg_select_add = sample_dist(select_add_dist)
arg_control_group_act = sample_dist(control_group_act_dist)
arg_control_group_id = sample_dist(control_group_id_dist)
arg_select_unit_id = sample_dist(select_unit_id_dist)
arg_select_unit_act = sample_dist(select_unit_act_dist)
arg_queued = sample_dist(queued_dist)
# 17 relevant base actions
if base_action == 0:
# 0/no_op
action_id = 0
arguments = []
elif base_action == 1:
# 1/move_camera
action_id = 1
arguments = [[arg_screen_x, arg_screen_y]]
elif base_action == 2:
# 2/select_point
action_id = 2
arguments = [[arg_select_point_act],[arg_screen_x, arg_screen_y]]
elif base_action == 3:
# 3/select_rect
action_id = 3
arguments = [[arg_select_add],[arg_screen_x, arg_screen_y],[arg_screen2_x, arg_screen2_y]]
elif base_action == 4:
# 4/select_control_group
action_id = 4
arguments = [[arg_control_group_act],[arg_control_group_id]]
elif base_action == 5:
# 5/select_unit
action_id = 5
arguments = [[arg_select_unit_act],[arg_select_unit_id]]
elif base_action == 6:
# 7/select_army
action_id = 7
arguments = [[arg_select_add]]
elif base_action == 7:
# 12/Attack_screen
action_id = 12
arguments = [[arg_queued],[arg_screen_x, arg_screen_y]]
elif base_action == 8:
# 13/Attack_minimap
action_id = 13
arguments = [[arg_queued],[arg_screen_x, arg_screen_y]]
elif base_action == 9:
# 274/HoldPosition_quick
action_id = 274
arguments = [[arg_queued]]
elif base_action == 10:
# 331/Move_screen
action_id = 331
arguments = [[arg_queued],[arg_screen_x, arg_screen_y]]
elif base_action == 11:
# 332/Move_minimap
action_id = 332
arguments = [[arg_queued],[arg_screen_x, arg_screen_y]]
elif base_action == 12:
# 333/Patrol_screen
action_id = 333
arguments = [[arg_queued],[arg_screen_x, arg_screen_y]]
elif base_action == 13:
# 334/Patrol_minimap
action_id = 334
arguments = [[arg_queued],[arg_screen_x, arg_screen_y]]
elif base_action == 14:
# 451/Smart_screen
action_id = 451
arguments = [[arg_queued],[arg_screen_x, arg_screen_y]]
elif base_action == 15:
# 452/Smart_minimap
action_id = 452
arguments = [[arg_queued],[arg_screen_x, arg_screen_y]]
elif base_action == 16:
# 453/Stop_quick
action_id = 453
arguments = [[arg_queued]]
a = actions.FunctionCall(action_id, arguments)
obs = self.env.step(actions=[a])
r, nonspatial_stack, minimap_stack, screen_stack, episode_end = process_observation(obs[0])
if not episode_end:
episode_frames.append(obs[0])
s1_screen = screen_stack
s1_nonspatial = nonspatial_stack
else:
s1_screen = s_screen
s1_nonspatial = s_nonspatial
# Append latest state to buffer
episode_buffer.append([s_screen, s_nonspatial,base_action,arg_screen_x,arg_screen_y,arg_screen2_x,arg_screen2_y,arg_select_point_act,arg_select_add,arg_control_group_act,arg_control_group_id,arg_select_unit_id,arg_select_unit_act,arg_queued,r,s1_screen, s1_nonspatial,d,v[0,0]])
episode_values.append(v[0,0])
episode_reward += r
s_screen = s1_screen
s_nonspatial = s1_nonspatial
total_steps += 1
episode_step_count += 1
global _steps
_steps += 1
# If the episode hasn't ended, but the experience buffer is full, then we make an update step using that experience rollout.
if len(episode_buffer) == 30 and not episode_end and episode_step_count != max_episode_length - 1:
# Since we don't know what the true final return is, we "bootstrap" from our current value estimation.
v1 = sess.run(self.local_AC.value,
feed_dict={self.local_AC.inputs_spatial_screen_reshaped: screen_stack,self.local_AC.inputs_nonspatial: nonspatial_stack})[0,0]
v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,v1)
episode_buffer = []
sess.run(self.update_local_ops)
if episode_end:
break
self.episode_rewards.append(episode_reward)
self.episode_lengths.append(episode_step_count)
self.episode_mean_values.append(np.mean(episode_values))
episode_count += 1
global _max_score, _running_avg_score, _episodes
if _max_score < episode_reward:
_max_score = episode_reward
_running_avg_score = (2.0 / 101) * (episode_reward - _running_avg_score) + _running_avg_score
_episodes += 1
print("{} Step #{} Episode #{} Reward: {}".format(self.name, total_steps, episode_count, episode_reward))
print("Total Steps: {}\tTotal Episodes: {}\tMax Score: {}\tAvg Score: {}".format(_steps, _episodes, _max_score, _running_avg_score))
# Update the network using the episode buffer at the end of the episode.
if len(episode_buffer) != 0:
v_l,p_l,e_l,g_n,v_n = self.train(episode_buffer,sess,gamma,0.0)
if episode_count % 5 == 0 and episode_count != 0:
if self.name == 'worker_0' and episode_count % 25 == 0:
time_per_step = 0.05
images = np.array(episode_frames)
if episode_count % 250 == 0 and self.name == 'worker_0':
saver.save(sess,self.model_path+'/model-'+str(episode_count)+'.cptk')
print ("Saved Model")
mean_reward = np.mean(self.episode_rewards[-5:])
mean_length = np.mean(self.episode_lengths[-5:])
mean_value = np.mean(self.episode_mean_values[-5:])
summary = tf.Summary()
summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))
summary.value.add(tag='Perf/Length', simple_value=float(mean_length))
summary.value.add(tag='Perf/Value', simple_value=float(mean_value))
summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))
summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))
summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))
summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))
summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))
self.summary_writer.add_summary(summary, episode_count)
self.summary_writer.flush()
if self.name == 'worker_0':
sess.run(self.increment)
def main():
max_episode_length = 300
gamma = .99 # discount rate for advantage estimation and reward discounting
load_model = True
model_path = './_model_old'
global _max_score, _running_avg_score, _steps, _episodes
_max_score = -9
_running_avg_score = -9
_steps = 0
_episodes = 0
tf.reset_default_graph()
if not os.path.exists(model_path):
os.makedirs(model_path)
with tf.device("/cpu:0"):
global_episodes = tf.Variable(0,dtype=tf.int32,name='global_episodes',trainable=False)
trainer = tf.train.AdamOptimizer(learning_rate=1e-4)
master_network = AC_Network('global',None) # Generate global network
num_workers = psutil.cpu_count() # Set workers to number of available CPU threads
num_workers = 1
workers = []
# Create worker classes
for i in range(num_workers):
workers.append(Worker(i,trainer,model_path,global_episodes))
saver = tf.train.Saver(max_to_keep=5)
with tf.Session() as sess:
coord = tf.train.Coordinator()
if load_model == True:
print ('Loading Model...')
ckpt = tf.train.get_checkpoint_state(model_path)
saver.restore(sess,ckpt.model_checkpoint_path)
else:
sess.run(tf.global_variables_initializer())
# This is where the asynchronous magic happens.
# Start the "work" process for each worker in a separate thread.
worker_threads = []
for worker in workers:
worker_work = lambda: worker.work(max_episode_length,gamma,sess,coord,saver)
t = threading.Thread(target=(worker_work))
t.start()
sleep(0.5)
worker_threads.append(t)
coord.join(worker_threads)
if __name__ == '__main__':
import sys
from absl import flags
FLAGS = flags.FLAGS
FLAGS(sys.argv)
main()
|
A3CtypeAD.py
|
'''
Type anomaly detection file
'''
import tensorflow as tf
import threading
import multiprocessing
import os
import shutil
import itertools
from my_enviroment import my_env
from estimators import ValueEstimator, PolicyEstimator
from policy_monitor import PolicyMonitor
from worker import Worker
def del_all_flags(FLAGS):
flags_dict = FLAGS._flags()
keys_list = [keys for keys in flags_dict]
for keys in keys_list:
FLAGS.__delattr__(keys)
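# Added note: clearing the existing absl/tf flags up front avoids duplicate-flag errors when this
# module is re-run or re-imported in the same interpreter; the flags are re-declared below.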
del_all_flags(tf.flags.FLAGS)
tf.logging.set_verbosity(tf.logging.INFO)
tf.flags.DEFINE_string("model_dir", "tmp/a3c", "Directory to write Tensorboard summaries and videos to.")
tf.flags.DEFINE_integer("t_max", 2, "Number of steps before performing an update")
tf.flags.DEFINE_integer("max_global_steps", None, "Stop training after this many steps in the environment. Defaults to running indefinitely.")
tf.flags.DEFINE_integer("eval_every", 180, "Evaluate the policy every N seconds")
tf.flags.DEFINE_boolean("reset", True, "If set, delete the existing model directory and start training from scratch.")
tf.flags.DEFINE_integer("parallelism", None, "Number of threads to run. If not set we run [num_cpu_cores] threads.")
FLAGS = tf.flags.FLAGS
# Initialization of the environment
def make_env():
kdd_train = '../../datasets/NSL/KDDTrain+.txt'
kdd_test = '../../datasets/NSL/KDDTest+.txt'
formated_train_path = "../../datasets/formated/formated_train_type.data"
formated_test_path = "../../datasets/formated/formated_test_type.data"
batch_size = 1
fails_episode = 10 # number of fails in an episode
env = my_env('train',train_path=kdd_train,test_path=kdd_test,
formated_train_path = formated_train_path,
formated_test_path = formated_test_path,
batch_size=batch_size,
fails_episode=fails_episode)
return env
env_ = make_env()
VALID_ACTIONS = list(range(env_.action_space))
# Set the number of workers
NUM_WORKERS = multiprocessing.cpu_count()
if FLAGS.parallelism:
NUM_WORKERS = FLAGS.parallelism
MODEL_DIR = FLAGS.model_dir
CHECKPOINT_DIR = os.path.join(MODEL_DIR, "checkpoints")
# Optionally empty model directory
if FLAGS.reset:
shutil.rmtree(MODEL_DIR, ignore_errors=True)
if not os.path.exists(CHECKPOINT_DIR):
os.makedirs(CHECKPOINT_DIR)
summary_writer = tf.summary.FileWriter(os.path.join(MODEL_DIR, "train"))
with tf.device("/cpu:0"):
# Keeps track of the number of updates we've performed
global_step = tf.Variable(0, name="global_step", trainable=False)
# Global policy and value nets
with tf.variable_scope("global") as vs:
policy_net = PolicyEstimator(num_outputs=len(VALID_ACTIONS),
observation_space=env_.observation_space)
value_net = ValueEstimator(observation_space=env_.observation_space,
reuse=True)
# Global step iterator
global_counter = itertools.count()
# Create worker graphs
workers = []
for worker_id in range(NUM_WORKERS):
# We only write summaries in one of the workers because they're
# pretty much identical and writing them on all workers
# would be a waste of space
worker_summary_writer = None
if worker_id == 0:
worker_summary_writer = summary_writer
worker = Worker(name="worker_{}".format(worker_id),
env=make_env(),
policy_net=policy_net,
value_net=value_net,
global_counter=global_counter,
discount_factor = 0.005,
summary_writer=worker_summary_writer,
max_global_steps=FLAGS.max_global_steps)
workers.append(worker)
saver = tf.train.Saver(keep_checkpoint_every_n_hours=0.5, max_to_keep=10)
# Used to occasionally save videos for our policy net
# and write episode rewards to Tensorboard
pe = PolicyMonitor(
env=make_env(),
policy_net=policy_net,
summary_writer=summary_writer,
saver=saver)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
# Load a previous checkpoint if it exists
latest_checkpoint = tf.train.latest_checkpoint(CHECKPOINT_DIR)
if latest_checkpoint:
print("Loading model checkpoint: {}".format(latest_checkpoint))
saver.restore(sess, latest_checkpoint)
# Start worker threads
worker_threads = []
for worker in workers:
worker_fn = lambda worker=worker: worker.run(sess, coord, FLAGS.t_max)
t = threading.Thread(target=worker_fn)
t.start()
worker_threads.append(t)
# Start a thread for policy eval task
monitor_thread = threading.Thread(target=lambda: pe.continuous_eval(FLAGS.eval_every, sess, coord))
monitor_thread.start()
# Wait for all workers to finish
coord.join(worker_threads)
|
isaac_ros_server.py
|
#!/usr/bin/env python3
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Header, Float32MultiArray, Float32
import threading
class isaac_ros_server():
def __init__(self) -> None:
print("Initializing node... ")
rospy.init_node("isaac_ros_server")
print("Initializing Done... ")
self.force_sensor = Float32MultiArray()
rospy.Subscriber("force", Float32MultiArray, self.ForceCallback)
self.pub = rospy.Publisher('joint_states', JointState, queue_size=1)
def thread_job():
while not self.thread_stop:
rospy.spin()
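# Note: rospy.spin() blocks until the node is shut down, so this loop body
# effectively runs once; thread_stop only prevents re-entering spin afterwards.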
self.thread = threading.Thread(target = thread_job)
self.thread_stop = False
self.thread.start()
def clean_shutdown():
self.thread_stop = True
rospy.on_shutdown(clean_shutdown)
def ForceCallback(self, force_sensor):
self.force_sensor = force_sensor.data
print(self.force_sensor)
def joint_states_server(self, joint_position):
rate = rospy.Rate(10) # 10hz
joint_states = JointState()
joint_states.header = Header()
joint_states.header.stamp = rospy.Time.now()
joint_states.name = ['right_s0', 'right_s1', 'right_e0', 'right_e1', 'right_w0', 'right_w1', 'right_w2', 'lfinger', 'rfinger']
joint_states.position = joint_position
joint_states.velocity = []
joint_states.effort = []
self.pub.publish(joint_states)
rate.sleep()
if __name__ == '__main__':
try:
i_s = isaac_ros_server()
i_s.joint_states_server([0.0] * 9)  # joint_states_server needs a joint_position sequence (one value per joint name); all-zero positions are just a placeholder
except rospy.ROSInterruptException:
pass
|
main_ui.py
|
# -*- coding: utf-8 -*-
from sys import exit
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from capture_core import *
# 使用matplotlib绘制柱状图
import numpy as np
import matplotlib.pyplot as plt
import json
from monitor_system import start_monitor
from forged_packet import startForged
from multiprocessing import Process
class Ui_MainWindow(QMainWindow):
core = None
timer = None
Monitor = None
Forged = None
def setupUi(self):
self.setWindowTitle("WireWhale")
self.resize(950, 580)
#设置程序图标
icon = QIcon()
icon.addPixmap(QPixmap("img/shark.jpg"), QIcon.Normal, QIcon.Off)
self.setWindowIcon(icon)
self.setIconSize(QSize(20, 20))
#中间布局,设为透明
self.centralWidget = QWidget(self)
self.centralWidget.setStyleSheet("background:transparent;")
#栅栏布局,使得窗口自适应
self.gridLayout = QGridLayout(self.centralWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(6)
#顶部控件布局
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setContentsMargins(10, 2, 10, 1)
self.horizontalLayout.setSpacing(20)
#三个显示区布局
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setContentsMargins(10, 0, 3, 10)
self.verticalLayout.setSpacing(6)
# 初始主窗口字体
font = QFont()
with open('data.json', 'r') as file_obj:
'''Read the json config file'''
old_font = json.load(file_obj) # 返回列表数据,也支持字典
if old_font["font"]:
font.setFamily(old_font["font"])
font.setPointSize(int(old_font["size"]))
else:
if platform == 'Windows':
font.setFamily("Lucida Sans Typewriter")
old_font["font"] = "Lucida Sans Typewriter"
if platform == "Linux":
font.setFamily("Noto Mono")
old_font["font"] = "Noto Mono"
font.setPointSize(11)
with open('data.json', 'w') as file_obj:
'''Write back to the json config file'''
json.dump(old_font, file_obj)
#数据包显示框
self.info_tree = QTreeWidget(self.centralWidget)
self.info_tree.setFrameStyle(QFrame.Box | QFrame.Plain)
self.info_tree.setAutoScroll(True)
self.info_tree.setRootIsDecorated(False)
self.info_tree.setFont(font)
self.info_tree.setColumnCount(7) #设置表格为7列
#固定行高,取消每次刷新所有行,避免更新数据时不流畅
self.info_tree.setUniformRowHeights(True)
#设置表头
self.info_tree.headerItem().setText(0, "No.")
self.info_tree.headerItem().setText(1, "Time")
self.info_tree.headerItem().setText(2, "Source")
self.info_tree.headerItem().setText(3, "Destination")
self.info_tree.headerItem().setText(4, "Protocol")
self.info_tree.headerItem().setText(5, "Length")
self.info_tree.headerItem().setText(6, "Info")
self.info_tree.setStyleSheet("background:transparent;")
self.info_tree.setSortingEnabled(True)
self.info_tree.sortItems(0, Qt.AscendingOrder)
self.info_tree.setColumnWidth(0, 75)
self.info_tree.setColumnWidth(1, 130)
self.info_tree.setColumnWidth(2, 150)
self.info_tree.setColumnWidth(3, 150)
self.info_tree.setColumnWidth(4, 85)
self.info_tree.setColumnWidth(5, 60)
for i in range(7):
self.info_tree.headerItem().setBackground(i,
QBrush(QColor(Qt.white)))
self.info_tree.setSelectionBehavior(
QTreeWidget.SelectRows) #设置选中时为整行选中
self.info_tree.setSelectionMode(QTreeWidget.SingleSelection) #设置只能选中一行
"""显示排序图标"""
self.info_tree.header().setSortIndicatorShown(True)
self.info_tree.clicked.connect(self.on_tableview_clicked)
#数据包详细内容显示框
self.treeWidget = QTreeWidget(self.centralWidget)
self.treeWidget.setAutoScroll(True)
self.treeWidget.setTextElideMode(Qt.ElideMiddle)
self.treeWidget.header().setStretchLastSection(True)
self.treeWidget.setStyleSheet("background:transparent; color:white;")
self.treeWidget.header().hide()
self.treeWidget.setFont(font)
# 设为只有一列
self.treeWidget.setColumnCount(1)
self.treeWidget.setFrameStyle(QFrame.Box | QFrame.Plain)
#hex显示区域
self.hexBrowser = QTextBrowser(self.centralWidget)
self.hexBrowser.setText("")
self.hexBrowser.setFont(font)
self.hexBrowser.setStyleSheet("background:transparent; color:white;")
self.hexBrowser.setFrameStyle(QFrame.Box | QFrame.Plain)
# 允许用户通过拖动三个显示框的边界来控制子组件的大小
self.splitter = QSplitter(Qt.Vertical)
self.splitter.addWidget(self.info_tree)
self.splitter.addWidget(self.treeWidget)
self.splitter.addWidget(self.hexBrowser)
self.verticalLayout.addWidget(self.splitter)
self.gridLayout.addLayout(self.verticalLayout, 1, 0, 1, 1)
#过滤器输入框
self.Filter = QLineEdit(self.centralWidget)
self.Filter.setPlaceholderText("Apply a capture filter … ")
self.Filter.setStyleSheet("background:white")
self.Filter.setFont(font)
self.horizontalLayout.addWidget(self.Filter)
#过滤器按钮
self.FilterButton = QPushButton(self.centralWidget)
self.FilterButton.setText("开始")
icon1 = QIcon()
icon1.addPixmap(QPixmap("img/go.png"), QIcon.Normal, QIcon.Off)
self.FilterButton.setIcon(icon1)
self.FilterButton.setIconSize(QSize(20, 20))
self.FilterButton.setStyleSheet("background:white")
self.FilterButton.clicked.connect(self.on_start_action_clicked)
self.horizontalLayout.addWidget(self.FilterButton)
"""
网卡选择框
"""
self.choose_nicbox = QComboBox(self.centralWidget)
self.choose_nicbox.setFont(font)
self.choose_nicbox.setStyleSheet("background:white; color:black;")
self.horizontalLayout.addWidget(self.choose_nicbox)
self.horizontalLayout.setStretch(0, 8)
self.horizontalLayout.setStretch(1, 1)
self.horizontalLayout.setStretch(2, 4)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
"""初始网卡复选框"""
row_num = len(keys)
self.choose_nicbox.addItem("All")
for i in range(row_num):
self.choose_nicbox.addItem(keys[i])
self.setCentralWidget(self.centralWidget)
"""
顶部菜单栏
"""
self.menuBar = QMenuBar(self)
self.menuBar.setGeometry(QRect(0, 0, 953, 23))
self.menuBar.setAccessibleName("")
self.menuBar.setDefaultUp(True)
self.menu_F = QMenu(self.menuBar)
self.menu_F.setTitle("文件(F)")
self.edit_menu = QMenu(self.menuBar)
self.edit_menu.setTitle("编辑(E)")
self.capture_menu = QMenu(self.menuBar)
self.capture_menu.setTitle("捕获(C)")
self.menu_H = QMenu(self.menuBar)
self.menu_H.setTitle("帮助(H)")
self.menu_Analysis = QMenu(self.menuBar)
self.menu_Analysis.setTitle("分析(A)")
self.menu_Statistic = QMenu(self.menuBar)
self.menu_Statistic.setTitle("统计(S)")
self.setMenuBar(self.menuBar)
#顶部工具栏
self.mainToolBar = QToolBar(self)
self.addToolBar(Qt.TopToolBarArea, self.mainToolBar)
self.statusBar = QStatusBar(self)
self.mainToolBar.setStyleSheet("background: #EDEDED;")
self.mainToolBar.setMaximumHeight(25)
self.setStatusBar(self.statusBar)
#字体设置键
font_set = QAction(self)
font_set.setText("主窗口字体")
font_set.triggered.connect(self.on_font_set_clicked)
#背景图片设置
change_border = QAction(self)
change_border.setText("背景图片")
change_border.triggered.connect(self.on_change_border_clicked)
#开始键
self.start_action = QAction(self)
icon2 = QIcon()
icon2.addPixmap(QPixmap("img/start.png"), QIcon.Normal, QIcon.Off)
self.start_action.setIcon(icon2)
self.start_action.setText("开始")
self.start_action.setShortcut('F1')
self.start_action.triggered.connect(self.on_start_action_clicked)
#停止键
self.stop_action = QAction(self)
icon3 = QIcon()
icon3.addPixmap(QPixmap("img/stop.png"), QIcon.Normal, QIcon.Off)
self.stop_action.setIcon(icon3)
self.stop_action.setText("停止")
self.stop_action.setShortcut('F3')
self.stop_action.setDisabled(True) #开始时该按钮不可点击
self.stop_action.triggered.connect(self.on_stop_action_clicked)
#暂停键
self.pause_action = QAction(self)
p_icon = QIcon()
p_icon.addPixmap(QPixmap("img/pause.png"), QIcon.Normal, QIcon.Off)
self.pause_action.setIcon(p_icon)
self.pause_action.setText("暂停")
self.pause_action.setShortcut('F2')
self.pause_action.setDisabled(True) # 开始时该按钮不可点击
self.pause_action.triggered.connect(self.on_pause_action_clicked)
#重新开始键
self.actionRestart = QAction(self)
icon4 = QIcon()
icon4.addPixmap(QPixmap("img/restart.png"), QIcon.Normal, QIcon.Off)
self.actionRestart.setIcon(icon4)
self.actionRestart.setText("重新开始")
self.actionRestart.setShortcut('F4')
self.actionRestart.setDisabled(True) # 开始时该按钮不可点击
self.actionRestart.triggered.connect(self.on_actionRestart_clicked)
#更新数据键
self.action_update = QAction(self)
icon5 = QIcon()
icon5.addPixmap(QPixmap("img/update.png"), QIcon.Normal, QIcon.Off)
self.action_update.setIcon(icon5)
self.action_update.setText("继续更新")
self.action_update.setShortcut('F5')
self.action_update.setDisabled(True)
self.action_update.triggered.connect(
lambda: self.timer.start(flush_time) and self.action_update.setDisabled(True)
)
#帮助文档
action_readme = QAction(self)
action_readme.setText("使用文档")
action_about = QAction(self)
action_about.setText("关于")
action_about.triggered.connect(self.on_action_about_clicked)
#打开文件键
action_openfile = QAction(self)
action_openfile.setText("打开")
action_openfile.setShortcut("ctrl+O")
action_openfile.triggered.connect(self.on_action_openfile_clicked)
#保存文件键
action_savefile = QAction(self)
action_savefile.setText("保存")
action_savefile.setShortcut("ctrl+S")
action_savefile.triggered.connect(self.on_action_savefile_clicked)
#退出键
self.action_exit = QAction(self)
self.action_exit.setCheckable(False)
self.action_exit.setText("退出")
self.action_exit.triggered.connect(self.on_action_exit_clicked)
self.action_exit.setShortcut('ctrl+Q')
self.action_exit.setStatusTip('退出应用程序')
#构造包
self.forged_action = QAction(self)
self.forged_action.setText("伪造包")
self.forged_action.setShortcut('F7')
self.forged_action.triggered.connect(self.forged_action_clicked)
#流量监测
self.action_track = QAction(self)
self.action_track.setText("流量监测")
self.action_track.setShortcut('F6')
self.action_track.triggered.connect(self.on_action_track_clicked)
#IP地址类型统计图
self.IP_statistics = QAction(self)
self.IP_statistics.setText("IP地址类型统计")
self.IP_statistics.triggered.connect(self.on_IP_statistics_clicked)
#报文类型统计图
self.message_statistics = QAction(self)
self.message_statistics.setText("报文类型统计")
self.message_statistics.triggered.connect(
self.on_message_statistics_clicked)
"""
添加工具栏:开始,暂停,停止,重新开始
"""
self.mainToolBar.addAction(self.start_action)
self.mainToolBar.addAction(self.pause_action)
self.mainToolBar.addAction(self.stop_action)
self.mainToolBar.addAction(self.actionRestart)
self.mainToolBar.addAction(self.action_update)
self.menu_F.addAction(action_openfile)
self.menu_F.addAction(action_savefile)
self.menu_F.addAction(self.action_exit)
self.menu_F.showFullScreen()
self.edit_menu.addAction(font_set)
self.edit_menu.addAction(change_border)
#捕获菜单栏添加子菜单
self.capture_menu.addAction(self.start_action)
self.capture_menu.addAction(self.pause_action)
self.capture_menu.addAction(self.stop_action)
self.capture_menu.addAction(self.actionRestart)
self.menu_H.addAction(action_readme)
self.menu_H.addAction(action_about)
self.menu_Analysis.addAction(self.forged_action)
self.menu_Analysis.addAction(self.action_track)
self.menu_Statistic.addAction(self.IP_statistics)
self.menu_Statistic.addAction(self.message_statistics)
self.menuBar.addAction(self.menu_F.menuAction())
self.menuBar.addAction(self.edit_menu.menuAction())
self.menuBar.addAction(self.capture_menu.menuAction())
self.menuBar.addAction(self.menu_Analysis.menuAction())
self.menuBar.addAction(self.menu_Statistic.menuAction())
self.menuBar.addAction(self.menu_H.menuAction())
# self.statusBar.showMessage('实时更新的信息', 0) # 状态栏本身显示的信息 第二个参数是信息停留的时间,单位是毫秒,默认是0(0表示在下一个操作来临前一直显示)
"""底部状态栏
利用self.comNum.setText()实时更新状态栏信息
"""
self.comNum = QLabel('下载速度:')
self.baudNum = QLabel('上传速度:')
self.getSpeed = QLabel('收包速度:')
self.sendSpeed = QLabel('发包速度:')
self.netNic = QLabel('Welcome to WireWhale! ^ _ ^')
self.statusBar.setStyleSheet("background: #EDEDED;")
"""各个单元空间占比"""
self.statusBar.addPermanentWidget(self.netNic, stretch=2)
self.statusBar.addPermanentWidget(self.getSpeed, stretch=1)
self.statusBar.addPermanentWidget(self.sendSpeed, stretch=1)
self.statusBar.addPermanentWidget(self.comNum, stretch=1)
self.statusBar.addPermanentWidget(self.baudNum, stretch=1)
QMetaObject.connectSlotsByName(self)
self.core = Core(self)
# 设置定时器将抓包列表置底
self.timer = QTimer(self)
self.timer.timeout.connect(self.info_tree.scrollToBottom)
self.show()
"""
重写窗口关闭事件
"""
def closeEvent(self, QCloseEvent):
def close_to_do():
self.core.clean_out()
if self.Monitor and self.Monitor.is_alive():
self.Monitor.terminate()
if self.Forged and self.Forged.is_alive():
self.Forged.terminate()
exit()
if self.core.start_flag or self.core.pause_flag:
# 没有停止抓包
reply = QMessageBox.question(
self, 'Message', "您是否要停止捕获,并保存已捕获的分组?\n警告:若不保存,您捕获的分组将会丢失",
QMessageBox.Save | QMessageBox.Close | QMessageBox.Cancel,
QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
QCloseEvent.ignore()
if reply == QMessageBox.Close:
self.core.stop_capture()
close_to_do()
elif reply == QMessageBox.Save:
self.core.stop_capture()
self.on_action_savefile_clicked()
close_to_do()
elif self.core.stop_flag and not self.core.save_flag:
"""
已停止,但没有保存文件
"""
reply = QMessageBox.question(
self, 'Message', "您是否保存已捕获的分组?\n警告:若不保存,您捕获的分组将会丢失",
QMessageBox.Save | QMessageBox.Close | QMessageBox.Cancel,
QMessageBox.Cancel)
if reply == QMessageBox.Cancel:
QCloseEvent.ignore()
elif reply == QMessageBox.Save:
self.on_action_savefile_clicked()
close_to_do()
else:
close_to_do()
elif self.core.save_flag or not self.core.start_flag:
"""
未工作状态
"""
reply = QMessageBox.question(self, 'Message', "您是否要退出本程序?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.Yes:
close_to_do()
else:
QCloseEvent.ignore()
"""绘制背景"""
def paintEvent(self, a0: QPaintEvent):
painter = QPainter(self)
pixmap = QPixmap("img/Whale1.jpg")
painter.drawPixmap(self.rect(), pixmap)
"""
数据包视图 数据记录点击事件
点击列表中一条记录时,在下面的frame框中显示帧的详细信息
"""
def on_tableview_clicked(self):
selected_row = self.info_tree.currentItem().text(0) #当前选择的编号
#表格停止追踪更新
if selected_row and selected_row.isdigit():
self.timer.stop()
self.show_infoTree((int)(selected_row))
if not self.core.pause_flag and not self.core.stop_flag:
self.action_update.setDisabled(False)
"""
展开帧的详细信息
"""
def show_infoTree(self, selected_row):
"""
清空Frame Information内容
"""
self.treeWidget.clear()
"""
添加树节点
Item1: 第一层树节点
Item1_1: 第二层树节点,Item1的子节点
QTreeWidgetItem(parentNode, text) parentNode:父节点 text:当前节点内容
"""
parentList, childList, hex_dump = self.core.on_click_item(selected_row)
p_num = len(parentList)
for i in range(p_num):
item1 = QTreeWidgetItem(self.treeWidget)
item1.setText(0, parentList[i])
c_num = len(childList[i])
for j in range(c_num):
item1_1 = QTreeWidgetItem(item1)
item1_1.setText(0, childList[i][j])
self.set_hex_text(hex_dump)
"""
获取当前选择的网卡
"""
def get_choose_nic(self):
card = self.choose_nicbox.currentText()
self.netNic.setText('当前网卡:' + card)
if (card == 'All'):
a = None
elif platform == 'Windows':
a = netcards[card]
elif platform == 'Linux':
a = card
else:
a = None
return a
"""
设置hex区文本
"""
def set_hex_text(self, text):
self.hexBrowser.setText(text)
"""
设置字体点击事件
"""
def on_font_set_clicked(self):
font, ok = QFontDialog.getFont()
if ok:
with open('data.json', 'r') as file_obj:
'''Read the json config file'''
old_font = json.load(file_obj) # 返回列表数据,也支持字典
old_font["font"] = font.family()
old_font["size"] = font.pointSize()
with open('data.json', 'w') as file:
json.dump(old_font, file)
self.info_tree.setFont(font)
self.treeWidget.setFont(font)
self.hexBrowser.setFont(font)
"""
设置背景图片
"""
def on_change_border_clicked(self):
imgName, imgType = QFileDialog.getOpenFileName(
self, "打开图片", "C:/", "*.jpg;;*.png;;All Files(*)")
with open('data.json', 'r') as file_obj:
'''Read the json config file'''
old_image = json.load(file_obj) # 返回列表数据,也支持字典
old_image["imageUrl"] = imgName
with open('data.json', 'w') as file:
json.dump(old_image, file)
window_pale = QPalette()
window_pale.setBrush(self.backgroundRole(), QBrush(QPixmap(imgName)))
self.setPalette(window_pale)
"""
开始键点击事件
"""
def on_start_action_clicked(self):
if self.core.stop_flag:
# 重新开始清空面板内容
self.info_tree.clear()
self.treeWidget.clear()
self.set_hex_text("")
self.core.start_capture(self.get_choose_nic(), self.Filter.text())
"""
点击开始后,过滤器不可编辑,开始按钮、网卡选择框全部设为不可选
激活暂停、停止键、重新开始键
"""
self.start_action.setDisabled(True)
self.Filter.setEnabled(False)
self.FilterButton.setEnabled(False)
self.choose_nicbox.setEnabled(False)
self.actionRestart.setDisabled(False)
self.pause_action.setEnabled(True)
self.stop_action.setEnabled(True)
self.timer.start(flush_time)
"""
暂停事件点击事件
"""
def on_pause_action_clicked(self):
self.core.pause_capture()
"""
激活开始、停止、重新开始键、过滤器、网卡选择框
"""
self.start_action.setEnabled(True)
self.stop_action.setDisabled(False)
self.actionRestart.setDisabled(False)
self.Filter.setDisabled(True)
self.FilterButton.setDisabled(True)
self.choose_nicbox.setDisabled(False)
self.pause_action.setDisabled(True)
self.action_update.setDisabled(True)
self.timer.stop()
"""
菜单栏停止键点击事件
"""
def on_stop_action_clicked(self):
self.core.stop_capture()
"""
激活开始键、重新开始键、过滤器、网卡选择框
"""
self.stop_action.setDisabled(True)
self.pause_action.setDisabled(True)
self.start_action.setEnabled(True)
self.Filter.setDisabled(False)
self.FilterButton.setDisabled(False)
self.choose_nicbox.setDisabled(False)
self.action_update.setDisabled(True)
self.timer.stop()
"""
重新开始键响应事件
"""
def on_actionRestart_clicked(self):
# 重新开始清空面板内容
self.timer.stop()
self.core.restart_capture(self.get_choose_nic(), self.Filter.text())
self.info_tree.clear()
self.treeWidget.clear()
self.set_hex_text("")
"""
点击开始后,过滤器不可编辑,开始按钮,网卡选择框全部设为不可选
激活暂停、停止键、重新开始键
"""
self.actionRestart.setDisabled(False)
self.start_action.setDisabled(True)
self.Filter.setEnabled(False)
self.FilterButton.setEnabled(False)
self.choose_nicbox.setEnabled(False)
self.pause_action.setEnabled(True)
self.stop_action.setEnabled(True)
self.timer.start(flush_time)
"""
IP地址类型统计图绘制
"""
def on_IP_statistics_clicked(self):
IP = self.core.get_network_count()
IPv4_count = IP["ipv4"]
IPv6_count = IP["ipv6"]
IP_count = IPv4_count + IPv6_count
if IP_count == 0:
reply = QMessageBox.information(self, "提示", "你还没有抓包!",
QMessageBox.Cancel)
else:
IPv4_fre = IPv4_count / IP_count
IPv6_fre = IPv6_count / IP_count
data = {
'IPv4': (IPv4_fre, '#7199cf'),
'IPv6': (IPv6_fre, '#4fc4aa'),
}
fig = plt.figure(figsize=(6, 4))
# 创建绘图区域
ax1 = fig.add_subplot(111)
ax1.set_title('IPv4 & IPv6 Statistical Chart')
# 生成x轴的每个元素的位置,列表是[1,2,3,4]
xticks = np.arange(1, 3)
# 自定义柱状图的每个柱的宽度
bar_width = 0.6
IP_type = data.keys()
values = [x[0] for x in data.values()]
colors = [x[1] for x in data.values()]
# 画柱状图,设置柱的边缘为透明
bars = ax1.bar(xticks, values, width=bar_width, edgecolor='none')
# 设置y轴的标签
ax1.set_ylabel('Proportion')
ax1.set_xticks(xticks)
ax1.set_xticklabels(IP_type)
# 设置x,y轴的范围
ax1.set_xlim([0, 3.5])
ax1.set_ylim([0, 1])
# 给每一个bar分配颜色
for bar, color in zip(bars, colors):
bar.set_color(color)
plt.show()
"""
数据包类型数量统计
"""
def on_message_statistics_clicked(self):
trans = self.core.get_transport_count()
TCP_count = trans["tcp"]
UDP_count = trans["udp"]
ARP_count = trans["arp"]
ICMP_count = trans["icmp"]
if TCP_count + UDP_count + ARP_count + ICMP_count == 0:
reply = QMessageBox.information(self, "提示", "你还没有抓包!",
QMessageBox.Cancel)
else:
labels = 'TCP', 'ICMP', 'UDP', 'ARP'
fracs = [TCP_count, ICMP_count, UDP_count, ARP_count]
explode = [0.1, 0.1, 0.1, 0.1] # 0.1 凸出这部分,
plt.axes(
aspect=1
) # set this , Figure is round, otherwise it is an ellipse
# autopct ,show percet
plt.pie(
x=fracs,
labels=labels,
explode=explode,
autopct='%3.1f %%',
shadow=True,
labeldistance=1.1,
startangle=90,
pctdistance=0.6)
plt.show()
"""
打开文件事件
"""
def on_action_openfile_clicked(self):
if self.core.start_flag or self.core.pause_flag:
QMessageBox.warning(self, "警告", "请停止当前抓包!")
return
self.core.open_pcap_file()
"""
保存文件点击事件
"""
def on_action_savefile_clicked(self):
if self.core.start_flag or self.core.pause_flag:
QMessageBox.warning(self, "警告", "请停止当前抓包!")
return
self.core.save_captured_to_pcap()
"""
菜单栏追踪流键点击事件
"""
def on_action_track_clicked(self):
if not self.Monitor or not self.Monitor.is_alive():
self.Monitor = Process(target=start_monitor)
self.Monitor.start()
def forged_action_clicked(self):
if not self.Forged or not self.Forged.is_alive():
self.Forged = Process(target=startForged)
self.Forged.start()
about = "软件著作者:张桓皓 张兴\n\n" + "软件主要功能如下:\n" + "1.对网络接口数据包尽可能多的捕获,可以将网卡设置为混杂模式,然后进行数据包的采集;\n" + "2.对捕获的数据包进行一定的解析,将报文在网络层和传输层逐字段展开,对数据包的协议类型、源目的地址、数据包截获时间、数据包内容进行分析;\n" + "3.根据用户不同的要求能够依据特定指定地址、特定协议类型相关包等条件进行自定义监视;\n" + "4.针对应用进行流量监测,监测结果输出实时流量图显示,管理员可设置流量上限,当应用流量超过这个最高限度时可以向管理员进行报警;\n" + "5.系统提供了多种方式显示结果,如以饼状图的形式统计ARP报文、TCP报文、UDP报文ICMP报文进行统计,以柱状图的形式统计IPv4报文、IPv6报文进行统计,以折线图的形式实时显示具体应用流量;\n" + "6.实现数据包保存,便于日后分析,即将捕获到的数据包,可另存为一个文件,并能被本系统所读取和展示;\n" + "7.伪造报文实现网络反攻击或进行深入微调IP或传输层的域。\n\n" + "*解释权归著作者所有"
def on_action_about_clicked(self):
QMessageBox.information(self, "关于", self.about)
"""
退出点击事件
"""
def on_action_exit_clicked(self, event):
self.closeEvent(event)
"""
进度加载框
num: 加载数据数量
"""
def showDialog(self, num):
progress = QProgressDialog(self)
progress.setWindowTitle("请稍等")
progress.setLabelText("正在加载数据...")
progress.setCancelButtonText("取消")
progress.setMinimumDuration(1) #进度条加载时间
progress.setWindowModality(Qt.WindowModal)
progress.setRange(0, num)
for i in range(num):
progress.setValue(i)
if progress.wasCanceled():
QMessageBox.warning(self, "提示", "操作失败")
break
progress.setValue(num)
QMessageBox.information(self, "提示", "操作成功")
"""键盘点击事件"""
def keyReleaseEvent(self, event):
if event.key() == Qt.Key_Up or event.key() == Qt.Key_Down:
self.timer.stop()
selected_row = self.info_tree.currentItem().text(0)
if selected_row and selected_row.isdigit():
self.show_infoTree(int(selected_row))
self.action_update.setDisabled(False)
if event.key() == Qt.Key_F5:
self.timer.start(flush_time)
self.action_update.setDisabled(True)
def start():
app = QApplication([])
ui = Ui_MainWindow()
ui.setupUi()
app.exec()
|
threading_tests.py
|
import threading
import time
import logging
import json
import urllib.request
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
class ThreadPool(object):
def __init__(self):
super(ThreadPool, self).__init__()
self.active = []
self.lock = threading.Lock()
def makeActive(self, name):
with self.lock:
self.active.append(name)
# logging.debug('Running: %s', self.active)
def makeInactive(self, name):
with self.lock:
self.active.remove(name)
# logging.debug('Thread %s ended', name)
def bP(t, s, pool):
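# Added description: fetches the token's ETH- and BTC-quoted tickers plus the
# USDT prices of ETH and BTC from Bittrex, converts both quotes to USD, and
# prints the arbitrage details when the USD gap exceeds $0.20 (after a rough
# fee of 0.25% of the gap). The shared semaphore `s` caps concurrent requests
# and `pool` tracks which worker threads are active.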
#logging.debug('Waiting to join the pool')
with s:
name = threading.currentThread().getName()
pool.makeActive(name)
try:
token_eth = json.load(urllib.request.urlopen("https://bittrex.com/api/v1.1/public/getticker?&market=ETH-" + t))
token_btc = json.load(urllib.request.urlopen("https://bittrex.com/api/v1.1/public/getticker?&market=BTC-" + t))
usd_btc = json.load(urllib.request.urlopen("https://bittrex.com/api/v1.1/public/getticker?&market=usdt-btc"))
usd_eth = json.load(urllib.request.urlopen("https://bittrex.com/api/v1.1/public/getticker?&market=usdt-eth"))
except urllib.error.URLError:
print("timeout")
pool.makeInactive(name)
return
try:
token_usd_eth = usd_eth['result']['Ask'] * token_eth['result']['Bid']
except TypeError:
token_usd_eth = 0
# print("No arb")
pool.makeInactive(name)
return
try:
token_usd_btc = usd_btc['result']['Ask'] * token_btc['result']['Bid']
except TypeError:
token_usd_btc = 0
# print("No arb")
pool.makeInactive(name)
return
if (token_usd_btc > token_usd_eth):
profit = (token_usd_btc - token_usd_eth)
elif (token_usd_btc < token_usd_eth):
profit = (token_usd_eth - token_usd_btc)
elif (token_usd_btc == token_usd_eth):
profit = 0
tx_fee = (profit / 100) / 4
if (profit > .2 and token_usd_eth != 0 and token_usd_btc != 0):
print(t + " If you buy in terms of BTC, this token will cost $" + str(token_usd_btc) + " In terms of ETH it will cost: $" + str(token_usd_eth) + " USD profit per " + t + " : $" + str(profit - tx_fee) + " Transaction fee: $" + str(tx_fee))
pool.makeInactive(name)
return
else:
#print("No arb here")
pool.makeInactive(name)
return
if __name__ == '__main__':
pool = ThreadPool()
s = threading.Semaphore(10)
threads = []
tokens = {'SALT'}  # seed token set; the loop below adds every market currency from the Bittrex snapshot
marketSnapshot = json.load(urllib.request.urlopen("https://bittrex.com/api/v1.1/public/getmarkets"))
for r in marketSnapshot['result']:
pivot = r['MarketCurrency']
base = r['BaseCurrency']
# market_string = base + "-" + pivot
tokens.add(pivot)
while 1:
print("Starting batch...")
threads = []
count = 0
start_time = time.time()
for t in tokens:
temp = threading.Thread(target=bP, name='thread_'+t, args=(t, s, pool))
temp.start()
threads.append(temp)
for x in threads:
count = count + 1
x.join()
# print(threading.active_count())
end_time = time.time()
total = end_time - start_time
print("This batch took " + str(total) + " seconds to run " + str(count) +" threads. Waiting 30s for next batch...")
# print("Sleeping 30 sec for next batch.")
time.sleep(30)
# for i in range(10):
# t = threading.Thread(target=f, name='thread_'+str(i), args=(s, pool))
# t.start()
|
run_parallel_sensitivity_analysis 2.py
|
import sys
import os
from multiprocessing import Process
import time
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
from gym import spaces
from gym.spaces import Tuple, Box
import numpy as np
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv
from ray.rllib.utils.policy_server import PolicyServer
from ray.tune.logger import pretty_print
from ray.tune.registry import register_env
from gym.spaces import Tuple, Box, MultiDiscrete, Discrete
from ray import tune
from ray.rllib.env.multi_agent_env import MultiAgentEnv
# from run.intelligent_bidding.run.run import run_scenario
# from run.intelligent_bidding.RL_server.intelligent_bidding_rl_server import run_agent
import multiprocessing as mp
import logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
import pandas as pd
import os.path
import sys
import numpy as np
import pickle
from fitter import Fitter
import fitter
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..'))
from elecsim.model.world import World
import tracemalloc
import pandas as pd
import linecache
import time
from elecsim.constants import ROOT_DIR
from scipy.stats import johnsonsb, skewnorm, dgamma, genlogistic, dweibull, johnsonsu
import ray
from concurrent import futures
@ray.remote
def run_scenario(gencos_rl_bidding, port_number):
print("Running scenario with: {}".format(gencos_rl_bidding))
time.sleep(180)
beis_params = [0.00121256259168, 46.850377392563864, 0.0029982421515, 28.9229765616468, 0.00106156336814,
18.370337670063762, 0.00228312539654, 0.0, 0.0024046471141100003, 34.43480109190594, 0.0,
-20.88014916953091, 0.0, 8.15032953348701, 0.00200271495761, -12.546185375581802, 0.00155518243668,
39.791132970522796, 0.00027449937576, 8.42878689508516, 0.00111989525697, 19.81640207212787,
0.00224091998324, 5.26288570922149, 0.00209189353332, -5.9117317131295195, 0.00240696026847,
-5.0144941135222, 0.00021183142492999999, -1.29658413335784, 0.00039441444392000004,
-11.41659250225168, 0.00039441444392000004, -11.41659250225168, 120.21276910611674, 0.0,
0.00059945111227]
prices_individual = np.array(beis_params[:-3]).reshape(-1, 2).tolist()
MARKET_TIME_SPLICES = 8
YEARS_TO_RUN = 1
number_of_steps = YEARS_TO_RUN * MARKET_TIME_SPLICES
scenario_2018 = "../scenario/reference_scenario_2018.py".format(ROOT_DIR)
carbon_df = pd.read_csv('linear_data_exploded.csv'.format(ROOT_DIR))
carbon_list = carbon_df.x.tolist()
# result_distributions_object = pickle.load(open(
# "{}/run/market_forecasting_comparison/run/Compare_worlds/result_distributions_object.p".format(ROOT_DIR),
# "rb"))
#
# resultant_dist = '{}'
#
# dist_class = eval(list(result_distributions_object[resultant_dist].fitted_param.keys())[0] + ".rvs")
# dist_object = dist_class(*list(result_distributions_object[resultant_dist].fitted_param.values())[0],
# size=50000).tolist()
while True:
world = World(carbon_price_scenario=carbon_list, initialization_year=2018, scenario_file=scenario_2018,
market_time_splices=MARKET_TIME_SPLICES, data_folder="compare_ml_accuracy",
number_of_steps=number_of_steps, long_term_fitting_params=prices_individual, highest_demand=63910,
nuclear_subsidy=beis_params[-3], future_price_uncertainty_m=beis_params[-2],
future_price_uncertainty_c=beis_params[-1], dropbox=False, gencos_rl=gencos_rl_bidding,
write_data_to_file=True, rl_port_number=port_number)
for _ in range(YEARS_TO_RUN):
for i in range(MARKET_TIME_SPLICES):
# try:
if i == MARKET_TIME_SPLICES - 1:  # flag the last market splice of the year (original check was 'i / 8 == 0', which only fires for i == 0)
print('end of year')
world.step()
# SERVER_ADDRESS = "rllibserver"
# SERVER_ADDRESS = "localhost"
# SERVER_PORT = 9920
# CHECKPOINT_FILE = "last_checkpoint.out"
class MarketServing(ExternalEnv):
def __init__(self, number_of_plants, server_port=9920, max_bid=600):
self.SERVER_ADDRESS = "localhost"
# SERVER_PORT = 9920
self.CHECKPOINT_FILE = "last_checkpoint.out"
self.server_port = server_port
self.max_bid = max_bid
self.number_of_plants = number_of_plants
lower_bounds = [-100000] * 7
# lower_bounds.extend([-99999])
upper_bounds = [10000000] * 7
# upper_bounds.extend([99999])
ExternalEnv.__init__(
self,
# MultiDiscrete([16, 10]),
# Discrete(159),
# action_space=Box(shape=37),
# action_space=Box(low=0, high=200, shape=(37,), dtype=np.float),
action_space=Box(low=0, high=self.max_bid, shape=(self.number_of_plants,), dtype=np.float32),  # np.float32: the bare np.float alias is deprecated/removed in newer NumPy
observation_space=Box(np.array(lower_bounds), np.array(upper_bounds)))
def run(self):
print("Starting policy server at {}:{}".format(self.SERVER_ADDRESS,
self.server_port))
server = PolicyServer(self, self.SERVER_ADDRESS, self.server_port)
server.serve_forever()
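# Illustrative sketch (an assumption, not part of this script): the simulation
# side talks to the PolicyServer started above through RLlib's PolicyClient
# (ray.rllib.utils.policy_client in the Ray version this script appears to
# target). A minimal external-environment loop could look like this:
#
#   from ray.rllib.utils.policy_client import PolicyClient
#
#   client = PolicyClient("http://localhost:9920")   # must match server_port
#   episode_id = client.start_episode(training_enabled=True)
#   obs = get_observation_from_market()              # hypothetical helper
#   action = client.get_action(episode_id, obs)      # one bid per plant
#   client.log_returns(episode_id, reward=compute_reward(action))  # hypothetical helper
#   client.end_episode(episode_id, obs)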
# if __name__ == "__main__":
@ray.remote
def run_agent(port_number, number_of_plants=14, max_bid=600):
# ray.init(redis_max_memory=10000000000, object_store_memory=3000000000, memory=2000000000)
print("Starting agent")
# ray.init()
# number_of_plants = 25
# number_of_plants = 37
register_env("srv_{}".format(port_number), lambda _: MarketServing(number_of_plants, port_number, max_bid))
tune.run_experiments({
"rl_bidding_{}_{}_max_bid_{}".format(number_of_plants, port_number, max_bid): {
# "run": "PG",
"run": "DDPG",
"env": "srv_{}".format(port_number),
# 'checkpoint_at_end': True,
# 'checkpoint_freq': 5,
# 'restore': '../../../../../../../ray_results/rl_bidding/DDPG_srv_0_2020-05-25_16-11-377wk6ln6z/checkpoint_30/checkpoint-30',
"config": {
# "num_gpus": 0,
# "num_workers": 1,
"env": "srv_{}".format(port_number),
"evaluation_num_episodes": 1,
# "sgd_stepsize": tune.grid_search([0.01, 0.001, 0.0001])
"sample_batch_size": 100,
"train_batch_size": 200,
# "horizon": 25,
# "exploration_config": {
# # The Exploration class to use.
# "type": "EpsilonGreedy",
# # Config for the Exploration class' constructor:
# "initial_epsilon": 1.0,
# "final_epsilon": 0.1,
# "epsilon_timesteps": 10000, # Timesteps over which to anneal epsilon.
# For soft_q, use:
# "exploration_config" = {
# "type": "SoftQ"
# "temperature": [float, e.g. 1.0]
# }
},
}
# }
})
@ray.remote
def run_agent_and_server_parallel(port_number, gencos_rl_bidding, number_of_plants, max_bid):
print(port_number)
print(gencos_rl_bidding)
ray.get([run_agent.remote(port_number, number_of_plants, max_bid), run_scenario.remote(gencos_rl_bidding, port_number)])
# p1 = Process(target=run_agent, args=(port_number,))
# p1.start()
#
# p2 = Process(target=run_scenario, args=(gencos_rl_bidding, port_number))
# p2.start()
#
# p1.join()
# p2.join()
if __name__ == "__main__":
ray.init(num_cpus=mp.cpu_count()-1)
# gencos_rl_bidding = ['EDF Energy', 'RWE Generation SE', 'test']
# gencos_rl_bidding = [["EDF Energy"],
# ["EDF Energy", "RWE Generation SE"],
# ["EDF Energy", "RWE Generation SE", "SSE"],
# ["EDF Energy", "RWE Generation SE", "SSE", "Uniper UK Limited"],
# ["EDF Energy", "RWE Generation SE", "SSE", "Uniper UK Limited", "Scottish power"],
# ["EDF Energy", "RWE Generation SE", "SSE", "Uniper UK Limited", "Scottish power",
# "Drax Power Ltd"], ['Orsted'], ['RWE Generation SE'], ['SSE'], ['Uniper UK Limited'],
# ['Scottish power'], ['Drax Power Ltd'], ["Magnox Ltd"]]
gencos_rl_bidding = ["EDF Energy", "RWE Generation SE", "SSE", "Uniper UK Limited", "Scottish power", "Drax Power Ltd"]
# number_of_plants = [14, 25, 155, 164, 213, 216, 11, 11, 130, 9, 49, 3]
number_of_plants = 216
# plant_names = [["EDF Energy"],
# ["EDF Energy", "RWE Generation SE"],
# ["EDF Energy", "RWE Generation SE", "SSE"],
# ["EDF Energy", "RWE Generation SE", "SSE", "Uniper UK Limited"],
# ["EDF Energy", "RWE Generation SE", "SSE", "Uniper UK Limited", "Scottish power"],
# ["EDF Energy", "RWE Generation SE", "SSE", "Uniper UK Limited", "Scottish power",
# "Drax Power Ltd"], ['Orsted'], ['RWE Generation SE'], ['SSE'], ['Uniper UK Limited'],
# ['Scottish power'], ['Drax Power Ltd'], ["Magnox Ltd"]]
# max_bid = 150
# max_bids = [100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200]
max_bids = [120, 140, 160, 180, 190]
# max_bid = 600
# for genco_group in gencos_rl_bidding:
# print(genco_group)
results = []
# for port_number, gencos, plant_number in zip(range(9951, 9951+len(gencos_rl_bidding)), gencos_rl_bidding, number_of_plants):
for port_number, max_bid in zip(range(9951, 9951+len(max_bids)), max_bids):
print(port_number)
print(max_bid)
# result = run_agent_and_server_parallel.remote(port_number, gencos)
result = run_agent_and_server_parallel.remote(port_number, gencos_rl_bidding, number_of_plants, max_bid)
results.append(result)
ray.get(results)
|
ThreadJoin.py
|
from threading import Thread
global exit_flag
exit_flag = False
def f():
i = 1
while not exit_flag:
i = (i + 1) % 100000000
if i % 100000 == 0: print("f making progress: {0}".format(i))
if __name__ == '__main__':
t1 = Thread(target=f,name="F_thread")
t1.start()
t1.join()
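# Note: nothing ever sets exit_flag to True, so f() loops forever and
# t1.join() blocks the main thread indefinitely (until the process is killed).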
|
selenium_utils.py
|
from chromedriver_py import binary_path as driver_path
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver import Chrome, ChromeOptions # TODO: Combine these two dependencies. Leaving it for now since it touches too many sites atm.
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
from utils import create_msg
import random, re, requests, string, threading
options = Options()
options.add_experimental_option(
"excludeSwitches", ["enable-automation", "enable-logging"]
)
options.add_experimental_option("useAutomationExtension", False)
class AnyEc:
"""Use with WebDriverWait to combine expected_conditions
in an OR.
"""
def __init__(self, *args):
self.ecs = args
def __call__(self, driver):
for fn in self.ecs:
try:
if fn(driver):
return True
except:
pass
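# Illustrative usage (an assumption, not part of this module): wait until any
# of several conditions holds, e.g. until one of two page titles appears.
#
#   WebDriverWait(driver, 30).until(
#       AnyEc(ec.title_is("Checkout"), ec.title_is("Sign-In"))
#   )
#
# wait_for_either_title() and wait_for_any_title() below wrap exactly this pattern.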
def no_amazon_image():
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option("prefs", prefs)
def yes_amazon_image():
prefs = {"profile.managed_default_content_settings.images": 0}
options.add_experimental_option("prefs", prefs)
def wait_for_element(d, e_id, time=30):
"""
Uses webdriver(d) to wait for page title(title) to become visible
"""
return WebDriverWait(d, time).until(ec.presence_of_element_located((By.ID, e_id)))
def wait_for_element_by_xpath(d, e_path, time=30):
return WebDriverWait(d, time).until(
ec.presence_of_element_located((By.XPATH, e_path))
)
def wait_for_element_by_class(d, e_class, time=30):
"""
Uses webdriver(d) to wait for page title(title) to become visible
"""
return WebDriverWait(d, time).until(
ec.presence_of_element_located((By.CLASS_NAME, e_class))
)
def wait_for_title(d, title, path):
"""
Uses webdriver(d) to navigate to get(path) until it equals title(title)
"""
while d.title != title:
d.get(path)
WebDriverWait(d, 1000)
def wait_for_page(d, title, time=30):
"""
Uses webdriver(d) to wait for page title(title) to become visible
"""
WebDriverWait(d, time).until(ec.title_is(title))
def wait_for_either_title(d, title1, title2, time=30):
"""
Uses webdriver(d) to wait for page title(title1 or title2) to become visible
"""
try:
WebDriverWait(d, time).until(AnyEc(ec.title_is(title1), ec.title_is(title2)))
except Exception:
pass
def wait_for_any_title(d, titles, time=30):
"""
Uses webdriver(d) to wait for page title(any in the list of titles) to become visible
"""
WebDriverWait(d, time).until(AnyEc(*[ec.title_is(title) for title in titles]))
def button_click_using_xpath(d, xpath):
"""
Uses webdriver(d) to click a button using an XPath(xpath)
"""
button_menu = WebDriverWait(d, 10).until(
ec.element_to_be_clickable((By.XPATH, xpath))
)
action = ActionChains(d)
action.move_to_element(button_menu).pause(1).click().perform()
def field_send_keys(d, field, keys):
"""
Uses webdriver(d) to fiend a field(field), clears it and sends keys(keys)
"""
elem = d.find_element_by_name(field)
elem.clear()
elem.send_keys(keys)
def has_class(element, class_name):
classes = element.get_attribute("class")
return class_name in classes
def add_cookies_to_session_from_driver(driver, session):
cookies = driver.get_cookies()
[
session.cookies.set_cookie(
requests.cookies.create_cookie(
domain=cookie["domain"],
name=cookie["name"],
value=cookie["value"],
)
)
for cookie in cookies
]
def enable_headless():
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
# https://stackoverflow.com/questions/33225947/can-a-website-detect-when-you-are-using-selenium-with-chromedriver
def change_driver(status_signal, loc):
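# Added description: rewrites the "$xxx_<22 chars>_" marker that chromedriver
# injects into pages (see the Stack Overflow link above) with a random value,
# so sites that fingerprint that variable are less likely to detect automation.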
fin = open(loc, 'rb')
data = fin.read()
val = "$" + "".join(random.choices(string.ascii_lowercase, k=3)) + "_" + \
"".join(random.choices(string.ascii_letters + string.digits, k=22)) + "_"
result = re.search(b"[$][a-z]{3}_[a-zA-Z0-9]{22}_", data)
if result is not None:
try:
status_signal.emit(create_msg("Changing value in Chromedriver", "normal"))
data = data.replace(result.group(0), val.encode())
fin.close()
fin = open(loc, 'wb')
fin.truncate()
fin.write(data)
fin.close()
except:
status_signal.emit(create_msg("Error modifying chromedriver", "error"))
else:
fin.close()
def open_browser(link, cookies):
threading.Thread(target=start_browser, args=(link, cookies)).start()
def start_browser(link, cookies):
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "eager"
chrome_options = ChromeOptions()
chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
chrome_options.add_experimental_option("useAutomationExtension", False)
driver = Chrome(desired_capabilities=caps, executable_path=driver_path, options=chrome_options)
driver.execute_cdp_cmd(
"Page.addScriptToEvaluateOnNewDocument",
{
"source": """
Object.defineProperty(window, 'navigator', {
value: new Proxy(navigator, {
has: (target, key) => (key === 'webdriver' ? false : key in target),
get: (target, key) =>
key === 'webdriver'
? undefined
: typeof target[key] === 'function'
? target[key].bind(target)
: target[key]
})
})
"""
},
)
driver.get(link)
for cookie in cookies:
driver.add_cookie({
"name": cookie["name"],
"value": cookie["value"],
"domain": cookie["domain"]
})
driver.get(link)
|
_polling.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import time
import threading
import uuid
from typing import TYPE_CHECKING
from azure.core.polling import PollingMethod, LROPoller, NoPolling
from azure.core.exceptions import ResourceNotFoundError, HttpResponseError
try:
from urlparse import urlparse # type: ignore # pylint: disable=unused-import
except ImportError:
from urllib.parse import urlparse
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.common import with_current_context
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from typing import Any, Callable, Union, List, Optional
logger = logging.getLogger(__name__)
class KeyVaultOperationPoller(LROPoller):
"""Poller for long running operations where calling result() doesn't wait for operation to complete.
"""
# pylint: disable=arguments-differ
def __init__(self, polling_method):
# type: (PollingMethod) -> None
super(KeyVaultOperationPoller, self).__init__(None, None, None, NoPolling())
self._polling_method = polling_method
# pylint: disable=arguments-differ
def result(self):
# type: () -> Any
"""Returns a representation of the final resource without waiting for the operation to complete.
:returns: The deserialized resource of the long running operation
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
return self._polling_method.resource()
@distributed_trace
def wait(self, timeout=None):
# type: (Optional[int]) -> None
"""Wait on the long running operation for a number of seconds.
You can check if this call has ended with timeout with the "done()" method.
:param int timeout: Period of time to wait for the long running
operation to complete (in seconds).
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
if not self._polling_method.finished():
self._done = threading.Event()
self._thread = threading.Thread(
target=with_current_context(self._start), name="KeyVaultOperationPoller({})".format(uuid.uuid4())
)
self._thread.daemon = True
self._thread.start()
if self._thread is None:
return
self._thread.join(timeout=timeout)
try:
# Let's handle possible None in forgiveness here
raise self._exception # type: ignore
except TypeError: # Was None
pass
class DeleteRecoverPollingMethod(PollingMethod):
"""Poller for deleting resources, and recovering deleted resources, in vaults with soft-delete enabled.
This works by polling for the existence of the deleted or recovered resource. When a resource is deleted, Key Vault
immediately removes it from its collection. However, the resource will not immediately appear in the deleted
collection. Key Vault will therefore respond 404 to GET requests for the deleted resource; when it responds 2xx
or 403, the resource exists in the deleted collection, i.e. its deletion is complete.
Similarly, while recovering a deleted resource, Key Vault will respond 404 to GET requests for the non-deleted
resource; when it responds 2xx or 403, the resource exists in the non-deleted collection, i.e. its recovery is complete.
(403 indicates completion of these operations because Key Vault responds 403 when a resource exists but the client
lacks permission to access it.)
"""
def __init__(self, command, final_resource, finished, interval=2):
self._command = command
self._resource = final_resource
self._polling_interval = interval
self._finished = finished
def _update_status(self):
# type: () -> None
try:
self._command()
self._finished = True
except ResourceNotFoundError:
pass
except HttpResponseError as e:
# If we are polling on get_deleted_* and we don't have get permissions, we will get
# ResourceNotFoundError until the resource is recovered, at which point we'll get a 403.
if e.status_code == 403:
self._finished = True
else:
raise
def initialize(self, client, initial_response, deserialization_callback):
pass
def run(self):
# type: () -> None
try:
while not self.finished():
self._update_status()
if not self.finished():
time.sleep(self._polling_interval)
except Exception as e:
logger.warning(str(e))
raise
def finished(self):
# type: () -> bool
return self._finished
def resource(self):
# type: () -> Any
return self._resource
def status(self):
# type: () -> str
return "finished" if self._finished else "polling"
|