motor_safety.py
|
#!/usr/bin/python3
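# Heartbeat/shutdown helper for a Raspberry Pi. The wiring is an assumption
# read from the code, not documented: BCM pin 14 is toggled at roughly 25 Hz
# while the script runs (e.g. to drive a status LED or an external watchdog),
# and BCM pin 17 is pulled up and polled for a press to ground; a press that
# is still low after a 0.1 s debounce triggers `sudo shutdown now`.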
import os
import time
import threading
import RPi.GPIO as GPIO
lock = threading.Lock()
run = True
def toggle():
frequency = 25
toggle_pin = 14
GPIO.setup(toggle_pin, GPIO.OUT)
state = True
my_run = True
while my_run:
GPIO.output(toggle_pin, state)
state = not state
time.sleep(1/frequency/2)
with lock:
my_run = run
GPIO.cleanup(toggle_pin)
if __name__ == '__main__':
shutdown_pin = 17
GPIO.setmode(GPIO.BCM)
GPIO.setup(shutdown_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
t = threading.Thread(name='toggle', target=toggle)
t.start()
while True:
try:
if not GPIO.input(shutdown_pin):
time.sleep(0.1)
if not GPIO.input(shutdown_pin):
break
time.sleep(0.5)
except Exception:
# Ignore transient GPIO read errors and keep polling the shutdown pin
pass
with lock:
run = False
time.sleep(1)
GPIO.cleanup(shutdown_pin)
os.system('sudo shutdown now')
|
program.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for TensorBoard command line program.
This is a lightweight module for bringing up a TensorBoard HTTP server
or emulating the `tensorboard` shell command.
Those wishing to create custom builds of TensorBoard can use this module
by swapping out `tensorboard.main` with the custom definition that
modifies the set of plugins and static assets.
This module does not depend on first-party plugins or the default web
server assets. Those are defined in `tensorboard.default`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import argparse
import atexit
from collections import defaultdict
import errno
import os
import signal
import socket
import sys
import threading
import time
import inspect
import absl.logging
import six
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from werkzeug import serving
from tensorboard import manager
from tensorboard import version
from tensorboard.backend import application
from tensorboard.backend.event_processing import event_file_inspector as efi
from tensorboard.plugins import base_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.util import tb_logging
try:
from absl import flags as absl_flags
from absl.flags import argparse_flags
except ImportError:
# Fall back to argparse with no absl flags integration.
absl_flags = None
argparse_flags = argparse
logger = tb_logging.get_logger()
def setup_environment():
"""Makes recommended modifications to the environment.
This function changes global state in the Python process. Calling
this function is a good idea, but it can't appropriately be called
from library routines.
"""
absl.logging.set_verbosity(absl.logging.WARNING)
# The default is HTTP/1.0 for some strange reason. If we don't use
# HTTP/1.1 then a new TCP socket and Python thread is created for
# each HTTP request. The tradeoff is we must always specify the
# Content-Length header, or do chunked encoding for streaming.
serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
def get_default_assets_zip_provider():
"""Opens stock TensorBoard web assets collection.
Returns:
A function that returns a newly opened file handle to the zip file
containing static assets for stock TensorBoard, or None if webfiles.zip
could not be found. The value the callback returns must be closed. The
paths inside the zip file are considered absolute paths on the web server.
"""
path = os.path.join(os.path.dirname(inspect.getfile(sys._getframe(1))),
'webfiles.zip')
if not os.path.exists(path):
logger.warning('webfiles.zip static assets not found: %s', path)
return None
return lambda: open(path, 'rb')
class TensorBoard(object):
"""Class for running TensorBoard.
Fields:
plugin_loaders: Set from plugins passed to constructor.
assets_zip_provider: Set by constructor.
server_class: Set by constructor.
flags: An argparse.Namespace set by the configure() method.
cache_key: As `manager.cache_key`; set by the configure() method.
"""
def __init__(self,
plugins=None,
assets_zip_provider=None,
server_class=None):
"""Creates new instance.
Args:
plugins: A list of TensorBoard plugins to load, as TBLoader instances or
TBPlugin classes. If not specified, defaults to first-party plugins.
assets_zip_provider: Delegates to TBContext or uses default if None.
server_class: An optional factory for a `TensorBoardServer` to use
for serving the TensorBoard WSGI app. If provided, its callable
signature should match that of `TensorBoardServer.__init__`.
:type plugins: list[Union[base_plugin.TBLoader, Type[base_plugin.TBPlugin]]]
:type assets_zip_provider: () -> file
:type server_class: class
"""
if plugins is None:
from tensorboard import default
plugins = default.get_plugins()
if assets_zip_provider is None:
assets_zip_provider = get_default_assets_zip_provider()
if server_class is None:
server_class = create_port_scanning_werkzeug_server
def make_loader(plugin):
if isinstance(plugin, base_plugin.TBLoader):
return plugin
if issubclass(plugin, base_plugin.TBPlugin):
return base_plugin.BasicLoader(plugin)
raise ValueError("Not a TBLoader or TBPlugin subclass: %s" % plugin)
self.plugin_loaders = [make_loader(p) for p in plugins]
self.assets_zip_provider = assets_zip_provider
self.server_class = server_class
self.flags = None
def configure(self, argv=('',), **kwargs):
"""Configures TensorBoard behavior via flags.
This method will populate the "flags" property with an argparse.Namespace
representing flag values parsed from the provided argv list, overridden by
explicit flags from remaining keyword arguments.
Args:
argv: Can be set to CLI args equivalent to sys.argv; the first arg is
taken to be the name of the path being executed.
kwargs: Additional arguments will override what was parsed from
argv. They must be passed as Python data structures, e.g.
`foo=1` rather than `foo="1"`.
Returns:
Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism
for absl.app.run() compatibility.
Raises:
ValueError: If flag values are invalid.
"""
parser = argparse_flags.ArgumentParser(
prog='tensorboard',
description=('TensorBoard is a suite of web applications for '
'inspecting and understanding your TensorFlow runs '
'and graphs. https://github.com/tensorflow/tensorboard '))
for loader in self.plugin_loaders:
loader.define_flags(parser)
arg0 = argv[0] if argv else ''
flags = parser.parse_args(argv[1:]) # Strip binary name from argv.
self.cache_key = manager.cache_key(
working_directory=os.getcwd(),
arguments=argv[1:],
configure_kwargs=kwargs,
)
if absl_flags and arg0:
# Only expose main module Abseil flags as TensorBoard native flags.
# This is the same logic Abseil's ArgumentParser uses for determining
# which Abseil flags to include in the short helpstring.
for flag in set(absl_flags.FLAGS.get_key_flags_for_module(arg0)):
if hasattr(flags, flag.name):
raise ValueError('Conflicting Abseil flag: %s' % flag.name)
setattr(flags, flag.name, flag.value)
for k, v in kwargs.items():
if not hasattr(flags, k):
raise ValueError('Unknown TensorBoard flag: %s' % k)
setattr(flags, k, v)
for loader in self.plugin_loaders:
loader.fix_flags(flags)
self.flags = flags
return [arg0]
def main(self, ignored_argv=('',)):
"""Blocking main function for TensorBoard.
This method is called by `tensorboard.main.run_main`, which is the
standard entrypoint for the tensorboard command line program. The
configure() method must be called first.
Args:
ignored_argv: Do not pass. Required for Abseil compatibility.
Returns:
Process exit code, i.e. 0 if successful or non-zero on failure. In
practice, an exception will most likely be raised instead of
returning non-zero.
:rtype: int
"""
self._install_signal_handler(signal.SIGTERM, "SIGTERM")
if self.flags.inspect:
logger.info('Not bringing up TensorBoard, but inspecting event files.')
event_file = os.path.expanduser(self.flags.event_file)
efi.inspect(self.flags.logdir, event_file, self.flags.tag)
return 0
if self.flags.version_tb:
print(version.VERSION)
return 0
try:
server = self._make_server()
sys.stderr.write('TensorBoard %s at %s (Press CTRL+C to quit)\n' %
(version.VERSION, server.get_url()))
sys.stderr.flush()
self._register_info(server)
server.serve_forever()
return 0
except TensorBoardServerException as e:
logger.error(e.msg)
sys.stderr.write('ERROR: %s\n' % e.msg)
sys.stderr.flush()
return -1
def launch(self):
"""Python API for launching TensorBoard.
This method is the same as main() except it launches TensorBoard in
a separate permanent thread. The configure() method must be called
first.
Returns:
The URL of the TensorBoard web server.
:rtype: str
"""
# Make it easy to run TensorBoard inside other programs, e.g. Colab.
server = self._make_server()
thread = threading.Thread(target=server.serve_forever, name='TensorBoard')
thread.daemon = True
thread.start()
return server.get_url()
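# A minimal usage sketch of the Python API above (the logdir value is
# hypothetical):
#
#   tb = TensorBoard()
#   tb.configure(argv=[None, '--logdir', '/tmp/logs'])
#   url = tb.launch()  # URL of the background server, e.g. printed or embedded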
def _register_info(self, server):
"""Write a TensorBoardInfo file and arrange for its cleanup.
Args:
server: The result of `self._make_server()`.
"""
server_url = urllib.parse.urlparse(server.get_url())
info = manager.TensorBoardInfo(
version=version.VERSION,
start_time=int(time.time()),
port=server_url.port,
pid=os.getpid(),
path_prefix=self.flags.path_prefix,
logdir=self.flags.logdir,
db=self.flags.db,
cache_key=self.cache_key,
)
atexit.register(manager.remove_info_file)
manager.write_info_file(info)
def _install_signal_handler(self, signal_number, signal_name):
"""Set a signal handler to gracefully exit on the given signal.
When this process receives the given signal, it will run `atexit`
handlers and then exit with `0`.
Args:
signal_number: The numeric code for the signal to handle, like
`signal.SIGTERM`.
signal_name: The human-readable signal name.
"""
old_signal_handler = None # set below
def handler(handled_signal_number, frame):
# In case we catch this signal again while running atexit
# handlers, take the hint and actually die.
signal.signal(signal_number, signal.SIG_DFL)
sys.stderr.write("TensorBoard caught %s; exiting...\n" % signal_name)
# The main thread is the only non-daemon thread, so it suffices to
# exit hence.
if old_signal_handler not in (signal.SIG_IGN, signal.SIG_DFL):
old_signal_handler(handled_signal_number, frame)
sys.exit(0)
old_signal_handler = signal.signal(signal_number, handler)
def _make_server(self):
"""Constructs the TensorBoard WSGI app and instantiates the server."""
app = application.standard_tensorboard_wsgi(self.flags,
self.plugin_loaders,
self.assets_zip_provider)
return self.server_class(app, self.flags)
class TensorBoardServer(object):
"""Class for customizing TensorBoard WSGI app serving."""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, wsgi_app, flags):
"""Create a flag-configured HTTP server for TensorBoard's WSGI app.
Args:
wsgi_app: The TensorBoard WSGI application to create a server for.
flags: argparse.Namespace instance of TensorBoard flags.
"""
raise NotImplementedError()
@abstractmethod
def serve_forever(self):
"""Blocking call to start serving the TensorBoard server."""
raise NotImplementedError()
@abstractmethod
def get_url(self):
"""Returns a URL at which this server should be reachable."""
raise NotImplementedError()
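# Sketch of a custom server implementation (names are illustrative, not part
# of this module; it assumes an explicit --port rather than port scanning).
# Such a class can be passed to the TensorBoard constructor as `server_class`:
#
#   from wsgiref import simple_server
#
#   class SimpleWSGIServer(TensorBoardServer):
#       def __init__(self, wsgi_app, flags):
#           self._flags = flags
#           self._server = simple_server.make_server(
#               flags.host or 'localhost', flags.port, wsgi_app)
#       def serve_forever(self):
#           self._server.serve_forever()
#       def get_url(self):
#           return 'http://%s:%d/' % (self._flags.host or 'localhost',
#                                     self._server.server_port)
#
#   tb = TensorBoard(server_class=SimpleWSGIServer)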
class TensorBoardServerException(Exception):
"""Exception raised by TensorBoardServer for user-friendly errors.
Subclasses of TensorBoardServer can raise this exception in order to
generate a clean error message for the user rather than a stacktrace.
"""
def __init__(self, msg):
self.msg = msg
class TensorBoardPortInUseError(TensorBoardServerException):
"""Error raised when attempting to bind to a port that is in use.
This should be raised when it is expected that binding to another
similar port would succeed. It is used as a signal to indicate that
automatic port searching should continue rather than abort.
"""
pass
def with_port_scanning(cls):
"""Create a server factory that performs port scanning.
This function returns a callable whose signature matches the
specification of `TensorBoardServer.__init__`, using `cls` as an
underlying implementation. It passes through `flags` unchanged except
in the case that `flags.port is None`, in which case it repeatedly
instantiates the underlying server with new port suggestions.
Args:
cls: A valid implementation of `TensorBoardServer`. This class's
initializer should raise a `TensorBoardPortInUseError` upon
failing to bind to a port when it is expected that binding to
another nearby port might succeed.
The initializer for `cls` will only ever be invoked with `flags`
such that `flags.port is not None`.
Returns:
A function that implements the `__init__` contract of
`TensorBoardServer`.
"""
def init(wsgi_app, flags):
# base_port: what's the first port to which we should try to bind?
# should_scan: if that fails, shall we try additional ports?
# max_attempts: how many ports shall we try?
should_scan = flags.port is None
base_port = core_plugin.DEFAULT_PORT if flags.port is None else flags.port
max_attempts = 10 if should_scan else 1
if base_port > 0xFFFF:
raise TensorBoardServerException(
'TensorBoard cannot bind to port %d > %d' % (base_port, 0xFFFF)
)
base_port = min(base_port + max_attempts, 0x10000) - max_attempts
for port in xrange(base_port, base_port + max_attempts):
subflags = argparse.Namespace(**vars(flags))
subflags.port = port
try:
return cls(wsgi_app=wsgi_app, flags=subflags)
except TensorBoardPortInUseError:
if not should_scan:
raise
# All attempts failed to bind.
raise TensorBoardServerException(
'TensorBoard could not bind to any port around %s '
'(tried %d times)'
% (base_port, max_attempts))
return init
class WerkzeugServer(serving.ThreadedWSGIServer, TensorBoardServer):
"""Implementation of TensorBoardServer using the Werkzeug dev server."""
# ThreadedWSGIServer handles this in werkzeug 0.12+ but we allow 0.11.x.
daemon_threads = True
def __init__(self, wsgi_app, flags):
self._flags = flags
host = flags.host
port = flags.port
# Without an explicit host, we default to serving on all interfaces,
# and will attempt to serve both IPv4 and IPv6 traffic through one
# socket.
self._auto_wildcard = not host
if self._auto_wildcard:
host = self._get_wildcard_address(port)
try:
super(WerkzeugServer, self).__init__(host, port, wsgi_app)
except socket.error as e:
if hasattr(errno, 'EACCES') and e.errno == errno.EACCES:
raise TensorBoardServerException(
'TensorBoard must be run as superuser to bind to port %d' %
port)
elif hasattr(errno, 'EADDRINUSE') and e.errno == errno.EADDRINUSE:
if port == 0:
raise TensorBoardServerException(
'TensorBoard unable to find any open port')
else:
raise TensorBoardPortInUseError(
'TensorBoard could not bind to port %d, it was already in use' %
port)
elif hasattr(errno, 'EADDRNOTAVAIL') and e.errno == errno.EADDRNOTAVAIL:
raise TensorBoardServerException(
'TensorBoard could not bind to unavailable address %s' % host)
elif hasattr(errno, 'EAFNOSUPPORT') and e.errno == errno.EAFNOSUPPORT:
raise TensorBoardServerException(
'TensorBoard could not bind to unsupported address family %s' %
host)
# Raise the raw exception if it wasn't identifiable as a user error.
raise
def _get_wildcard_address(self, port):
"""Returns a wildcard address for the port in question.
This will attempt to follow the best practice of calling getaddrinfo() with
a null host and AI_PASSIVE to request a server-side socket wildcard address.
If that succeeds, this returns the first IPv6 address found, or if none,
then returns the first IPv4 address. If that fails, then this returns the
hardcoded address "::" if socket.has_ipv6 is True, else "0.0.0.0".
"""
fallback_address = '::' if socket.has_ipv6 else '0.0.0.0'
if hasattr(socket, 'AI_PASSIVE'):
try:
addrinfos = socket.getaddrinfo(None, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, socket.IPPROTO_TCP,
socket.AI_PASSIVE)
except socket.gaierror as e:
logger.warn('Failed to auto-detect wildcard address, assuming %s: %s',
fallback_address, str(e))
return fallback_address
addrs_by_family = defaultdict(list)
for family, _, _, _, sockaddr in addrinfos:
# Format of the "sockaddr" socket address varies by address family,
# but [0] is always the IP address portion.
addrs_by_family[family].append(sockaddr[0])
if hasattr(socket, 'AF_INET6') and addrs_by_family[socket.AF_INET6]:
return addrs_by_family[socket.AF_INET6][0]
if hasattr(socket, 'AF_INET') and addrs_by_family[socket.AF_INET]:
return addrs_by_family[socket.AF_INET][0]
logger.warn('Failed to auto-detect wildcard address, assuming %s',
fallback_address)
return fallback_address
def server_bind(self):
"""Override to enable IPV4 mapping for IPV6 sockets when desired.
The main use case for this is so that when no host is specified, TensorBoard
can listen on all interfaces for both IPv4 and IPv6 connections, rather than
having to choose v4 or v6 and hope the browser didn't choose the other one.
"""
socket_is_v6 = (
hasattr(socket, 'AF_INET6') and self.socket.family == socket.AF_INET6)
has_v6only_option = (
hasattr(socket, 'IPPROTO_IPV6') and hasattr(socket, 'IPV6_V6ONLY'))
if self._auto_wildcard and socket_is_v6 and has_v6only_option:
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except socket.error as e:
# Log a warning on failure to dual-bind, except for EAFNOSUPPORT
# since that's expected if IPv4 isn't supported at all (IPv6-only).
if hasattr(errno, 'EAFNOSUPPORT') and e.errno != errno.EAFNOSUPPORT:
logger.warn('Failed to dual-bind to IPv4 wildcard: %s', str(e))
super(WerkzeugServer, self).server_bind()
def handle_error(self, request, client_address):
"""Override to get rid of noisy EPIPE errors."""
del request # unused
# Kludge to override a SocketServer.py method so we can get rid of noisy
# EPIPE errors. They're kind of a red herring as far as errors go. For
# example, `curl -N http://localhost:6006/ | head` will cause an EPIPE.
exc_info = sys.exc_info()
e = exc_info[1]
if isinstance(e, IOError) and e.errno == errno.EPIPE:
logger.warn('EPIPE caused by %s in HTTP serving' % str(client_address))
else:
logger.error('HTTP serving error', exc_info=exc_info)
def get_url(self):
if self._auto_wildcard:
display_host = socket.gethostname()
else:
host = self._flags.host
display_host = (
'[%s]' % host if ':' in host and not host.startswith('[') else host)
return 'http://%s:%d%s/' % (display_host, self.server_port,
self._flags.path_prefix.rstrip('/'))
create_port_scanning_werkzeug_server = with_port_scanning(WerkzeugServer)
|
procutil.py
|
from multiprocessing import Process, Queue
import traceback
import logging
log = logging.getLogger(__name__)
def _wrapper(target, codec, queue, args, kwargs):
exc = tb = None
try:
if args is None:
args = []
if kwargs is None:
kwargs = {}
r = target(*args, **kwargs)
except Exception as e:
r = None
exc = e
tb = traceback.format_exc()
if codec:
r = codec[0](r)
queue.put((tb, exc, r))
def run_in_childprocess(target, codec=None, *args, **kwargs):
assert codec is None or len(codec) == 2, codec
queue = Queue()
p = Process(target=_wrapper, args=(target, codec, queue, args, kwargs))
p.start()
tb, e, r = queue.get()
p.join()
if e:
log.error(tb)
raise e
if codec:
r = codec[1](r)
return r
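# Usage sketch (the worker and codec below are illustrative, not part of the
# original module): the codec pair encodes the result in the child before it
# is put on the queue, and decodes it again in the parent.
def _example_work(a, b):
    return {'sum': a + b}
def _encode(d):
    # runs in the child; turn the dict into something trivially picklable
    return tuple(d.items())
if __name__ == '__main__':
    print(run_in_childprocess(_example_work, (_encode, dict), 2, 3))  # {'sum': 5}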
|
test_blocks.py
|
import threading
from ..base import BaseTest
from seafobj import block_mgr
Success = True
class TestSeafBlockManager(BaseTest):
def setUp(self):
self.repo_id = self.TEST_CEPH_REPO_ID
self.repo_id_2 = self.TEST_CEPH_REPO_ID_2
self.modified_bkid = '125f1e9dc9f3eca5a6819f9b4a2e17e53d7e2f78'
self.new_bkid = '2949afb5a9c351b9415b91c8f3d0d98991118c11'
self.renamed_bkid = 'b73b3cf6dc021d20c7a0e9bedf46a5b6a58bdd53'
self.moved_bkid = '1569cf662c7befe4c4891a22cc7a1c035bc8bfac'
def load_block(self):
seafblk = block_mgr.load_block(self.repo_id, 1, self.new_bkid)
self.assertIn(b'this is new file.', seafblk)
seafblk = block_mgr.load_block(self.repo_id, 1, self.modified_bkid)
self.assertIn(b'this is modified file', seafblk)
seafblk = block_mgr.load_block(self.repo_id, 1, self.renamed_bkid)
self.assertIn(b'this is renamed file.', seafblk)
seafblk = block_mgr.load_block(self.repo_id, 1, self.moved_bkid)
self.assertIn(b'this is moved file.', seafblk)
def load_block_2(self):
seafblk = block_mgr.load_block(self.repo_id_2, 1, self.new_bkid)
self.assertIn(b'this is new file.', seafblk)
seafblk = block_mgr.load_block(self.repo_id_2, 1, self.modified_bkid)
self.assertIn(b'this is modified file', seafblk)
seafblk = block_mgr.load_block(self.repo_id_2, 1, self.renamed_bkid)
self.assertIn(b'this is renamed file.', seafblk)
seafblk = block_mgr.load_block(self.repo_id_2, 1, self.moved_bkid)
self.assertIn(b'this is moved file.', seafblk)
def test_load_block(self):
test_multi = True
try:
obj_stores = block_mgr.obj_stores
except AttributeError:
test_multi = False
if test_multi:
for i in range(100):
self.load_block()
self.load_block_2()
else:
for i in range(100):
self.load_block()
def catch_with_load_block(self):
try:
self.test_load_block()
except AssertionError:
global Success
Success = False
#raise e
except Exception as e:
raise e
def test_load_block_with_multi_thread(self):
ths = []
for i in range(20):
th = threading.Thread(target=self.catch_with_load_block)
ths.append(th)
th.start()
for th in ths:
th.join()
self.assertTrue(Success)
|
rs_localisation.py
|
import pyrealsense2 as rs
import numpy as np
import cv2
class RSLocalisation():
origin = [0,0]
threshold_m00 = 70000
def __init__(self):
self.pipeline = rs.pipeline()
config = rs.config()
pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
if device_product_line == 'L500':
config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
depth_profile = rs.video_stream_profile(pipeline_profile.get_stream(rs.stream.depth))
self.depth_intrinsics = depth_profile.get_intrinsics()
self.profile = self.pipeline.start(config)
pass
def __del__(self):
print("Closing RS pipeline")
self.pipeline.stop()
def get_robot_position(self, true_position = False):
frames = self.get_frames()
colour_frame = frames.get_color_frame()
depth_frame = frames.get_depth_frame()
pixel_coordinate, valid = self.get_pixel_coordinate(colour_frame)
if not valid:
print("Robot not found")
return False
depth_at_pixel = depth_frame.get_distance(pixel_coordinate[0], pixel_coordinate[1]) #distance to pixel in m
coordinate_camera_frame = rs.rs2_deproject_pixel_to_point(self.depth_intrinsics, pixel_coordinate, depth_at_pixel)
if true_position:
coordinate_robot_frame = self.convert_to_robot_frame(coordinate_camera_frame, true_position = True)
else:
coordinate_robot_frame = self.convert_to_robot_frame(coordinate_camera_frame)
return coordinate_robot_frame
def get_frames(self):
align_to = rs.stream.depth
align = rs.align(align_to)
frames = self.pipeline.wait_for_frames()
aligned_frames = align.process(frames)
return aligned_frames
def get_colour_image(self):
frames = self.get_frames()
colour_frame = frames.get_color_frame()
colour_image = np.asanyarray(colour_frame.get_data())
return colour_image
def get_pixel_coordinate(self, colour_frame):
lower_colour = np.array([90,200,150])
upper_colour = np.array([110,255,255])
colour_image = np.asanyarray(colour_frame.get_data())
hsv_frame = cv2.cvtColor(colour_image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_frame, lower_colour, upper_colour)
#ret,thresh = cv2.threshold(masked_frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
moments = cv2.moments(mask)
#print(moments["m00"])
if moments["m00"] < self.threshold_m00:
return [0,0], False
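#Centroid of the detected colour blob from the image moments: x = m10/m00, y = m01/m00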
centroid_X = int(moments["m10"] / moments["m00"])
centroid_Y = int(moments["m01"] / moments["m00"])
#Calibration code
# masked_frame = cv2.bitwise_and(colour_image,colour_image, mask= mask)
# cv2.circle(masked_frame,(300,300),2,(0,0,255),3)
# cv2.namedWindow('Test', cv2.WINDOW_NORMAL)
# cv2.imshow('Test', masked_frame)
# hsv_val = hsv_frame[300,300]
# print(f"HSV at point: {hsv_val}")
return [centroid_X,centroid_Y], True
def convert_to_robot_frame(self, coord, true_position = False):
#RealSense should face the ground with "up" towards the robot's X axis (forwards)
xa = -coord[1]
ya = -coord[0]
if true_position:
return [xa, ya]
x = xa - self.origin[0]
y = ya - self.origin[1]
print(f"Xc: {coord[0]:.2f}, Yc: {coord[1]:.2f}, Zc: {coord[2]:.2f}, Xr: {x:.2f}, Yr: {y:.2f}")
return [x,y]
def update_robot_origin(self):
self.origin = [0,0]
updated_origin = self.get_robot_position()
while updated_origin is False:
updated_origin = self.get_robot_position()
self.origin = updated_origin
return
def get_pixel_coordinate_display(self):
frames = self.get_frames()
colour_frame = frames.get_color_frame()
coord, valid = self.get_pixel_coordinate(colour_frame)
if not valid:
return False
colour_image = np.asanyarray(colour_frame.get_data())
cv2.circle(colour_image,(coord[0],coord[1]),2,(0,0,255),3)
#print(coord)
return colour_image
running_flag_test = True
def reset_test(rsloc: RSLocalisation):
import time
time.sleep(10)
while running_flag_test:
rsloc.update_robot_origin()
time.sleep(10)
return
if __name__ == "__main__":
rsloc = RSLocalisation()
import threading
reset_thread = threading.Thread(target=reset_test, args=[rsloc], daemon=True)
reset_thread.start()
while True:
rsloc.get_robot_position()
images = rsloc.get_pixel_coordinate_display()
if images is False:
continue
cv2.namedWindow('Aligned Colour with Object', cv2.WINDOW_NORMAL)
cv2.imshow('Aligned Colour with Object', images)
key = cv2.waitKey(1)
# Press esc or 'q' to close the image window
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
break
|
Model.py
|
"""
This application provides a link between:
* Radio automation
* MySQL database
* FTP server
It also generates an XML file that links the website to the file server.
Part of the comments are in English; status messages are reported in Dutch.
"""
__version__ = '1.2'
__author__ = 'Sander Post'
import schedule
import ftplib
import mysql.connector
import datetime
import tzlocal
import threading
import xml.etree.ElementTree
import os
from time import sleep
class Model:
# Define class variables
status = 'Initialiseren..'
time = 'Initialiseren..'
xml_handle = "C:\\Users\\Gebruiker\\Desktop\\Executable Uitzending Gemist\\uitzendingen.xml"
ftp_username = "" # username
ftp_password = "" # password
ftp_server = "" # server
sql_username = "" # username
sql_password = "" # password
sql_server = "" # server
sql_database = "" # databasename
path = '\\\\ZENLEX-SERVER\\Zenlex\\Item Collectie\\herhaaluren\\'
timezone = tzlocal.get_localzone() # Local timezone used for datetime conversions
# Run a method in a different thread
# @staticmethod
def run_threaded(self, job_func):
try:
job_thread = threading.Thread(target=job_func)
job_thread.start()
except Exception:
pass
# Return current weeknumber
@staticmethod
def get_week():
weeknumber = datetime.datetime.today().isocalendar()[1]
return weeknumber
# Print the XML tree
def print_xml(self):
tree = xml.etree.ElementTree.parse(self.xml_handle)
root = tree.getroot()
for week in root:
for day in week:
for program in day:
for detail in program:
print(detail.text)
# =============================================================================
# XML Handling
# =============================================================================
# Update XML
# Args: param1 (int) search week
# param2 (str) search day
# param3 (str) search program
# param4 (datetime) search hour
# param5 (str) database filename inside url
def update_xml(self, s_week, s_day, s_program, s_hour, url):
tree = xml.etree.ElementTree.parse(self.xml_handle)
root = tree.getroot()
# First case let's assume everything exists
try:
for week in root:
if week.get('weeknummer') == str(s_week):
for day in week:
# Let's check if the day in this week exists
if day.tag == str(s_day):
for program in day:
# Let's loop through all our programs
for detail in program:
# Need to find the right hour
if detail.tag == str('tijd') and detail.text == str(s_hour):
# If I exist exit all loops
raise StopIteration
# Let's assume this hour hasn't been added yet
else:
attrib = {}
programma_element = day.makeelement('programma', attrib)
naam_element = programma_element.makeelement('naam', attrib)
url_element = programma_element.makeelement('url', attrib)
tijd_element = programma_element.makeelement('tijd', attrib)
naam_element.text = str(s_program)
url_element.text = str(url)
tijd_element.text = str(s_hour)
programma_element.append(naam_element)
programma_element.append(url_element)
programma_element.append(tijd_element)
day.append(programma_element)
tree.write(self.xml_handle)
raise StopIteration
# Let's assume the day in this week doesn't exist
else:
self.write_day(tree, week, s_week, s_day, s_program, s_hour, url)
# If I exist exit all the loops
raise StopIteration
# Week element does not exist (probably start new week)
else:
self.write_week(root, tree, s_week, s_day, s_program, s_hour, url)
# If I exist exit all loops
raise StopIteration
except StopIteration:
self.status = 'XML is bijgewerkt'
pass
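# For reference, the XML this method maintains has the following shape (the
# root element comes from the existing uitzendingen.xml and is never created
# here; day elements are tagged with the Dutch day name from get_day()):
#
#   <week weeknummer="12">
#     <Maandag>
#       <programma>
#         <naam>...</naam>
#         <url>...</url>
#         <tijd>...</tijd>
#       </programma>
#     </Maandag>
#   </week>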
# Write a week element to the root node
# Args: param1 (element) root node
# param2 (element) tree XML filehandle
# param3 (int) search week
# param4 (str) search day
# param5 (str) search program
# param6 (datetime) search hour
# param7 (str) database filename inside url
def write_week(self, root, tree, week, s_day, s_program, s_hour, url):
attrib = {'weeknummer': str(week)}
newweek = root.makeelement('week', attrib)
root.append(newweek)
tree.write(self.xml_handle)
self.update_xml(week, s_day, s_program, s_hour, url)
# Write a day element to the week node
# Args: param1 (element) tree XML filehandle
# param2 (element) week node
# param3 (int) search week
# param4 (str) search day
# param5 (str) search program
# param6 (datetime) search hour
# param7 (str) database filename inside url
def write_day(self, tree, week, s_week, dag, s_program, s_hour, url):
attrib = {}
element = week.makeelement(dag, attrib)
week.append(element)
tree.write(self.xml_handle)
self.update_xml(s_week, dag, s_program, s_hour, url)
# =============================================================================
# FTP Handling
# =============================================================================
# Write the audio file to the FTP server
# Creates subdirectory if it doesn't exist
# Args: param1 (str) programname
# param2 (str) local filename
# param3 (str) database name of the file
def ftp_upload(self, programma, filename, dbfilename):
filehandle = open(self.path + filename + '.mp3', 'rb')
session = ftplib.FTP(self.ftp_server, self.ftp_username, self.ftp_password)
try:
self.status = 'Verbonden met FTP server'
session.cwd('Uitzendingen/' + programma)
except ftplib.error_perm:
session.mkd('Uitzendingen/' + programma)
session.cwd('Uitzendingen/' + programma)
self.status = 'Nieuwe map voor ' + programma + ' aangemaakt'
CHUNKSIZE = 100000
session.storbinary('STOR ' + (dbfilename + '.mp3'), filehandle, blocksize=CHUNKSIZE)
self.status = filename + ' met succes opgeslagen onder de volgende naam: ' + dbfilename
filehandle.close()
session.quit()
# =============================================================================
# FTP Upload XML
# =============================================================================
def xml_upload(self):
file = 'uitzendingen.xml'
filehandle = open('C:\\Users\\Gebruiker\\Desktop\\Executable Uitzending Gemist\\uitzendingen.xml', 'rb')
session = ftplib.FTP(self.ftp_server, self.ftp_username, self.ftp_password)
CHUNKSIZE = 100000
try:
self.status = 'Verbonden met FTP server'
session.cwd('Uitzendingen/')
session.storbinary('STOR ' + file, filehandle, blocksize=CHUNKSIZE)
self.status = 'XML met succes geupdate'
except Exception:
# An FTP failure is not fatal here; keep the previous status and retry next run
pass
filehandle.close()
session.quit()
# =============================================================================
# SQL Handling
# =============================================================================
# This method checks whether an hour on a day exists in the database
# Returns: str: program name which matched our day and hour
def link_program(self):
session = mysql.connector.connect(user=self.sql_username, password=self.sql_password,
database=self.sql_database, host=self.sql_server)
cursor = session.cursor()
query = "SELECT programmanaam, uur, dag FROM uitzendingen"
cursor.execute(query)
results = cursor.fetchall()
for row in results:
if str(row[1]) == (datetime.datetime.now(self.timezone) - datetime.timedelta(hours=1)).strftime('%H:00:00')\
and str(row[2]) == (datetime.datetime.now(self.timezone).strftime('%w')):
cursor.close()
session.close()
return row[0]
# No broadcast matched the previous hour on today's weekday: close the
# connection and return None so the caller can skip this run
cursor.close()
session.close()
return None
# =============================================================================
# This method prints time and status
# Used to show program activity
def update_status(self):
unix_timestamp = datetime.datetime.now().timestamp()
local_time = datetime.datetime.fromtimestamp(unix_timestamp, self.timezone)
r_var = datetime.datetime.strftime(local_time, '%H:%M:%S')
# No status to show here. Just using this to update the UI.
self.time = r_var
self.status = 'Wachten op volgende uitzending...'
# This method creates our database file name
# Args: param1 (str) local filename
# Returns: (str) database filename
def create_file_details(self, filename):
datehandle = datetime.datetime.fromtimestamp(os.path.getmtime(self.path + filename + '.mp3'),
self.timezone).strftime('%d%m%Y')
timehandle = datetime.datetime.strptime(filename[-6:-4], '%H').strftime('%H')
return str(datehandle) + str(timehandle)
# This method checks whether the file exists locally
# Args: param1 (str) local filename
# Returns: (bool) true if exists false otherwise
def check_if_exists(self, filename):
if os.path.isfile(self.path + filename + '.mp3'):
self.status = 'Het volgende item: ' + filename + '.mp3 gevonden'
return True
else:
self.status = 'Het item: ' + filename + '.mp3 is helaas niet gevonden'
return False
# This method links daynumber with Dutch full weekname
# Returns: (str) full Dutch weekname
def get_day(self):
day_x = int(datetime.datetime.now(self.timezone).strftime('%w'))
if day_x == 0:
return 'Zondag'
elif day_x == 1:
return 'Maandag'
elif day_x == 2:
return 'Dinsdag'
elif day_x == 3:
return 'Woensdag'
elif day_x == 4:
return 'Donderdag'
elif day_x == 5:
return 'Vrijdag'
elif day_x == 6:
return 'Zaterdag'
# =============================================================================
# 'Do Something' Method
# =============================================================================
# This method does the hourly search for ftp upload and xml update
# Kind of 'Come Together - Beatles'
def hour_search(self):
# Not the most efficient way of waiting,
# but run the job every hour at XX:02
self.status = 'Twee minuten wachten tot afronden programma opname automatisering'
# Let's do the upload job
# First get current hour - 1
hour_x = (datetime.datetime.now(self.timezone) - datetime.timedelta(hours=1)).strftime('%H')
# Get the dutch translation of the day name to check filename
day_name = self.get_day()
# Get the weeknumber to update XML (USE ISO8601 ELSE: %U)
weeknumber = (datetime.datetime.now(self.timezone).strftime('%V'))
filename = 'Programma Opname; ' + day_name + ' ' + hour_x + ' uur'
# If this audio file exists let's invoke the uploading session
if self.check_if_exists(filename):
# Look the programme up once; link_program() opens a new DB connection per call
programma = self.link_program()
if programma:
dbfilename = self.create_file_details(filename)
self.ftp_upload(programma=programma, filename=filename, dbfilename=dbfilename)
self.update_xml(weeknumber, day_name, programma, hour_x,
'https://unwdmi.nl:60000/uitzendingen/' + programma + '/' + dbfilename + '.mp3')
# Clear status
self.status = ''
# =============================================================================
# Schedule all my jobs
# =============================================================================
def job_scheduler(self):
schedule.every(5).seconds.do(self.update_status)
schedule.every().hour.at(':02').do(self.hour_search)
schedule.every().hour.at(':08').do(self.xml_upload)
while True:
schedule.run_pending()
sleep(1)
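# Usage sketch (assumes the FTP/SQL credentials above are filled in; a GUI or
# console front end would typically poll `model.status` and `model.time`):
#
#   model = Model()
#   threading.Thread(target=model.job_scheduler, daemon=True).start()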
|
server3.py
|
################################################################################
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#from itertools import izip
from random import normalvariate, random
from datetime import timedelta, datetime
import csv
import dateutil.parser
import os.path
import operator
import json
import re
import threading
#from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
import http.server
from socketserver import ThreadingMixIn
################################################################################
#
# Config
# Sim params
REALTIME = True
SIM_LENGTH = timedelta(days = 365 * 5)
MARKET_OPEN = datetime.today().replace(hour = 0, minute = 30, second = 0)
# Market params
# min / max / std
SPD = (2.0, 6.0, 0.1)
PX = (60.0, 150.0, 1)
FREQ = (12, 36, 50)
# Trades
OVERLAP = 4
################################################################################
#
# Test Data
def bwalk(min, max, std):
""" Generates a bounded random walk. """
rng = max - min
while True:
max += normalvariate(0, std)
yield abs((max % (rng * 2)) - rng) + min
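# e.g. the values of bwalk(2.0, 6.0, 0.1) drift randomly but are always folded
# back into the [2.0, 6.0] band by the modulo/abs reflection above.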
def market(t0 = MARKET_OPEN):
""" Generates a random series of market conditions,
(time, price, spread).
"""
for hours, px, spd in zip(bwalk(*FREQ), bwalk(*PX), bwalk(*SPD)):
yield t0, px, spd
t0 += timedelta(hours = abs(hours))
def orders(hist):
""" Generates a random set of limit orders (time, side, price, size) from
a series of market conditions.
"""
for t, px, spd in hist:
stock = 'ABC' if random() > 0.5 else 'DEF'
side, d = ('sell', 2) if random() > 0.5 else ('buy', -2)
order = round(normalvariate(px + (spd / d), spd / OVERLAP), 2)
size = int(abs(normalvariate(0, 100)))
yield t, stock, side, order, size
################################################################################
#
# Order Book
def add_book(book, order, size, _age = 10):
""" Add a new order and size to a book, and age the rest of the book. """
yield order, size, _age
for o, s, age in book:
if age > 0:
yield o, s, age - 1
def clear_order(order, size, book, op = operator.ge, _notional = 0):
""" Try to clear a sized order against a book, returning a tuple of
(notional, new_book) if successful, and None if not. _notional is a
recursive accumulator and should not be provided by the caller.
"""
(top_order, top_size, age), tail = book[0], book[1:]
if op(order, top_order):
_notional += min(size, top_size) * top_order
sdiff = top_size - size
if sdiff > 0:
return _notional, list(add_book(tail, top_order, sdiff, age))
elif len(tail) > 0:
return clear_order(order, -sdiff, tail, op, _notional)
def clear_book(buy = None, sell = None):
""" Clears all crossed orders from a buy and sell book, returning the new
books uncrossed.
"""
while buy and sell:
order, size, _ = buy[0]
new_book = clear_order(order, size, sell)
if new_book:
sell = new_book[1]
buy = buy[1:]
else:
break
return buy, sell
def order_book(orders, book, stock_name):
""" Generates a series of order books from a series of orders. Order books
are mutable lists, and mutating them during generation will affect the
next turn!
"""
for t, stock, side, order, size in orders:
if stock_name == stock:
new = add_book(book.get(side, []), order, size)
book[side] = sorted(new, reverse = side == 'buy', key = lambda x: x[0])
bids, asks = clear_book(**book)
yield t, bids, asks
################################################################################
#
# Test Data Persistence
def generate_csv():
""" Generate a CSV of order history. """
with open('test.csv', 'w') as f:
writer = csv.writer(f)
for t, stock, side, order, size in orders(market()):
if t > MARKET_OPEN + SIM_LENGTH:
break
writer.writerow([t, stock, side, order, size])
def read_csv():
""" Read a CSV or order history into a list. """
with open('test.csv', 'rt') as f:
for time, stock, side, order, size in csv.reader(f):
yield dateutil.parser.parse(time), stock, side, float(order), int(size)
################################################################################
#
# Server
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
""" Boilerplate class for a multithreaded HTTP Server, with working
shutdown.
"""
allow_reuse_address = True
def shutdown(self):
""" Override MRO to shutdown properly. """
self.socket.close()
http.server.HTTPServer.shutdown(self)
def route(path):
""" Decorator for a simple bottle-like web framework. Routes path to the
decorated method, with the rest of the path as an argument.
"""
def _route(f):
setattr(f, '__route__', path)
return f
return _route
def read_params(path):
""" Read query parameters into a dictionary if they are parseable,
otherwise returns None.
"""
query = path.split('?')
if len(query) > 1:
query = query[1].split('&')
return dict(map(lambda x: x.split('='), query))
def get(req_handler, routes):
""" Map a request to the appropriate route of a routes instance. """
for name, handler in routes.__class__.__dict__.items():
if hasattr(handler, "__route__"):
if re.search(handler.__route__, req_handler.path) is not None:
req_handler.send_response(200)
req_handler.send_header('Content-Type', 'application/json')
req_handler.send_header('Access-Control-Allow-Origin', '*')
req_handler.end_headers()
params = read_params(req_handler.path)
data = json.dumps(handler(routes, params)) + '\n'
req_handler.wfile.write(bytes(data, encoding = 'utf-8'))
return
def run(routes, host = '0.0.0.0', port = 8080):
""" Runs a class as a server whose methods have been decorated with
@route.
"""
class RequestHandler(http.server.BaseHTTPRequestHandler):
def log_message(self, *args, **kwargs):
pass
def do_GET(self):
get(self, routes)
server = ThreadedHTTPServer((host, port), RequestHandler)
thread = threading.Thread(target = server.serve_forever)
thread.daemon = True
thread.start()
print('HTTP server started on port %d' % port)
from time import sleep
try:
while True:
sleep(1)
except KeyboardInterrupt:
server.shutdown()
################################################################################
#
# App
ops = {
'buy': operator.le,
'sell': operator.ge,
}
class App(object):
""" The trading game server application. """
def __init__(self):
self._book_1 = dict()
self._book_2 = dict()
self._data_1 = order_book(read_csv(), self._book_1, 'ABC')
self._data_2 = order_book(read_csv(), self._book_2, 'DEF')
self._rt_start = datetime.now()
self._sim_start, _, _ = next(self._data_1)
self.read_10_first_lines()
@property
def _current_book_1(self):
for t, bids, asks in self._data_1:
if REALTIME:
while t > self._sim_start + (datetime.now() - self._rt_start):
yield t, bids, asks
else:
yield t, bids, asks
@property
def _current_book_2(self):
for t, bids, asks in self._data_2:
if REALTIME:
while t > self._sim_start + (datetime.now() - self._rt_start):
yield t, bids, asks
else:
yield t, bids, asks
def read_10_first_lines(self):
for _ in iter(range(10)):
next(self._data_1)
next(self._data_2)
@route('/query')
def handle_query(self, x):
""" Takes no arguments, and yields the current top of the book; the
best bid and ask and their sizes
"""
try:
t1, bids1, asks1 = next(self._current_book_1)
t2, bids2, asks2 = next(self._current_book_2)
except Exception as e:
print ("error getting stocks...reinitalizing app")
self.__init__()
t1, bids1, asks1 = next(self._current_book_1)
t2, bids2, asks2 = next(self._current_book_2)
t = t1 if t1 > t2 else t2
print ('Query received @ t%s' % t)
return [{
'id': x and x.get('id', None),
'stock': 'ABC',
'timestamp': str(t),
'top_bid': bids1 and {
'price': bids1[0][0],
'size': bids1[0][1]
},
'top_ask': asks1 and {
'price': asks1[0][0],
'size': asks1[0][1]
}
},
{
'id': x and x.get('id', None),
'stock': 'DEF',
'timestamp': str(t),
'top_bid': bids2 and {
'price': bids2[0][0],
'size': bids2[0][1]
},
'top_ask': asks2 and {
'price': asks2[0][0],
'size': asks2[0][1]
}
}]
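# Example round trip once the server is running (port is run()'s default;
# output abridged):
#
#   $ curl 'http://localhost:8080/query?id=1'
#   [{"id": "1", "stock": "ABC", "timestamp": "...", "top_bid": {...}, "top_ask": {...}},
#    {"id": "1", "stock": "DEF", ...}]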
################################################################################
#
# Main
if __name__ == '__main__':
if not os.path.isfile('test.csv'):
print ("No data found, generating...")
generate_csv()
run(App())
|
device.py
|
from time import sleep
from typing import List
from importlib import import_module
from multiprocessing import Process
import sys
sys.path.append('config/scripts/')
DeviceList = List[str]
class DeviceNotFoundException(Exception):
pass
class Device(object):
def __init__(self, name: str, script_name: str):
self._running = True
# Initialize midi library
from pygame import midi
self._midi = midi
self._midi.init()
if script_name.endswith('.py'):
script_name = script_name[:-3]
if script_name.endswith('.pyc'):
script_name = script_name[:-4]
self._module = import_module(script_name)
self._settings = import_module('settings')
# Iterate over all midi devices
self._input_id = None
self._output_id = None
for x in range(0, self._midi.get_count()):
# Check for name and if output or input
if self._midi.get_device_info(x)[1].decode('ascii') == name and self._midi.get_device_info(x)[2] == 1:
self._input_id = x
self._input_device = self._midi.Input(x)
self._settings.input_device = self._input_device
if self._midi.get_device_info(x)[1].decode('ascii') == name and self._midi.get_device_info(x)[3] == 1:
self._output_id = x
self._output_device = self._midi.Output(x)
self._settings.output_device = self._output_device
if self._output_id is None or self._input_id is None:  # ids start at 0, so compare against None
raise DeviceNotFoundException('No midi device with the name %s found.' % (name,))
def close(self):
self._running = False
sleep(0.02)
self._input_device.close()
self._output_device.close()
self._midi.quit()
def read(self):
while self._running:
if self._input_device.poll():
events = self._input_device.read(100)
for event in events:
# process = Process(target=self._module.execute, args=(self.quit, self._output_device, event[0][0], event[0][1], event[0][2], event[0][3]))
# process.daemon = True
# process.start()
self._module.execute(self.close, self._settings, event[0][0], event[0][1], event[0][2], event[0][3])
sleep(0.01)
@staticmethod
def get_devices() -> DeviceList:
from pygame import midi
midi.init()
names = [midi.get_device_info(x)[1].decode('ascii') for x in range(0, midi.get_count())]
midi.quit()
return names
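# Usage sketch (device name and script are hypothetical; the script must live
# in config/scripts/ and define an execute(close, settings, status, data1,
# data2, data3) callable, which is what read() invokes per MIDI event):
if __name__ == '__main__':
    print(Device.get_devices())
    # dev = Device('LPD8', 'example_script.py')
    # dev.read()   # blocks; the script can call close() to stop the loop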
|
commands.py
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009-2010,2015, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import Queue
import types
import getopt
import inspect
import threading
import multiprocessing
from . import callbacks, conf, ircdb, ircmsgs, ircutils, log, utils, world
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
"""Makes sure a command spawns a thread when called."""
def newf(self, irc, msg, args, *L, **kwargs):
if world.isMainThread():
targetArgs = (self.callingCommand, irc, msg, args) + tuple(L)
t = callbacks.CommandThread(target=self._callCommand,
args=targetArgs, kwargs=kwargs)
t.start()
else:
f(self, irc, msg, args, *L, **kwargs)
return utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
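# Typical plugin usage (a sketch; `wrap`, supybot's argument-converting
# decorator defined later in this module, is applied inside `thread`):
#
#   def slowlookup(self, irc, msg, args, host):
#       """<host>
#
#       Does a slow network lookup without blocking the main thread."""
#       ...
#   slowlookup = thread(wrap(slowlookup, ['something']))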
class ProcessTimeoutError(Exception):
"""Gets raised when a process is killed due to timeout."""
pass
def process(f, *args, **kwargs):
"""Runs a function <f> in a subprocess.
Several extra keyword arguments can be supplied.
<pn>, the pluginname, and <cn>, the command name, are strings used to
create the process name, for identification purposes.
<timeout>, if supplied, limits the length of execution of target
function to <timeout> seconds."""
timeout = kwargs.pop('timeout', None)
q = multiprocessing.Queue()
def newf(f, q, *args, **kwargs):
try:
r = f(*args, **kwargs)
q.put(r)
except Exception as e:
q.put(e)
targetArgs = (f, q,) + args
p = callbacks.CommandProcess(target=newf,
args=targetArgs, kwargs=kwargs)
p.start()
p.join(timeout)
if p.is_alive():
p.terminate()
raise ProcessTimeoutError, "%s aborted due to timeout." % (p.name,)
try:
v = q.get(block=False)
except Queue.Empty:
v = "Nothing returned."
if isinstance(v, Exception):
v = "Error: " + str(v)
return v
def regexp_wrapper(s, reobj, timeout, plugin_name, fcn_name):
'''A convenient wrapper to stuff regexp search queries through a subprocess.
This is used because specially-crafted regexps can use exponential time
and hang the bot.'''
def re_bool(s, reobj):
"""Since we can't enqueue match objects into the multiprocessing queue,
we'll just wrap the function to return bools."""
if reobj.search(s) is not None:
return True
else:
return False
try:
v = process(re_bool, s, reobj, timeout=timeout, pn=plugin_name, cn=fcn_name)
return v
except ProcessTimeoutError:
return False
class UrlSnarfThread(world.SupyThread):
def __init__(self, *args, **kwargs):
assert 'url' in kwargs
kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
(world.threadsSpawned, kwargs.pop('url'))
super(UrlSnarfThread, self).__init__(*args, **kwargs)
self.setDaemon(True)
def run(self):
try:
super(UrlSnarfThread, self).run()
except utils.web.Error, e:
log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
timeout = conf.supybot.snarfThrottle
def key(self, channel):
return channel
_snarfed = SnarfQueue()
class SnarfIrc(object):
def __init__(self, irc, channel, url):
self.irc = irc
self.url = url
self.channel = channel
def __getattr__(self, attr):
return getattr(self.irc, attr)
def reply(self, *args, **kwargs):
_snarfed.enqueue(self.channel, self.url)
return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
"""Protects the snarfer from loops (with other bots) and whatnot."""
def newf(self, irc, msg, match, *L, **kwargs):
url = match.group(0)
channel = msg.args[0]
if not irc.isChannel(channel) or (ircmsgs.isCtcp(msg) and not
ircmsgs.isAction(msg)):
return
if ircdb.channels.getChannel(channel).lobotomized:
self.log.debug('Not snarfing in %s: lobotomized.', channel)
return
if _snarfed.has(channel, url):
self.log.info('Throttling snarf of %s in %s.', url, channel)
return
irc = SnarfIrc(irc, channel, url)
def doSnarf():
_snarfLock.acquire()
try:
# This has to be *after* we've acquired the lock so we can be
# sure that all previous urlSnarfers have already run to
# completion.
if msg.repliedTo:
self.log.debug('Not snarfing, msg is already repliedTo.')
return
f(self, irc, msg, match, *L, **kwargs)
finally:
_snarfLock.release()
if threading.currentThread() is not world.mainThread:
doSnarf()
else:
L = list(L)
t = UrlSnarfThread(target=doSnarf, url=url)
t.start()
newf = utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10:
try:
return int(float(s))
except OverflowError:
raise ValueError('I don\'t understand numbers that large.')
else:
raise
def getInt(irc, msg, args, state, type='integer', p=None):
try:
i = _int(args[0])
if p is not None:
if not p(i):
state.errorInvalid(type, args[0])
state.args.append(i)
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type='non-integer value'):
try:
i = _int(args[0])
state.errorInvalid(type, args[0])
except ValueError:
state.args.append(args.pop(0))
def getLong(irc, msg, args, state, type='long'):
getInt(irc, msg, args, state, type)
state.args[-1] = long(state.args[-1])
def getFloat(irc, msg, args, state, type='floating point number'):
try:
state.args.append(float(args[0]))
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getPositiveInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>0, type='positive integer', *L)
def getNonNegativeInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>=0, type='non-negative integer', *L)
def getIndex(irc, msg, args, state):
getInt(irc, msg, args, state, type='index')
if state.args[-1] > 0:
state.args[-1] -= 1
def getId(irc, msg, args, state, kind=None):
type = 'id'
if kind is not None and not kind.endswith('id'):
type = kind + ' id'
original = args[0]
try:
args[0] = args[0].lstrip('#')
getInt(irc, msg, args, state, type=type)
except Exception, e:
args[0] = original
raise
def getExpiry(irc, msg, args, state):
now = int(time.time())
try:
expires = _int(args[0])
if expires:
expires += now
state.args.append(expires)
del args[0]
except ValueError:
state.errorInvalid('number of seconds', args[0])
def getBoolean(irc, msg, args, state):
try:
state.args.append(utils.str.toBool(args[0]))
del args[0]
except ValueError:
state.errorInvalid('boolean', args[0])
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
if args:
for otherIrc in world.ircs:
if otherIrc.network.lower() == args[0].lower():
state.args.append(otherIrc)
del args[0]
return
if errorIfNoMatch:
raise callbacks.ArgumentError
else:
state.args.append(irc)
def getHaveOp(irc, msg, args, state, action='do that'):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not even in %s.' % state.channel, Raise=True)
if not irc.state.channels[state.channel].isOp(irc.nick):
state.error('I need to be opped to %s.' % action, Raise=True)
def validChannel(irc, msg, args, state):
if irc.isChannel(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('channel', args[0])
def getHostmask(irc, msg, args, state):
if ircutils.isUserHostmask(args[0]):
state.args.append(args.pop(0))
else:
try:
hostmask = irc.state.nickToHostmask(args[0])
state.args.append(hostmask)
del args[0]
except KeyError:
state.errorInvalid('nick or hostmask', args[0])
def getBanmask(irc, msg, args, state):
getHostmask(irc, msg, args, state)
getChannel(irc, msg, args, state)
channel = state.channel
banmaskstyle = conf.supybot.protocols.irc.banmask
state.args[-1] = banmaskstyle.makeBanmask(state.args[-1])
def getUser(irc, msg, args, state):
try:
state.args.append(ircdb.users.getUser(msg.prefix))
except KeyError:
state.errorNotRegistered(Raise=True)
def getOtherUser(irc, msg, args, state):
# Although ircdb.users.getUser could accept a hostmask, we're explicitly
# excluding that from our interface with this check
if ircutils.isUserHostmask(args[0]):
state.errorNoUser(args[0])
try:
state.args.append(ircdb.users.getUser(args[0]))
del args[0]
except KeyError:
try:
getHostmask(irc, msg, [args[0]], state)
hostmask = state.args.pop()
state.args.append(ircdb.users.getUser(hostmask))
del args[0]
except (KeyError, callbacks.Error):
state.errorNoUser(name=args[0])
def _getRe(f):
def get(irc, msg, args, state, convert=True):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
_ = f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
if convert:
state.args.append(f(s))
else:
state.args.append(s)
else:
raise ValueError
except (ValueError, IndexError):
args[:] = original
state.errorInvalid('regular expression', s)
return get
getMatcher = _getRe(utils.str.perlReToPythonRe)
getReplacer = _getRe(utils.str.perlReToReplacer)
def getNick(irc, msg, args, state):
if ircutils.isNick(args[0]):
if 'nicklen' in irc.state.supported:
if len(args[0]) > irc.state.supported['nicklen']:
state.errorInvalid('nick', args[0],
'That nick is too long for this server.')
state.args.append(args.pop(0))
else:
state.errorInvalid('nick', args[0])
def getSeenNick(irc, msg, args, state, errmsg=None):
try:
_ = irc.state.nickToHostmask(args[0])
state.args.append(args.pop(0))
except KeyError:
if errmsg is None:
errmsg = 'I haven\'t seen %s.' % args[0]
state.error(errmsg, Raise=True)
def getChannel(irc, msg, args, state):
if state.channel:
return
if args and irc.isChannel(args[0]):
channel = args.pop(0)
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.channel = channel
state.args.append(channel)
def getChannelDb(irc, msg, args, state, **kwargs):
channelSpecific = conf.supybot.databases.plugins.channelSpecific
try:
getChannel(irc, msg, args, state, **kwargs)
channel = channelSpecific.getChannelLink(state.channel)
state.channel = channel
state.args[-1] = channel
except (callbacks.ArgumentError, IndexError):
if channelSpecific():
raise
channel = channelSpecific.link()
if not conf.get(channelSpecific.link.allow, channel):
log.warning('channelSpecific.link is globally set to %s, but '
'%s disallowed linking to its db.', channel, channel)
raise
else:
channel = channelSpecific.getChannelLink(channel)
state.channel = channel
state.args.append(channel)
def inChannel(irc, msg, args, state):
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not in %s.' % state.channel, Raise=True)
def onlyInChannel(irc, msg, args, state):
if not (irc.isChannel(msg.args[0]) and msg.args[0] in irc.state.channels):
state.error('This command may only be given in a channel that I am in.',
Raise=True)
else:
state.channel = msg.args[0]
state.args.append(state.channel)
def callerInGivenChannel(irc, msg, args, state):
channel = args[0]
if irc.isChannel(channel):
if channel in irc.state.channels:
if msg.nick in irc.state.channels[channel].users:
state.args.append(args.pop(0))
else:
state.error('You must be in %s.' % channel, Raise=True)
else:
state.error('I\'m not in %s.' % channel, Raise=True)
else:
state.errorInvalid('channel', args[0])
def nickInChannel(irc, msg, args, state):
originalArgs = state.args[:]
inChannel(irc, msg, args, state)
state.args = originalArgs
if args[0] not in irc.state.channels[state.channel].users:
state.error('%s is not in %s.' % (args[0], state.channel), Raise=True)
state.args.append(args.pop(0))
def getChannelOrNone(irc, msg, args, state):
try:
getChannel(irc, msg, args, state)
except callbacks.ArgumentError:
state.args.append(None)
def checkChannelCapability(irc, msg, args, state, cap):
getChannel(irc, msg, args, state)
cap = ircdb.canonicalCapability(cap)
cap = ircdb.makeChannelCapability(state.channel, cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def getOp(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'op')
def getHalfop(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'halfop')
def getVoice(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'voice')
def getLowered(irc, msg, args, state):
state.args.append(ircutils.toLower(args.pop(0)))
def getSomething(irc, msg, args, state, errorMsg=None, p=None):
if p is None:
p = lambda _: True
if not args[0] or not p(args[0]):
if errorMsg is None:
errorMsg = 'You must not give the empty string as an argument.'
state.error(errorMsg, Raise=True)
else:
state.args.append(args.pop(0))
def getSomethingNoSpaces(irc, msg, args, state, errorMsg=None):
def p(s):
return len(s.split(None, 1)) == 1
if errorMsg is None:
errorMsg='You must not give a string containing spaces as an argument.'
getSomething(irc, msg, args, state, errorMsg=errorMsg, p=p)
def private(irc, msg, args, state):
if irc.isChannel(msg.args[0]):
state.errorRequiresPrivacy(Raise=True)
def public(irc, msg, args, state, errmsg=None):
if not irc.isChannel(msg.args[0]):
if errmsg is None:
errmsg = 'This message must be sent in a channel.'
state.error(errmsg, Raise=True)
def checkCapability(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def owner(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'owner')
def admin(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'admin')
def anything(irc, msg, args, state):
state.args.append(args.pop(0))
def getGlob(irc, msg, args, state):
glob = args.pop(0)
if '*' not in glob and '?' not in glob:
glob = '*%s*' % glob
state.args.append(glob)
def getUrl(irc, msg, args, state):
if utils.web.urlRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('url', args[0])
def getEmail(irc, msg, args, state):
if utils.net.emailRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('email', args[0])
def getHttpUrl(irc, msg, args, state):
if utils.web.httpUrlRe.match(args[0]):
state.args.append(args.pop(0))
elif utils.web.httpUrlRe.match('http://' + args[0]):
state.args.append('http://' + args.pop(0))
else:
state.errorInvalid('http url', args[0])
def getNow(irc, msg, args, state):
state.args.append(int(time.time()))
def getCommandName(irc, msg, args, state):
if ' ' in args[0]:
state.errorInvalid('command name', args[0])
else:
state.args.append(callbacks.canonicalName(args.pop(0)))
def getIp(irc, msg, args, state):
if utils.net.isIP(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('ip', args[0])
def getLetter(irc, msg, args, state):
if len(args[0]) == 1:
state.args.append(args.pop(0))
else:
state.errorInvalid('letter', args[0])
def getMatch(irc, msg, args, state, regexp, errmsg):
m = regexp.search(args[0])
if m is not None:
state.args.append(m)
del args[0]
else:
state.error(errmsg, Raise=True)
def getLiteral(irc, msg, args, state, literals, errmsg=None):
# ??? Should we allow abbreviations?
if isinstance(literals, basestring):
literals = (literals,)
abbrevs = utils.abbrev(literals)
if args[0] in abbrevs:
state.args.append(abbrevs[args.pop(0)])
elif errmsg is not None:
state.error(errmsg, Raise=True)
else:
raise callbacks.ArgumentError
def getTo(irc, msg, args, state):
if args[0].lower() == 'to':
args.pop(0)
def getPlugin(irc, msg, args, state, require=True):
cb = irc.getCallback(args[0])
if cb is not None:
state.args.append(cb)
del args[0]
elif require:
state.errorInvalid('plugin', args[0])
else:
state.args.append(None)
def getIrcColor(irc, msg, args, state):
if args[0] in ircutils.mircColors:
state.args.append(ircutils.mircColors[args.pop(0)])
else:
state.errorInvalid('irc color')
def getText(irc, msg, args, state):
if args:
state.args.append(' '.join(args))
args[:] = []
else:
raise IndexError
wrappers = ircutils.IrcDict({
'admin': admin,
'anything': anything,
'banmask': getBanmask,
'boolean': getBoolean,
'callerInGivenChannel': callerInGivenChannel,
'capability': getSomethingNoSpaces,
'channel': getChannel,
'channelDb': getChannelDb,
'checkCapability': checkCapability,
'checkChannelCapability': checkChannelCapability,
'color': getIrcColor,
'commandName': getCommandName,
'email': getEmail,
'expiry': getExpiry,
'filename': getSomething, # XXX Check for validity.
'float': getFloat,
'glob': getGlob,
'halfop': getHalfop,
'haveOp': getHaveOp,
'hostmask': getHostmask,
'httpUrl': getHttpUrl,
'id': getId,
'inChannel': inChannel,
'index': getIndex,
'int': getInt,
'ip': getIp,
'letter': getLetter,
'literal': getLiteral,
'long': getLong,
'lowered': getLowered,
'matches': getMatch,
'networkIrc': getNetworkIrc,
'nick': getNick,
'nickInChannel': nickInChannel,
'nonInt': getNonInt,
'nonNegativeInt': getNonNegativeInt,
'now': getNow,
'onlyInChannel': onlyInChannel,
'op': getOp,
'otherUser': getOtherUser,
'owner': owner,
'plugin': getPlugin,
'positiveInt': getPositiveInt,
'private': private,
'public': public,
'regexpMatcher': getMatcher,
'regexpReplacer': getReplacer,
'seenNick': getSeenNick,
'something': getSomething,
'somethingWithoutSpaces': getSomethingNoSpaces,
'text': getText,
'to': getTo,
'url': getUrl,
'user': getUser,
'validChannel': validChannel,
'voice': getVoice,
})
def addConverter(name, wrapper):
wrappers[name] = wrapper
class UnknownConverter(KeyError):
pass
def getConverter(name):
try:
return wrappers[name]
except KeyError, e:
raise UnknownConverter, str(e)
def callConverter(name, irc, msg, args, state, *L):
getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine what the nature of conversions is; whether they're
# defaulted, or many of them are allowed, etc. Contexts should be reusable;
# i.e., they should not maintain state between calls.
###
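# For instance (illustrative only), a command wrapped with wrap() further
# below might use the spec:
#
#   ['channel', optional('int', 5), rest('text')]
#
# Here 'channel' is a bare converter name, optional('int', 5) is a context
# that falls back to 5 when no matching argument is given, and rest('text')
# joins all remaining arguments into a single string.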
def contextify(spec):
if not isinstance(spec, context):
spec = context(spec)
return spec
def setDefault(state, default):
if callable(default):
state.args.append(default())
else:
state.args.append(default)
class context(object):
def __init__(self, spec):
self.args = ()
self.spec = spec # for repr
if isinstance(spec, tuple):
assert spec, 'tuple spec must not be empty.'
self.args = spec[1:]
self.converter = getConverter(spec[0])
elif spec is None:
self.converter = getConverter('anything')
elif isinstance(spec, basestring):
self.args = ()
self.converter = getConverter(spec)
else:
assert isinstance(spec, context)
self.converter = spec
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
self.converter(irc, msg, args, state, *self.args)
log.debug('args after %r: %r', self, args)
def __repr__(self):
return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
def __call__(self, irc, msg, args, state):
if args:
original = args[:]
args[:] = [' '.join(args)]
try:
super(rest, self).__call__(irc, msg, args, state)
except Exception, e:
args[:] = original
else:
raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
def __init__(self, spec, default=None):
self.__parent = super(additional, self)
self.__parent.__init__(spec)
self.default = default
def __call__(self, irc, msg, args, state):
try:
self.__parent.__call__(irc, msg, args, state)
except IndexError:
log.debug('Got IndexError, returning default.')
setDefault(state, self.default)
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
def __call__(self, irc, msg, args, state):
try:
super(optional, self).__call__(irc, msg, args, state)
except (callbacks.ArgumentError, callbacks.Error), e:
log.debug('Got %s, returning default.', utils.exnToString(e))
state.errored = False
setDefault(state, self.default)
class any(context):
def __init__(self, spec, continueOnError=False):
self.__parent = super(any, self)
self.__parent.__init__(spec)
self.continueOnError = continueOnError
def __call__(self, irc, msg, args, state):
st = state.essence()
try:
while args:
self.__parent.__call__(irc, msg, args, st)
except IndexError:
pass
except (callbacks.ArgumentError, callbacks.Error), e:
if not self.continueOnError:
raise
else:
log.debug('Got %s, returning default.', utils.exnToString(e))
pass
state.args.append(st.args)
class many(any):
def __call__(self, irc, msg, args, state):
super(many, self).__call__(irc, msg, args, state)
if not state.args[-1]:
state.args.pop()
raise callbacks.ArgumentError
class first(context):
def __init__(self, *specs, **kw):
if 'default' in kw:
self.default = kw.pop('default')
assert not kw, 'Bad kwargs for first.__init__'
self.spec = specs # for __repr__
self.specs = map(contextify, specs)
def __call__(self, irc, msg, args, state):
errored = False
for spec in self.specs:
try:
spec(irc, msg, args, state)
return
except Exception, e:
errored = state.errored
state.errored = False
continue
if hasattr(self, 'default'):
state.args.append(self.default)
else:
state.errored = errored
raise e
class reverse(context):
def __call__(self, irc, msg, args, state):
args[:] = args[::-1]
super(reverse, self).__call__(irc, msg, args, state)
args[:] = args[::-1]
class commalist(context):
def __call__(self, irc, msg, args, state):
original = args[:]
st = state.essence()
trailingComma = True
try:
while trailingComma:
arg = args.pop(0)
if not arg.endswith(','):
trailingComma = False
for part in arg.split(','):
if part: # trailing commas
super(commalist, self).__call__(irc, msg, [part], st)
state.args.append(st.args)
except Exception, e:
args[:] = original
raise
class getopts(context):
"""The empty string indicates that no argument is taken; None indicates
that there is no converter for the argument."""
def __init__(self, getopts):
self.spec = getopts # for repr
self.getopts = {}
self.getoptL = []
for (name, spec) in getopts.iteritems():
if spec == '':
self.getoptL.append(name)
self.getopts[name] = None
else:
self.getoptL.append(name + '=')
self.getopts[name] = contextify(spec)
log.debug('getopts: %r', self.getopts)
log.debug('getoptL: %r', self.getoptL)
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
(optlist, rest) = getopt.getopt(args, '', self.getoptL)
getopts = []
for (opt, arg) in optlist:
opt = opt[2:] # Strip --
log.debug('opt: %r, arg: %r', opt, arg)
context = self.getopts[opt]
if context is not None:
st = state.essence()
context(irc, msg, [arg], st)
assert len(st.args) == 1
getopts.append((opt, st.args[0]))
else:
getopts.append((opt, True))
state.args.append(getopts)
args[:] = rest
log.debug('args after %r: %r', self, args)
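# An illustrative spec: getopts({'verbose': '', 'count': 'int'}) accepts
# options such as "--verbose --count 3"; the value appended to state.args
# would then be [('verbose', True), ('count', 3)], with any remaining words
# left in args for later contexts.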
###
# This is our state object, passed to converters along with irc, msg, and args.
###
class State(object):
log = log
def __init__(self, types):
self.args = []
self.kwargs = {}
self.types = types
self.channel = None
self.errored = False
def __getattr__(self, attr):
if attr.startswith('error'):
self.errored = True
return getattr(dynamic.irc, attr)
else:
raise AttributeError, attr
def essence(self):
st = State(self.types)
for (attr, value) in self.__dict__.iteritems():
if attr not in ('args', 'kwargs'):
setattr(st, attr, value)
return st
def __repr__(self):
return '%s(args=%r, kwargs=%r, channel=%r)' % (self.__class__.__name__,
self.args, self.kwargs,
self.channel)
###
# This is a compiled Spec object.
###
class Spec(object):
def _state(self, types, attrs={}):
st = State(types)
st.__dict__.update(attrs)
st.allowExtra = self.allowExtra
return st
def __init__(self, types, allowExtra=False):
self.types = types
self.allowExtra = allowExtra
utils.seq.mapinto(contextify, self.types)
def __call__(self, irc, msg, args, stateAttrs={}):
state = self._state(self.types[:], stateAttrs)
while state.types:
context = state.types.pop(0)
try:
context(irc, msg, args, state)
except IndexError:
raise callbacks.ArgumentError
if args and not state.allowExtra:
log.debug('args and not self.allowExtra: %r', args)
raise callbacks.ArgumentError
return state
def wrap(f, specList=[], name=None, **kw):
name = name or f.func_name
spec = Spec(specList, **kw)
def newf(self, irc, msg, args, **kwargs):
state = spec(irc, msg, args, stateAttrs={'cb': self, 'log': self.log})
self.log.debug('State before call: %s', state)
if state.errored:
self.log.debug('Refusing to call %s due to state.errored.', f)
else:
try:
f(self, irc, msg, args, *state.args, **state.kwargs)
except TypeError:
self.log.error('Spec: %s', specList)
self.log.error('Received args: %s', args)
code = f.func_code
funcArgs = inspect.getargs(code)[0][len(self.commandArgs):]
self.log.error('Extra args: %s', funcArgs)
raise
return utils.python.changeFunctionName(newf, name, f.__doc__)
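# A hedged usage sketch (the plugin and command below are hypothetical, not
# part of this module): a command method takes (self, irc, msg, args) plus the
# converted arguments named in its spec.
#
#   class Repeater(callbacks.Plugin):
#       def repeat(self, irc, msg, args, channel, count, text):
#           """[<channel>] <count> <text>
#
#           Repeats <text> <count> times."""
#           irc.reply(' '.join([text] * count))
#       repeat = wrap(repeat, ['channel', 'positiveInt', 'text'])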
__all__ = [
# Contexts.
'any', 'many',
'optional', 'additional',
'rest', 'getopts',
'first', 'reverse',
'commalist',
# Converter helpers.
'getConverter', 'addConverter', 'callConverter',
# Decorators.
'urlSnarfer', 'thread',
# Functions.
'wrap', 'process', 'regexp_wrapper',
# Stuff for testing.
'Spec',
]
# This doesn't work. Suck.
## if world.testing:
## __all__.append('Spec')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
job.py
|
import threading
import traceback
from util.array import unzip, none
from util.collection import for_each
from util.error import AlreadyRunning, CancelledError
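# Job lifecycle: a Job starts out PENDING, becomes RUNNING once start() is
# called, and ends up FINISHED (normal completion) or CANCELLED (via cancel()).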
[
PENDING,
RUNNING,
FINISHED,
CANCELLED
] = range(4)
TIMEOUT_ERROR = 'TimeoutError'
CANCELLED_ERROR = 'CancelledError'
class _Waiter(object):
def __init__(self):
self.finished_jobs = []
self.event = threading.Event()
def add_result(self, future):
self.finished_jobs.append(future)
class _NCompletedWaiter(_Waiter):
def __init__(self, pending_calls):
super().__init__()
self.lock = threading.Lock()
self.pending_calls = pending_calls
def _decrement_pending_calls(self):
with self.lock:
self.pending_calls -= 1
if not self.pending_calls:
self.event.set()
def add_result(self, job):
super().add_result(job)
self._decrement_pending_calls()
class _AcquireJobs(object):
def __init__(self, jobs):
self.jobs = sorted(jobs, key=id)
def __enter__(self):
for job in self.jobs:
job._condition.acquire()
def __exit__(self, *args):
for job in self.jobs:
job._condition.release()
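# Wait helpers in the spirit of concurrent.futures.wait: block until at least
# `count` of the given jobs have completed (or the optional timeout expires)
# and return the set of completed jobs.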
def n_completed(jobs, count, timeout=None):
with _AcquireJobs(jobs):
done = set(j for j in jobs if j._state > RUNNING)
not_done = set(jobs) - done
count = min(count - len(done), len(not_done))
if count > 0:
waiter = _NCompletedWaiter(count)
for_each(not_done, lambda j: j._waiters.append(waiter))
else:
return done
waiter.event.wait(timeout)
for job in not_done:
with job._condition:
job._waiters.remove(waiter)
done.update(waiter.finished_jobs)
return done
def first_completed(jobs, timeout=None):
return n_completed(jobs, 1, timeout)
def all_completed(jobs, timeout=None):
return n_completed(jobs, len(jobs), timeout)
class Job:
def __init__(self, context, job_id):
self.job_id = job_id
self.context = context
self._indexes = []
self._futures = []
self._handled = []
self._results = []
self._waiters = []
self._state = PENDING
self._condition = threading.Condition()
self._processor = threading.Thread(
name=f'JobManagerThread {job_id}',
target=self._process, args=(context,)
)
def _get_active_futures(self):
return [
self._futures[i]
for i in range(len(self._handled))
if not self._handled[i]
]
def _get_completed_values(self):
return [result[2] for result in self._results if result]
def _handle_future(self, future, i=None):
with self._condition:
i = i or self._futures.index(future)
try:
result = future.result(timeout=0)
for j, index in enumerate(self._indexes[i]):
self._results[index] = result[j]
except Exception as e:
if type(e).__name__ != CANCELLED_ERROR:
print(f'Exception in task {i}: ', repr(e))
print(traceback.format_exc())
self._handled[i] = True
def _process(self, context):
fn = context.function.get_function()
data = context.function.prepare_data(
context.state,
context.instance,
context.backdoor,
context.dim_type,
)
awaiter = context.executor.get_awaiter()
completed = []
tasks = context.get_tasks(self._results)
while self.running() and len(tasks) > 0:
index_futures = context.executor.submit_all(fn, data, *tasks)
indexes, futures = unzip(index_futures)
is_reasonably = True
with self._condition:
self._indexes.extend(indexes)
self._futures.extend(futures)
self._results.extend(none(tasks))
self._handled.extend(none(futures))
active = self._get_active_futures()
while len(active) > 0 and is_reasonably:
count, timeout = context.get_limits(completed, len(self._results))
for future in awaiter(active, timeout):
self._handle_future(future)
active = self._get_active_futures()
completed = self._get_completed_values()
is_reasonably = context.is_reasonably(active, completed)
tasks = context.get_tasks(self._results)
with self._condition:
if self._state == RUNNING:
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
def start(self):
if self._state != PENDING:
raise AlreadyRunning()
self._state = RUNNING
self._processor.start()
return self
def cancel(self):
with self._condition:
if self._state == FINISHED:
return False
if self._state == RUNNING:
self._state = CANCELLED
for future in self._futures:
future.cancel()
self._condition.notify_all()
if self._processor is not None:
self._processor.join()
self._processor = None
return True
def cancelled(self):
with self._condition:
return self._state == CANCELLED
def running(self):
with self._condition:
return self._state == RUNNING
def done(self):
with self._condition:
return self._state > RUNNING
def result(self, timeout):
with self._condition:
if self._state == CANCELLED:
raise CancelledError()
elif self._state == FINISHED:
return self._results
self._condition.wait(timeout)
if self._state == CANCELLED:
raise CancelledError()
elif self._state == FINISHED:
return self._results
else:
raise TimeoutError()
|
locators.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020
# Author: xiaoweixiang
#
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
try:
return client.list_packages()
finally:
client('close')()
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None: # pragma: no cover
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
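    # For example (purely illustrative tag triples):
    #   locator.wheel_tags = [('cp39', 'cp39', 'manylinux2014_x86_64'),
    #                         ('py3', 'none', 'any')]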
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
            except queue.Empty:
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None: # pragma: no cover
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
is_downloadable = basename.endswith(self.downloadable_extensions)
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme == 'https', 'pypi.org' in t.netloc,
is_downloadable, is_wheel, compatible, basename)
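    # Because score_url returns a tuple, comparison is lexicographic: https
    # beats http, pypi.org-hosted beats external, downloadable beats
    # non-downloadable, wheels beat non-wheels, compatible wheels beat
    # incompatible ones, and any remaining ties are broken by basename.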
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
        Attempt to split a filename into a project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='): # pragma: no cover
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/': # pragma: no cover
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if not is_compatible(wheel, self.wheel_tags):
logger.debug('Wheel not compatible: %s', path)
else:
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e: # pragma: no cover
logger.warning('invalid path for wheel: %s', path)
elif not path.endswith(self.downloadable_extensions): # pragma: no cover
logger.debug('Not downloadable: %s', path)
else: # downloadable extension
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t: # pragma: no cover
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver: # pragma: no cover
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
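    # A hedged example of the result shape (the URL is hypothetical):
    #   convert_url_to_download_info(
    #       'https://example.org/packages/foo-1.0.tar.gz#sha256=0123abcd', 'foo')
    # would return something like:
    #   {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
    #    'url': 'https://example.org/packages/foo-1.0.tar.gz',
    #    'sha256_digest': '0123abcd'}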
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at a "digests" dictionary
or keys of the form 'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
if 'digests' in info:
digests = info['digests']
for algo in ('sha256', 'md5'):
if algo in digests:
result = (algo, digests[algo])
break
if not result:
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None: # pragma: no cover
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd: # pragma: no cover
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads to use for I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
self.platform_check = False # See issue #112
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
r'win(32|_amd64)|macosx_?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self.platform_check and self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
try:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except MetadataInvalidError: # e.g. invalid versions
pass
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched,
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
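# A hedged usage sketch (the requirement string is illustrative; this assumes
# the module lives in the distlib package, as its relative imports suggest):
#
#   from distlib.locators import locate
#   dist = locate('requests (>= 2.0)')
#   if dist is not None:
#       print(dist.name_and_version, dist.source_url)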
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if meta_extras and dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
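# Editorial usage sketch (not part of distlib itself): how DependencyFinder is
# typically driven. The requirement string 'requests (>= 2.0)' is purely a
# hypothetical example; find() consults the configured locator (PyPI for
# default_locator), so running this needs network access.
if __name__ == '__main__':  # pragma: no cover
    finder = DependencyFinder(default_locator)
    found, problems = finder.find('requests (>= 2.0)')
    for d in sorted(found, key=lambda dist: dist.name_and_version):
        kind = 'build-time only' if d.build_time_dependency else 'runtime'
        print('%s [%s]' % (d.name_and_version, kind))
    for problem in problems:
        print('problem: %r' % (problem,))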
|
test_context.py
|
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import mock
import threading
from .base import BaseTestCase
from tests.test_tracer import get_dummy_tracer
import pytest
from oteltrace.span import Span
from oteltrace.context import Context
from oteltrace.constants import HOSTNAME_KEY
from oteltrace.ext.priority import USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP
@pytest.fixture
def tracer_with_debug_logging():
    # All the tracers, dummy or not, share the same logging object.
tracer = get_dummy_tracer()
level = tracer.log.level
tracer.log.setLevel(logging.DEBUG)
try:
yield tracer
finally:
tracer.log.setLevel(level)
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans(log, tracer_with_debug_logging):
# when the root parent is finished, notify if there are spans still pending
tracer = tracer_with_debug_logging
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
child_1._parent = root
child_2._parent = root
ctx.add_span(root)
ctx.add_span(child_1)
ctx.add_span(child_2)
# close only the parent
root.finish()
unfinished_spans_log = log.call_args_list[-3][0][2]
child_1_log = log.call_args_list[-2][0][1]
child_2_log = log.call_args_list[-1][0][1]
assert 2 == unfinished_spans_log
assert 'name child_1' in child_1_log
assert 'name child_2' in child_2_log
assert 'duration 0.000000s' in child_1_log
assert 'duration 0.000000s' in child_2_log
class TestTracingContext(BaseTestCase):
"""
Tests related to the ``Context`` class that hosts the trace for the
current execution flow.
"""
@contextlib.contextmanager
def override_partial_flush(self, ctx, enabled, min_spans):
original_enabled = ctx._partial_flush_enabled
original_min_spans = ctx._partial_flush_min_spans
ctx._partial_flush_enabled = enabled
ctx._partial_flush_min_spans = min_spans
try:
yield
finally:
ctx._partial_flush_enabled = original_enabled
ctx._partial_flush_min_spans = original_min_spans
def test_add_span(self):
# it should add multiple spans
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
assert 1 == len(ctx._trace)
assert 'fake_span' == ctx._trace[0].name
assert ctx == span.context
def test_context_sampled(self):
# a context is sampled if the spans are sampled
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
span.finish()
trace, sampled = ctx.get()
assert sampled is True
assert ctx.sampling_priority is None
def test_context_priority(self):
        # setting the sampling priority must not change the sampled flag
ctx = Context()
for priority in [USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP, None, 999]:
ctx.sampling_priority = priority
span = Span(tracer=None, name=('fake_span_%s' % repr(priority)))
ctx.add_span(span)
span.finish()
            # It's "normal" to have sampled be true even when priority sampling
            # is set to 0 or -1; if it were false, it would stay false even with
            # priority set to 2. The only criterion for sending (or not sending)
            # the spans to the agent should be this "sampled" attribute, as it's
            # tightly related to the trace weight.
assert priority == ctx.sampling_priority
trace, sampled = ctx.get()
assert sampled is True, 'priority has no impact on sampled status'
def test_current_span(self):
# it should return the current active span
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
assert span == ctx.get_current_span()
def test_current_root_span_none(self):
# it should return none when there is no root span
ctx = Context()
assert ctx.get_current_root_span() is None
def test_current_root_span(self):
# it should return the current active root span
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
assert span == ctx.get_current_root_span()
def test_close_span(self):
        # it should keep track of closed spans, moving
        # the current active span to its parent
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ctx.close_span(span)
assert ctx.get_current_span() is None
def test_get_trace(self):
# it should return the internal trace structure
# if the context is finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
span.finish()
trace, sampled = ctx.get()
assert [span] == trace
assert sampled is True
# the context should be empty
assert 0 == len(ctx._trace)
assert ctx._current_span is None
def test_get_trace_empty(self):
# it should return None if the Context is not finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
trace, sampled = ctx.get()
assert trace is None
assert sampled is None
@mock.patch('oteltrace.internal.hostname.get_hostname')
def test_get_report_hostname_enabled(self, get_hostname):
get_hostname.return_value = 'test-hostname'
with self.override_global_config(dict(report_hostname=True)):
# Create a context and add a span and finish it
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
span.finish()
# Assert that we have not added the tag to the span yet
assert span.get_tag(HOSTNAME_KEY) is None
# Assert that retrieving the trace sets the tag
trace, _ = ctx.get()
assert trace[0].get_tag(HOSTNAME_KEY) == 'test-hostname'
assert span.get_tag(HOSTNAME_KEY) == 'test-hostname'
@mock.patch('oteltrace.internal.hostname.get_hostname')
def test_get_report_hostname_disabled(self, get_hostname):
get_hostname.return_value = 'test-hostname'
with self.override_global_config(dict(report_hostname=False)):
# Create a context and add a span and finish it
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
span.finish()
# Assert that we have not added the tag to the span yet
assert span.get_tag(HOSTNAME_KEY) is None
# Assert that retrieving the trace does not set the tag
trace, _ = ctx.get()
assert trace[0].get_tag(HOSTNAME_KEY) is None
assert span.get_tag(HOSTNAME_KEY) is None
@mock.patch('oteltrace.internal.hostname.get_hostname')
def test_get_report_hostname_default(self, get_hostname):
get_hostname.return_value = 'test-hostname'
# Create a context and add a span and finish it
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
span.finish()
# Assert that we have not added the tag to the span yet
assert span.get_tag(HOSTNAME_KEY) is None
# Assert that retrieving the trace does not set the tag
trace, _ = ctx.get()
assert trace[0].get_tag(HOSTNAME_KEY) is None
assert span.get_tag(HOSTNAME_KEY) is None
def test_partial_flush(self):
"""
When calling `Context.get`
When partial flushing is enabled
When we have just enough finished spans to flush
We return the finished spans
"""
tracer = get_dummy_tracer()
ctx = Context()
# Create a root span with 5 children, all of the children are finished, the root is not
root = Span(tracer=tracer, name='root')
ctx.add_span(root)
for i in range(5):
child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
child.finished = True
ctx.add_span(child)
ctx.close_span(child)
with self.override_partial_flush(ctx, enabled=True, min_spans=5):
trace, sampled = ctx.get()
self.assertIsNotNone(trace)
self.assertIsNotNone(sampled)
self.assertEqual(len(trace), 5)
self.assertEqual(
set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
set([span.name for span in trace])
)
# Ensure we clear/reset internal stats as expected
self.assertEqual(ctx._trace, [root])
with self.override_partial_flush(ctx, enabled=True, min_spans=5):
trace, sampled = ctx.get()
self.assertIsNone(trace)
self.assertIsNone(sampled)
def test_partial_flush_too_many(self):
"""
When calling `Context.get`
When partial flushing is enabled
When we have more than the minimum number of spans needed to flush
We return the finished spans
"""
tracer = get_dummy_tracer()
ctx = Context()
# Create a root span with 5 children, all of the children are finished, the root is not
root = Span(tracer=tracer, name='root')
ctx.add_span(root)
for i in range(5):
child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
child.finished = True
ctx.add_span(child)
ctx.close_span(child)
with self.override_partial_flush(ctx, enabled=True, min_spans=1):
trace, sampled = ctx.get()
self.assertIsNotNone(trace)
self.assertIsNotNone(sampled)
self.assertEqual(len(trace), 5)
self.assertEqual(
set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
set([span.name for span in trace])
)
# Ensure we clear/reset internal stats as expected
self.assertEqual(ctx._trace, [root])
with self.override_partial_flush(ctx, enabled=True, min_spans=5):
trace, sampled = ctx.get()
self.assertIsNone(trace)
self.assertIsNone(sampled)
def test_partial_flush_too_few(self):
"""
When calling `Context.get`
When partial flushing is enabled
When we do not have enough finished spans to flush
We return no spans
"""
tracer = get_dummy_tracer()
ctx = Context()
# Create a root span with 5 children, all of the children are finished, the root is not
root = Span(tracer=tracer, name='root')
ctx.add_span(root)
for i in range(5):
child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
child.finished = True
ctx.add_span(child)
ctx.close_span(child)
# Test with having 1 too few spans for partial flush
with self.override_partial_flush(ctx, enabled=True, min_spans=6):
trace, sampled = ctx.get()
self.assertIsNone(trace)
self.assertIsNone(sampled)
self.assertEqual(len(ctx._trace), 6)
self.assertEqual(
set(['root', 'child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
set([span.name for span in ctx._trace])
)
def test_partial_flush_remaining(self):
"""
When calling `Context.get`
When partial flushing is enabled
When we have some unfinished spans
We keep the unfinished spans around
"""
tracer = get_dummy_tracer()
ctx = Context()
# Create a root span with 5 children, all of the children are finished, the root is not
root = Span(tracer=tracer, name='root')
ctx.add_span(root)
for i in range(10):
child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(child)
            # Close the first 5 only
if i < 5:
child.finished = True
ctx.close_span(child)
with self.override_partial_flush(ctx, enabled=True, min_spans=5):
trace, sampled = ctx.get()
# Assert partially flushed spans
        self.assertEqual(len(trace), 5)
self.assertIsNotNone(sampled)
self.assertEqual(
set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
set([span.name for span in trace])
)
# Assert remaining unclosed spans
self.assertEqual(len(ctx._trace), 6)
self.assertEqual(
set(['root', 'child_5', 'child_6', 'child_7', 'child_8', 'child_9']),
set([span.name for span in ctx._trace]),
)
def test_finished(self):
# a Context is finished if all spans inside are finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ctx.close_span(span)
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans_disabled(self, log):
# the trace finished status logging is disabled
tracer = get_dummy_tracer()
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
child_1._parent = root
child_2._parent = root
ctx.add_span(root)
ctx.add_span(child_1)
ctx.add_span(child_2)
# close only the parent
root.finish()
# the logger has never been invoked to print unfinished spans
for call, _ in log.call_args_list:
msg = call[0]
assert 'the trace has %d unfinished spans' not in msg
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans_when_ok(self, log):
# if the unfinished spans logging is enabled but the trace is finished, don't log anything
tracer = get_dummy_tracer()
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(root)
ctx.add_span(child)
# close the trace
child.finish()
root.finish()
# the logger has never been invoked to print unfinished spans
for call, _ in log.call_args_list:
msg = call[0]
assert 'the trace has %d unfinished spans' not in msg
def test_thread_safe(self):
# the Context must be thread-safe
ctx = Context()
def _fill_ctx():
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
threads = [threading.Thread(target=_fill_ctx) for _ in range(100)]
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
assert 100 == len(ctx._trace)
def test_clone(self):
ctx = Context()
ctx.sampling_priority = 2
# manually create a root-child trace
root = Span(tracer=None, name='root')
child = Span(tracer=None, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(root)
ctx.add_span(child)
cloned_ctx = ctx.clone()
assert cloned_ctx._parent_trace_id == ctx._parent_trace_id
assert cloned_ctx._parent_span_id == ctx._parent_span_id
assert cloned_ctx._sampling_priority == ctx._sampling_priority
assert cloned_ctx._otel_origin == ctx._otel_origin
assert cloned_ctx._current_span == ctx._current_span
assert cloned_ctx._trace == []
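# Editorial sketch (not part of the original suite): the minimal Context
# life-cycle exercised by the tests above, shown end to end. It mirrors
# test_add_span and test_get_trace.
def _context_lifecycle_example():
    ctx = Context()
    span = Span(tracer=None, name='example_span')
    ctx.add_span(span)           # span becomes the current active span
    assert span == ctx.get_current_span()
    span.finish()                # finishing also closes the span in its context
    trace, sampled = ctx.get()   # a finished context hands back its trace
    assert [span] == trace
    assert sampled is True
    assert 0 == len(ctx._trace)  # the context resets itself afterwards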
|
framework.py
|
#!/usr/bin/env python
from __future__ import print_function
import gc
import sys
import os
import select
import unittest
import tempfile
import time
import faulthandler
import random
import copy
import psutil
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from scapy.packet import Raw
from hook import StepHook, PollHook, VppDiedError
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_papi_provider import VppPapiProvider
from vpp_papi.vpp_stats import VPPStats
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
getLogger, colorize
from vpp_object import VppObjectRegistry
from util import ppp
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
import subprocess32 as subprocess
else:
import subprocess
PASS = 0
FAIL = 1
ERROR = 2
SKIP = 3
TEST_RUN = 4
debug_framework = False
if os.getenv('TEST_DEBUG', "0") == "1":
debug_framework = True
import debug_internal
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class _PacketInfo(object):
    """Private class holding information about a single generated packet.
    Helps to process information about the next expected packet.
    Variables are set to the default values below.
    """
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
    #: Store a copy of the original packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
while not testclass.pump_thread_stop_flag.is_set():
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
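                # a last chunk without a trailing newline is an incomplete
                # line; it is kept back as a fragment and prepended to the
                # first chunk of the next read instead of being logged now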
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the flag will take care
# of properly terminating the loop
def running_extended_tests():
    s = os.getenv("EXTENDED_TESTS", "n")
    return s.lower() in ("y", "yes", "1")
def running_on_centos():
    os_id = os.getenv("OS_ID", "")
    return "centos" in os_id.lower()
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
def __init__(self):
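        # Borg-style shared state: all instances share one __dict__, so the
        # keep-alive pipe set on any reporter is visible to every other one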
self.__dict__ = self._shared_state
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if hasattr(self, '_pipe'):
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = test.__name__
else:
desc = test.shortDescription()
if not desc:
desc = str(test)
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
    classes. It provides methods to create and run test cases.
"""
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb":
cls.debug_gdb = True
elif dl == "gdbserver":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
@classmethod
    def get_least_used_cpu(cls):
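        # cpu_usage_list[i] holds the set of CPUs currently hosting exactly i
        # vpp_main processes; a random CPU is picked from the least-loaded
        # non-empty set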
cpu_usage_list = [set(range(psutil.cpu_count()))]
vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
if 'vpp_main' == p.info['name']]
for vpp_process in vpp_processes:
for cpu_usage_set in cpu_usage_list:
try:
cpu_num = vpp_process.cpu_num()
if cpu_num in cpu_usage_set:
cpu_usage_set_index = cpu_usage_list.index(
cpu_usage_set)
if cpu_usage_set_index == len(cpu_usage_list) - 1:
cpu_usage_list.append({cpu_num})
else:
cpu_usage_list[cpu_usage_set_index + 1].add(
cpu_num)
cpu_usage_set.remove(cpu_num)
break
except psutil.NoSuchProcess:
pass
for cpu_usage_set in cpu_usage_list:
if len(cpu_usage_set) > 0:
min_usage_set = cpu_usage_set
break
return random.choice(tuple(min_usage_set))
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
s = os.getenv("STEP", "n")
cls.step = True if s.lower() in ("y", "yes", "1") else False
d = os.getenv("DEBUG", None)
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
if coredump_size is None:
coredump_size = "coredump-size unlimited"
cpu_core_number = cls.get_least_used_cpu()
cls.vpp_cmdline = [cls.vpp_bin, "unix",
"{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "}", "api-trace", "{", "on", "}",
"api-segment", "{", "prefix", cls.shm_prefix, "}",
"cpu", "{", "main-core", str(cpu_core_number), "}",
"statseg", "{", "socket-name", cls.stats_sock, "}",
"plugins", "{", "plugin", "dpdk_plugin.so", "{",
"disable", "}", "plugin", "unittest_plugin.so",
"{", "enable", "}", "}", ]
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
cls.logger.info("vpp_cmdline: %s" % cls.vpp_cmdline)
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug the VPP using e.g.:")
if cls.debug_gdbserver:
print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'")
print("Now is the time to attach a gdb by running the above "
"command, set up breakpoints etc. and then resume VPP from "
"within gdb by issuing the 'continue' command")
elif cls.debug_gdb:
print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach a gdb by running the above "
"command and set up breakpoints etc.")
print(single_line_delim)
raw_input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or \
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
except Exception as e:
cls.logger.critical("Couldn't start vpp: %s" % e)
raise
cls.wait_for_enter()
@classmethod
def wait_for_stats_socket(cls):
deadline = time.time() + 3
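        # poll for the stats socket to appear; when running under gdb or
        # gdbserver there is effectively no deadline, since the operator may
        # take arbitrarily long to resume VPP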
while time.time() < deadline or cls.debug_gdb or cls.debug_gdbserver:
if os.path.exists(cls.stats_sock):
break
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
gc.collect() # run garbage collection first
random.seed()
if not hasattr(cls, 'logger'):
cls.logger = getLogger(cls.__name__)
else:
cls.logger.name = cls.__name__
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-%s-' % cls.__name__)
cls.stats_sock = "%s/stats.sock" % cls.tempdir
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.shm_prefix = os.path.basename(cls.tempdir)
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, shm prefix is %s",
cls.tempdir, cls.shm_prefix)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
cls._zombie_captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end with a zombie vpp
try:
cls.run_vpp()
cls.reporter.send_keep_alive(cls)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
if cls.debug_gdb or cls.debug_gdbserver:
read_timeout = 0
else:
read_timeout = 5
cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
read_timeout)
if cls.step:
hook = StepHook(cls)
else:
hook = PollHook(cls)
cls.vapi.register_hook(hook)
cls.wait_for_stats_socket()
cls.statistics = VPPStats(socketname=cls.stats_sock)
try:
hook.poll_vpp()
except VppDiedError:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except Exception:
try:
cls.vapi.disconnect()
except Exception:
pass
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise
except Exception:
try:
cls.quit()
except Exception:
pass
raise
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
cls.vpp.poll()
if cls.vpp.returncode is None:
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
raw_input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
# first signal that we want to stop the pump thread, then wake it up
if hasattr(cls, 'pump_thread_stop_flag'):
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread_wakeup_pipe'):
os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up')
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
            cls.logger.debug("Waiting for stderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.vapi.disconnect()
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
                cls.logger.debug("Sending SIGKILL to vpp")
cls.vpp.kill()
cls.logger.debug("Waiting for vpp to die")
cls.vpp.communicate()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.quit()
cls.file_handler.close()
cls.reset_packet_infos()
if debug_framework:
debug_internal.on_tear_down_class(cls)
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.statistics.set_errors_str())
self.logger.info(self.vapi.ppcli("show run"))
self.logger.info(self.vapi.ppcli("show log"))
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
api_trace = "vpp_api_trace.%s.log" % self._testMethodName
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
vpp_api_trace_log))
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
self.reporter.send_keep_alive(self)
self.logger.debug("--- setUp() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if self.vpp_dead:
raise Exception("VPP is dead when setting up the test")
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes (if None,
use self.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
@classmethod
def register_capture(cls, cap_name):
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((time.time(), cap_name))
# filter out from zombies
cls._zombie_captures = [(stamp, name)
for (stamp, name) in cls._zombie_captures
if name != cap_name]
@classmethod
def pg_start(cls):
""" Remove any zombie captures and enable the packet generator """
# how long before capture is allowed to be deleted - otherwise vpp
# crashes - 100ms seems enough (this shouldn't be needed at all)
capture_ttl = 0.1
now = time.time()
for stamp, cap_name in cls._zombie_captures:
wait = stamp + capture_ttl - now
if wait > 0:
cls.sleep(wait, "before deleting capture %s" % cap_name)
now = time.time()
cls.logger.debug("Removing zombie capture %s" % cap_name)
cls.vapi.cli('packet-generator delete %s' % cap_name)
cls.vapi.cli("trace add pg-input 50") # 50 is maximum
cls.vapi.cli('packet-generator enable')
cls._zombie_captures = cls._captures
cls._captures = []
@classmethod
def create_pg_interfaces(cls, interfaces):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_loopback_interfaces(cls, count):
"""
Create loopback interfaces.
:param count: number of interfaces created.
:returns: List of created interfaces.
"""
result = [VppLoInterface(cls) for i in range(count)]
for intf in result:
setattr(cls, intf.name, intf)
cls.lo_interfaces = result
return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
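        # the +4 above accounts for the on-wire frame being 4 bytes longer
        # than len(packet) (presumably the Ethernet FCS)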
extend = size - packet_len
if extend > 0:
            num = (extend // len(padding)) + 1
packet[Raw].load += (padding * num)[:extend]
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = payload.split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
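    # Note: info_to_payload() and payload_to_info() are inverses - embedding
    # the serialized info into a packet's payload lets a test recover which
    # generator interface and packet index a captured packet corresponds to.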
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except Exception:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
def assert_packet_checksums_valid(self, packet,
ignore_zero_udp_checksums=True):
received = packet.__class__(str(packet))
self.logger.debug(
ppp("Verifying packet checksums for packet:", received))
udp_layers = ['UDP', 'UDPerror']
checksum_fields = ['cksum', 'chksum']
checksums = []
counter = 0
temp = received.__class__(str(received))
while True:
layer = temp.getlayer(counter)
if layer:
for cf in checksum_fields:
if hasattr(layer, cf):
if ignore_zero_udp_checksums and \
0 == getattr(layer, cf) and \
layer.name in udp_layers:
continue
delattr(layer, cf)
checksums.append((counter, cf))
else:
break
counter = counter + 1
if 0 == len(checksums):
return
temp = temp.__class__(str(temp))
for layer, cf in checksums:
calc_sum = getattr(temp[layer], cf)
self.assert_equal(
getattr(received[layer], cf), calc_sum,
"packet checksum on layer #%d: %s" % (layer, temp[layer].name))
self.logger.debug(
"Checksum field `%s` on `%s` layer has correct value `%s`" %
(cf, temp[layer].name, calc_sum))
def assert_checksum_valid(self, received_packet, layer,
field_name='chksum',
ignore_zero_checksum=False):
""" Check checksum of received packet on given layer """
received_packet_checksum = getattr(received_packet[layer], field_name)
if ignore_zero_checksum and 0 == received_packet_checksum:
return
recalculated = received_packet.__class__(str(received_packet))
delattr(recalculated[layer], field_name)
recalculated = recalculated.__class__(str(recalculated))
self.assert_equal(received_packet_checksum,
getattr(recalculated[layer], field_name),
"packet checksum on layer: %s" % layer)
def assert_ip_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'IP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_tcp_checksum_valid(self, received_packet,
ignore_zero_checksum=False):
self.assert_checksum_valid(received_packet, 'TCP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_udp_checksum_valid(self, received_packet,
ignore_zero_checksum=True):
self.assert_checksum_valid(received_packet, 'UDP',
ignore_zero_checksum=ignore_zero_checksum)
def assert_embedded_icmp_checksum_valid(self, received_packet):
if received_packet.haslayer(IPerror):
self.assert_checksum_valid(received_packet, 'IPerror')
if received_packet.haslayer(TCPerror):
self.assert_checksum_valid(received_packet, 'TCPerror')
if received_packet.haslayer(UDPerror):
self.assert_checksum_valid(received_packet, 'UDPerror',
ignore_zero_checksum=True)
if received_packet.haslayer(ICMPerror):
self.assert_checksum_valid(received_packet, 'ICMPerror')
def assert_icmp_checksum_valid(self, received_packet):
self.assert_checksum_valid(received_packet, 'ICMP')
self.assert_embedded_icmp_checksum_valid(received_packet)
def assert_icmpv6_checksum_valid(self, pkt):
if pkt.haslayer(ICMPv6DestUnreach):
self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
self.assert_embedded_icmp_checksum_valid(pkt)
if pkt.haslayer(ICMPv6EchoRequest):
self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
if pkt.haslayer(ICMPv6EchoReply):
self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
@classmethod
def sleep(cls, timeout, remark=None):
if hasattr(cls, 'logger'):
cls.logger.debug("Starting sleep for %ss (%s)" % (timeout, remark))
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected time.sleep() result - "
"slept for %ss instead of ~%ss!" % (
after - before, timeout))
if hasattr(cls, 'logger'):
cls.logger.debug(
"Finished sleep (%s) - slept %ss (wanted %ss)" % (
remark, after - before, timeout))
def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
self.vapi.cli("clear trace")
intf.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
if not timeout:
timeout = 1
for i in self.pg_interfaces:
i.get_capture(0, timeout=timeout)
i.assert_nothing_captured(remark=remark)
timeout = 0.1
def send_and_expect(self, input, pkts, output):
self.vapi.cli("clear trace")
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = output.get_capture(len(pkts))
return rx
def get_testcase_doc_name(test):
return getdoc(test.__class__).splitlines()[0]
def get_test_description(descriptions, test):
    # TODO: if the short description is None, print a warning instead of raising
short_description = test.shortDescription()
if descriptions and short_description:
return short_description
else:
return str(test)
class TestCasePrinter(object):
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
if not hasattr(self, "_test_case_set"):
self._test_case_set = set()
def print_test_case_heading_if_first_time(self, case):
if case.__class__ not in self._test_case_set:
print(double_line_delim)
print(colorize(get_testcase_doc_name(case), GREEN))
print(double_line_delim)
self._test_case_set.add(case.__class__)
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
def __init__(self, stream, descriptions, verbosity):
"""
        :param stream: File descriptor where test results are reported.
            Set to the standard error stream by default.
        :param descriptions: Boolean controlling whether to use test case
            descriptions.
        :param verbosity: Integer specifying the required verbosity level.
"""
unittest.TestResult.__init__(self, stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.printer = TestCasePrinter()
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSuccess() %s.%s(%s) called"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
self.send_result_through_pipe(test, PASS)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc,
reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
self.send_result_through_pipe(test, SKIP)
def symlink_failed(self, test):
logger = None
if hasattr(test, 'logger'):
logger = test.logger
if hasattr(test, 'tempdir'):
try:
failed_dir = os.getenv('VPP_TEST_FAILED_DIR')
link_path = os.path.join(failed_dir, '%s-FAILED' %
os.path.basename(test.tempdir))
if logger:
logger.debug("creating a link to the failed test")
logger.debug("os.symlink(%s, %s)" %
(test.tempdir, link_path))
if os.path.exists(link_path):
if logger:
logger.debug('symlink already exists')
else:
os.symlink(test.tempdir, link_path)
except Exception as e:
if logger:
logger.error(e)
def send_result_through_pipe(self, test, result):
if hasattr(self, 'test_framework_result_pipe'):
pipe = self.test_framework_result_pipe
if pipe:
pipe.send((test.id(), result))
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addFailure() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addFailure(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("FAIL", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
self.symlink_failed(test)
else:
self.result_string = colorize("FAIL", RED) + ' [no temp dir]'
self.send_result_through_pipe(test, FAIL)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addError() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addError(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("ERROR", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
self.symlink_failed(test)
else:
self.result_string = colorize("ERROR", RED) + ' [no temp dir]'
self.send_result_through_pipe(test, ERROR)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
return get_test_description(self.descriptions, test)
def startTest(self, test):
"""
Start a test
:param test:
"""
self.printer.print_test_case_heading_if_first_time(test)
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Called when the given test has been run
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.send_result_through_pipe(test, TEST_RUN)
def printErrors(self):
"""
Print errors from running the test case
"""
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
"""
    A basic test runner implementation which prints results to standard output.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
result_pipe=None, failfast=False, buffer=False,
resultclass=None):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass)
reporter = KeepAliveReporter()
reporter.pipe = keep_alive_pipe
VppTestResult.test_framework_result_pipe = result_pipe
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
result = super(VppTestRunner, self).run(test)
return result
class Worker(Thread):
    def __init__(self, args, logger, env=None):
        self.logger = logger
        self.args = args
        self.result = None
        self.env = copy.deepcopy(env) if env else {}
super(Worker, self).__init__()
def run(self):
executable = self.args[0]
self.logger.debug("Running executable w/args `%s'" % self.args)
env = os.environ.copy()
env.update(self.env)
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
self.args, shell=False, env=env, preexec_fn=os.setpgrp,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = self.process.communicate()
self.logger.debug("Finished running `%s'" % executable)
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stdout:" % executable)
self.logger.info(single_line_delim)
self.logger.info(out)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stderr:" % executable)
self.logger.info(single_line_delim)
self.logger.info(err)
self.logger.info(single_line_delim)
self.result = self.process.returncode
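# Editorial usage sketch (not part of the framework itself): the minimal shape
# of a test case built on top of VppTestCase. setUpClass() of the base class
# starts VPP, connects the API and wires up logging; tearDownClass() tears it
# all down again. The two packet-generator interfaces are purely illustrative.
class _ExampleVppTestCase(VppTestCase):
    """ Example VPP test case skeleton """
    @classmethod
    def setUpClass(cls):
        super(_ExampleVppTestCase, cls).setUpClass()
        cls.create_pg_interfaces(range(2))
    def test_vpp_is_alive(self):
        """ VPP is running and answers CLI requests via the API """
        self.assertFalse(self.vpp_dead)
        self.logger.info(self.vapi.cli("show version"))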
|
main.py
|
print("importing...")
from config import Configuration
from server import AnylinkServer, SFTPHandler
import threading
from requests_manager import RequestsManager
from account_manager import AccountManager
import readchar
def main():
print("config...")
config = Configuration("/home/orikeidar01/config.json", "anylink")
config.database.set_default_table("anylink")
print("initialize server...")
AnylinkServer.allow_reuse_address = True
server = AnylinkServer(config.bind_addr, config=config)
print("serving...")
sftp_thread = threading.Thread(target=server.serve_forever)
sftp_thread.start()
requests_manager = RequestsManager(SFTPHandler)
account_manager = AccountManager(config.database)
exit = False
while not exit:
print("select action:")
print("1) view connected users")
print("2) open channel with a user")
print("3) obtain file from user")
print("4) create new user")
print("^C) exit")
char = None
        while char is None or char not in ('1', '2', '3', '4', readchar.key.CTRL_C):
char = readchar.readkey()
if char == '1':
for u in requests_manager.channels:
print(u)
elif char == '2':
user = input("enter user email: ")
requests_manager.get_channel(user)
elif char == '3':
user = input("enter user email: ")
requests_manager.get_channel(user)
path = input("enter file path: ")
requests_manager.send_file(user, path)
elif char == '4':
user = input("enter user email: ")
passwd = input("enter user password: ")
if account_manager.create_user(user, passwd):
                print("user was successfully created")
else:
print("email already exists")
elif char == readchar.key.CTRL_C:
exit = True
server.shutdown()
if __name__ == "__main__":
main()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtCore import Qt, QSize, QStringListModel, pyqtSignal
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum.util import bh2u, bfh
from electrum import keystore, simple_config
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS, NetworkConstants
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet
try:
from electrum.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 5)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
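        # Optional tabs are only added when their 'show_<name>_tab' config flag is set;
        # toggle_tab() flips that flag at runtime.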
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
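        # The shortcuts below capture a weak proxy of the tab widget rather than a
        # strong reference, so they cannot keep this window alive after it is closed.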
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
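        # Resolve the configured OpenAlias in a daemon thread; alias_received_signal
        # notifies the GUI thread once the lookup completes.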
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
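        # Give the wallet a TaskThread so background tasks run off the GUI thread and
        # report failures through on_error.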
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
        # Now that the GUI is initialized, check whether there is anything to announce;
        # the new_transaction callback may have fired before the GUI existed.
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if NetworkConstants.TESTNET else "Electrum"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings"/"Preferences" are reserved menu titles on macOS; use a different label there as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
            # Combine the notifications if there are three or more new transactions
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received: Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received: %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
if self.fee_unit == 0:
return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
else:
return self.format_amount(fee_rate) + ' ' + self.base_unit() + '/kB'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mBTC'
if self.decimal_point == 8:
return 'BTC'
raise Exception('Unknown base unit')
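    # Keep a BTC amount edit and its fiat counterpart in sync via the current exchange
    # rate; the 'follows' flag stops the two textChanged handlers from re-triggering
    # each other.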
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
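        # Slider callback: persist either the dynamic fee level or a static fee-per-kB,
        # then mirror the rate into the feerate edit and recompute the fee.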
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate // 1000)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
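        # The absolute-fee and fee-rate edits are alternatives: when one is edited,
        # the other loses its 'modified' status and the fee slider is deactivated.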
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 2 if self.fee_unit else 0)
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, 3)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
        self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
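        # Recolour the amount/fee/feerate fields: red when funds are insufficient,
        # blue for auto-filled values, default otherwise.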
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
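            # Build a throwaway transaction just to estimate its size and fee;
            # it is never signed or broadcast from here.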
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except NoDynamicFeeEstimates:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
if not freeze_fee:
fee = None if self.not_enough_funds else fee
self.fee_e.setAmount(fee)
if not freeze_feerate:
fee_rate = fee // size if fee is not None else None
self.feerate_e.setAmount(fee_rate)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
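    # Example usage (as with sign_tx below):
    #     @protected
    #     def sign_tx(self, tx, callback, password): ...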
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else float(amount)
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Bitcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Bitcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
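        # Broadcast on a worker thread behind a WaitingDialog; if this pays a BIP70
        # payment request, mark the invoice as paid and send the payment ACK.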
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
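        # Freeze the pay-to/amount/description fields and show a placeholder while the
        # payment request is being fetched and verified.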
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.size_e.setAmount(0)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l, l.get_list_header())
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
if xtype in ['p2wpkh', 'p2wsh', 'p2wpkh-p2sh', 'p2wsh-p2sh']:
vbox.addWidget(WWLabel(_("Warning: the format of private keys associated to segwit addresses may not be compatible with other wallets")))
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
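        # raw transactions are QR-encoded in base43 here, presumably so the payload fits
        # the QR alphanumeric character set; decode back to a hex string before parsing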
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
from electrum.transaction import SerializationError
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self):
from electrum.transaction import SerializationError
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
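        # The keys are derived on a background thread; progress and completion are
        # reported back via computing_privkeys_signal and show_privkeys_signal so the
        # Qt widgets are only touched from the GUI thread.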
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
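        # The Sweep button stays disabled until both a valid destination address and
        # at least one parseable private key have been entered.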
if not d.exec_():
return
from electrum.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
            index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
            _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
self.fee_unit = self.config.get('fee_unit', 0)
fee_unit_label = HelpLabel(_('Fee Unit') + ':', '')
fee_unit_combo = QComboBox()
fee_unit_combo.addItems([_('sat/byte'), _('mBTC/kB')])
fee_unit_combo.setCurrentIndex(self.fee_unit)
def on_fee_unit(x):
self.fee_unit = x
self.config.set_key('fee_unit', x)
self.fee_slider.update()
fee_unit_combo.currentIndexChanged.connect(on_fee_unit)
fee_widgets.append((fee_unit_label, fee_unit_combo))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BTC', 'mBTC', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1BTC=1000mBTC.\n' \
              + _('These settings affect the fields in the Send tab.')
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
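            # Capture the current amounts (in satoshis) before switching the unit, so they
            # can be written back into the edits and re-rendered with the new decimal point.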
if unit_result == 'BTC':
self.decimal_point = 8
elif unit_result == 'mBTC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(on_unit)
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
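        # Toggling a plugin loads or unloads it via the plugin manager and keeps the
        # checkbox and its settings widget in sync with the resulting state.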
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
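        # Initial suggestion: the configured fee rate (per kB) applied to the combined
        # size of the parent and child transactions.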
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee' + ':')), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee' + ':')))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
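        # Prefill the new fee at 1.5x the original as a starting suggestion; the user
        # can still adjust it with the field or the slider below.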
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
|
test_index.py
|
import multiprocessing as mp
import os
import time
import unittest
import numpy as np
from jina.enums import FlowOptimizeLevel
from jina.executors.indexers.vector.numpy import NumpyIndexer
from jina.flow import Flow
from jina.main.parser import set_flow_parser
from jina.proto import jina_pb2
from tests import JinaTestCase, random_docs
cur_dir = os.path.dirname(os.path.abspath(__file__))
def get_result(resp):
n = []
for d in resp.search.docs:
n.append([k.id for k in d.matches])
n = np.array(n)
    # each of the 2 query docs should return its top-50 matches
np.testing.assert_equal(n.shape[0], 2)
np.testing.assert_equal(n.shape[1], 50)
class DummyIndexer(NumpyIndexer):
    # add() is overridden as a no-op so nothing is written to the index
def add(self, *args, **kwargs):
pass
class DummyIndexer2(NumpyIndexer):
# the add() function is simply copied from NumpyIndexer
def add(self, keys: 'np.ndarray', vectors: 'np.ndarray', *args, **kwargs):
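        # Validate rank, dimensionality, dtype and key/vector count consistency before
        # appending the raw vector bytes and keys to the index buffers.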
if len(vectors.shape) != 2:
raise ValueError(f'vectors shape {vectors.shape} is not valid, expecting "vectors" to have rank of 2')
if not self.num_dim:
self.num_dim = vectors.shape[1]
self.dtype = vectors.dtype.name
elif self.num_dim != vectors.shape[1]:
raise ValueError(
"vectors' shape [%d, %d] does not match with indexers's dim: %d" %
(vectors.shape[0], vectors.shape[1], self.num_dim))
elif self.dtype != vectors.dtype.name:
raise TypeError(
"vectors' dtype %s does not match with indexers's dtype: %s" %
(vectors.dtype.name, self.dtype))
elif keys.shape[0] != vectors.shape[0]:
            raise ValueError('number of keys %d does not equal number of vectors %d' % (keys.shape[0], vectors.shape[0]))
elif self.key_dtype != keys.dtype.name:
raise TypeError(
"keys' dtype %s does not match with indexers keys's dtype: %s" %
(keys.dtype.name, self.key_dtype))
self.write_handler.write(vectors.tobytes())
self.key_bytes += keys.tobytes()
self.key_dtype = keys.dtype.name
self._size += keys.shape[0]
class MyTestCase(JinaTestCase):
def tearDown(self) -> None:
super().tearDown()
time.sleep(2)
def test_doc_iters(self):
a = random_docs(3, 5)
for d in a:
print(d)
def test_simple_route(self):
f = Flow().add(uses='_pass')
with f:
f.index(input_fn=random_docs(10))
def test_update_method(self):
a = DummyIndexer(index_filename='test.bin')
a.save()
self.assertFalse(os.path.exists(a.save_abspath))
self.assertFalse(os.path.exists(a.index_abspath))
a.add()
a.save()
self.assertTrue(os.path.exists(a.save_abspath))
self.assertFalse(os.path.exists(a.index_abspath))
self.add_tmpfile(a.save_abspath, a.index_abspath)
b = DummyIndexer2(index_filename='testb.bin')
b.save()
self.assertFalse(os.path.exists(b.save_abspath))
self.assertFalse(os.path.exists(b.index_abspath))
b.add(np.array([1, 2, 3]), np.array([[1, 1, 1], [2, 2, 2]]))
b.save()
self.assertTrue(os.path.exists(b.save_abspath))
self.assertTrue(os.path.exists(b.index_abspath))
self.add_tmpfile(b.save_abspath, b.index_abspath)
@unittest.skipIf('GITHUB_WORKFLOW' in os.environ, 'skip the network test on github workflow')
def test_two_client_route_parallel(self):
fa1 = set_flow_parser().parse_args(['--optimize-level', str(FlowOptimizeLevel.NONE)])
f1 = Flow(fa1).add(uses='_pass', parallel=3)
f2 = Flow(optimize_level=FlowOptimizeLevel.IGNORE_GATEWAY).add(uses='_pass', parallel=3)
def start_client(fl):
fl.index(input_fn=random_docs(10))
with f1:
self.assertEqual(f1.num_peas, 6)
t1 = mp.Process(target=start_client, args=(f1,))
t1.daemon = True
t2 = mp.Process(target=start_client, args=(f1,))
t2.daemon = True
t1.start()
t2.start()
time.sleep(5)
with f2:
# no optimization can be made because we ignored the gateway
self.assertEqual(f2.num_peas, 6)
t1 = mp.Process(target=start_client, args=(f2,))
t1.daemon = True
t2 = mp.Process(target=start_client, args=(f2,))
t2.daemon = True
t1.start()
t2.start()
time.sleep(5)
@unittest.skipIf('GITHUB_WORKFLOW' in os.environ, 'skip the network test on github workflow')
def test_two_client_route(self):
f = Flow().add(uses='_pass')
def start_client(fl):
fl.index(input_fn=random_docs(10))
with f:
t1 = mp.Process(target=start_client, args=(f,))
t1.daemon = True
t2 = mp.Process(target=start_client, args=(f,))
t2.daemon = True
t1.start()
t2.start()
time.sleep(5)
def test_index(self):
f = Flow().add(uses=os.path.join(cur_dir, 'yaml/test-index.yml'), parallel=3, separated_workspace=True)
with f:
f.index(input_fn=random_docs(1000))
for j in range(3):
self.assertTrue(os.path.exists(f'test2-{j + 1}/test2.bin'))
self.assertTrue(os.path.exists(f'test2-{j + 1}/tmp2'))
self.add_tmpfile(f'test2-{j + 1}/test2.bin', f'test2-{j + 1}/tmp2', f'test2-{j + 1}')
time.sleep(3)
with f:
f.search(input_fn=random_docs(2), output_fn=get_result)
def test_chunk_joint_idx(self):
f = Flow().add(uses=os.path.join(cur_dir, 'yaml/test-joint.yml'))
def validate(req, indexer_name):
self.assertTrue(req.status.code < jina_pb2.Status.ERROR)
self.assertEqual(req.search.docs[0].matches[0].score.op_name, indexer_name)
with f:
f.index(random_docs(100))
g = Flow().add(uses=os.path.join(cur_dir, 'yaml/test-joint.yml'))
with g:
g.search(random_docs(10), output_fn=lambda x: validate(x, 'NumpyIndexer'))
g = Flow(timeout_ready=-1).add(uses=os.path.join(cur_dir, 'yaml/test-joint-wrap.yml'))
with g:
g.search(random_docs(10), output_fn=lambda x: validate(x, 'AnnoyIndexer'))
self.add_tmpfile('vec.gz', 'vecidx.bin', 'chunk.gz', 'chunkidx.bin')
if __name__ == '__main__':
unittest.main()
|
revocation_notifier.py
|
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
from multiprocessing import Process
import threading
import functools
import time
import os
import sys
import signal
import zmq
import simplejson as json
from keylime import config
from keylime import crypto
from keylime import keylime_logging
from keylime import secure_mount
logger = keylime_logging.init_logging('revocation_notifier')
broker_proc = None
def start_broker():
def worker():
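        # 0mq forwarder device: subscribe to revocation events published on the local
        # IPC socket and re-publish them on the TCP endpoint that agents listen on.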
context = zmq.Context(1)
frontend = context.socket(zmq.SUB)
frontend.bind("ipc:///tmp/keylime.verifier.ipc")
frontend.setsockopt(zmq.SUBSCRIBE, b'')
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://%s:%s" % (config.get('cloud_verifier', 'revocation_notifier_ip'),
config.getint('cloud_verifier', 'revocation_notifier_port')))
zmq.device(zmq.FORWARDER, frontend, backend)
global broker_proc
broker_proc = Process(target=worker)
broker_proc.start()
def stop_broker():
global broker_proc
if broker_proc is not None:
os.kill(broker_proc.pid, signal.SIGKILL)
def notify(tosend):
def worker(tosend):
context = zmq.Context()
mysock = context.socket(zmq.PUB)
mysock.connect("ipc:///tmp/keylime.verifier.ipc")
        # wait 200ms for the connect to happen
        time.sleep(0.2)
        # now send it out via 0mq
logger.info("Sending revocation event to listening nodes..")
for i in range(config.getint('cloud_verifier', 'max_retries')):
try:
mysock.send_string(json.dumps(tosend))
break
except Exception as e:
logger.debug("Unable to publish revocation message %d times, trying again in %f seconds: %s" % (
i, config.getfloat('cloud_verifier', 'retry_interval'), e))
time.sleep(config.getfloat('cloud_verifier', 'retry_interval'))
mysock.close()
cb = functools.partial(worker, tosend)
t = threading.Thread(target=cb)
t.start()
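    # Publishing happens on a short-lived thread so the caller is not blocked by the
    # connect delay or by retries.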
cert_key = None
def await_notifications(callback, revocation_cert_path):
global cert_key
if revocation_cert_path is None:
raise Exception("must specify revocation_cert_path")
context = zmq.Context()
mysock = context.socket(zmq.SUB)
mysock.setsockopt(zmq.SUBSCRIBE, b'')
mysock.connect("tcp://%s:%s" % (config.get('general', 'receive_revocation_ip'),
config.getint('general', 'receive_revocation_port')))
logger.info('Waiting for revocation messages on 0mq %s:%s' %
(config.get('general', 'receive_revocation_ip'), config.getint('general', 'receive_revocation_port')))
while True:
rawbody = mysock.recv()
body = json.loads(rawbody)
if cert_key is None:
# load up the CV signing public key
if revocation_cert_path is not None and os.path.exists(revocation_cert_path):
logger.info(
"Lazy loading the revocation certificate from %s" % revocation_cert_path)
with open(revocation_cert_path, 'r') as f:
certpem = f.read()
cert_key = crypto.x509_import_pubkey(certpem)
if cert_key is None:
logger.warning(
"Unable to check signature of revocation message: %s not available" % revocation_cert_path)
elif 'signature' not in body or body['signature'] == 'none':
logger.warning("No signature on revocation message from server")
elif not crypto.rsa_verify(cert_key, body['msg'].encode('utf-8'), body['signature'].encode('utf-8')):
logger.error("Invalid revocation message siganture %s" % body)
else:
message = json.loads(body['msg'])
logger.debug(
"Revocation signature validated for revocation: %s" % message)
callback(message)
def main():
start_broker()
def worker():
def print_notification(revocation):
logger.warning("Received revocation: %s" % revocation)
keypath = '%s/unzipped/RevocationNotifier-cert.crt' % (
secure_mount.mount())
await_notifications(print_notification, revocation_cert_path=keypath)
t = threading.Thread(target=worker)
t.start()
# time.sleep(0.5)
json_body2 = {
'v': 'vbaby',
'agent_id': '2094aqrea3',
'cloudagent_ip': 'ipaddy',
'cloudagent_port': '39843',
'tpm_policy': '{"ab":"1"}',
'vtpm_policy': '{"ab":"1"}',
'metadata': '{"cert_serial":"1"}',
'allowlist': '{}',
'revocation_key': '',
'revocation': '{"cert_serial":"1"}',
}
print("sending notification")
notify(json_body2)
time.sleep(2)
print("shutting down")
stop_broker()
print("exiting...")
sys.exit(0)
print("done")
if __name__ == "__main__":
main()
|
pants_daemon.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import sys
import threading
from contextlib import contextmanager
from dataclasses import dataclass
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink, SignalHandler
from pants.base.exiter import Exiter
from pants.bin.daemon_pants_runner import DaemonPantsRunner
from pants.engine.native import Native
from pants.engine.rules import UnionMembership
from pants.init.engine_initializer import EngineInitializer
from pants.init.logging import init_rust_logger, setup_logging
from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.option.scope import GLOBAL_SCOPE
from pants.pantsd.process_manager import FingerprintedProcessManager
from pants.pantsd.service.fs_event_service import FSEventService
from pants.pantsd.service.pailgun_service import PailgunService
from pants.pantsd.service.pants_service import PantsServices
from pants.pantsd.service.scheduler_service import SchedulerService
from pants.pantsd.service.store_gc_service import StoreGCService
from pants.pantsd.watchman_launcher import WatchmanLauncher
from pants.util.contextutil import stdio_as
from pants.util.memo import memoized_property
from pants.util.strutil import ensure_text
class _LoggerStream(object):
"""A sys.std{out,err} replacement that pipes output to a logger.
N.B. `logging.Logger` expects unicode. However, most of our outstream logic, such as in
`exiter.py`, will use `sys.std{out,err}.buffer` and thus a bytes interface. So, we must provide
a `buffer` property, and change the semantics of the buffer to always convert the message to
unicode. This is an unfortunate code smell, as `logging` does not expose a bytes interface so
this is the best solution we could think of.
"""
def __init__(self, logger, log_level, handler):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
:param Handler handler: The underlying log handler, for determining the fileno
to support faulthandler logging.
"""
self._logger = logger
self._log_level = log_level
self._handler = handler
def write(self, msg):
msg = ensure_text(msg)
for line in msg.rstrip().splitlines():
            # The logger only accepts text; if handed bytes it would raise a decoding
            # error when the default encoding is ascii, so ensure each line is unicode.
line = ensure_text(line)
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
def isatty(self):
return False
def fileno(self):
return self._handler.stream.fileno()
@property
def buffer(self):
return self
class PantsDaemonSignalHandler(SignalHandler):
def __init__(self, daemon):
super().__init__()
self._daemon = daemon
def handle_sigint(self, signum, _frame):
self._daemon.terminate(include_watchman=False)
class PantsDaemon(FingerprintedProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = "pantsd.log"
class StartupFailure(Exception):
"""Represents a failure to start pantsd."""
class RuntimeFailure(Exception):
"""Represents a pantsd failure at runtime, usually from an underlying service failure."""
@dataclass(frozen=True)
class Handle:
"""A handle to a "probably running" pantsd instance.
We attempt to verify that the pantsd instance is still running when we create a Handle, but
        after it has been created it is entirely possible that the pantsd instance perishes.
"""
pid: int
port: int
metadata_base_dir: str
class Factory:
@classmethod
def maybe_launch(cls, options_bootstrapper):
"""Creates and launches a daemon instance if one does not already exist.
:param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:returns: A Handle for the running pantsd instance.
:rtype: PantsDaemon.Handle
"""
stub_pantsd = cls.create(options_bootstrapper, full_init=False)
with stub_pantsd._services.lifecycle_lock:
if stub_pantsd.needs_restart(stub_pantsd.options_fingerprint):
# Once we determine we actually need to launch, recreate with full initialization.
pantsd = cls.create(options_bootstrapper)
return pantsd.launch()
else:
# We're already launched.
return PantsDaemon.Handle(
stub_pantsd.await_pid(10),
stub_pantsd.read_named_socket("pailgun", int),
stub_pantsd._metadata_base_dir,
)
@classmethod
def restart(cls, options_bootstrapper):
"""Restarts a running daemon instance.
:param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:returns: A Handle for the pantsd instance.
:rtype: PantsDaemon.Handle
"""
pantsd = cls.create(options_bootstrapper)
with pantsd._services.lifecycle_lock:
# N.B. This will call `pantsd.terminate()` before starting.
return pantsd.launch()
@classmethod
def create(cls, options_bootstrapper, full_init=True):
"""
:param OptionsBootstrapper options_bootstrapper: The bootstrap options.
:param bool full_init: Whether or not to fully initialize an engine et al for the purposes
of spawning a new daemon. `full_init=False` is intended primarily
for lightweight lifecycle checks (since there is a ~1s overhead to
initialize the engine). See the impl of `maybe_launch` for an example
of the intended usage.
"""
bootstrap_options = options_bootstrapper.bootstrap_options
bootstrap_options_values = bootstrap_options.for_global_scope()
# TODO: https://github.com/pantsbuild/pants/issues/3479
watchman = WatchmanLauncher.create(bootstrap_options_values).watchman
if full_init:
build_root = get_buildroot()
native = Native()
build_config = BuildConfigInitializer.get(options_bootstrapper)
legacy_graph_scheduler = EngineInitializer.setup_legacy_graph(
native, options_bootstrapper, build_config
)
services = cls._setup_services(
build_root,
bootstrap_options_values,
legacy_graph_scheduler,
watchman,
union_membership=UnionMembership(build_config.union_rules()),
)
else:
build_root = None
native = None
services = PantsServices()
return PantsDaemon(
native=native,
build_root=build_root,
work_dir=bootstrap_options_values.pants_workdir,
log_level=bootstrap_options_values.level.upper(),
services=services,
metadata_base_dir=bootstrap_options_values.pants_subprocessdir,
bootstrap_options=bootstrap_options,
)
@staticmethod
def _setup_services(
build_root,
bootstrap_options,
legacy_graph_scheduler,
watchman,
union_membership: UnionMembership,
):
"""Initialize pantsd services.
:returns: A PantsServices instance.
"""
should_shutdown_after_run = bootstrap_options.shutdown_pantsd_after_run
fs_event_service = FSEventService(watchman, build_root,)
pidfile_absolute = PantsDaemon.metadata_file_path(
"pantsd", "pid", bootstrap_options.pants_subprocessdir
)
if pidfile_absolute.startswith(build_root):
pidfile = os.path.relpath(pidfile_absolute, build_root)
else:
pidfile = None
logging.getLogger(__name__).warning(
"Not watching pantsd pidfile because subprocessdir is outside of buildroot. Having "
"subprocessdir be a child of buildroot (as it is by default) may help avoid stray "
"pantsd processes."
)
scheduler_service = SchedulerService(
fs_event_service=fs_event_service,
legacy_graph_scheduler=legacy_graph_scheduler,
build_root=build_root,
invalidation_globs=OptionsInitializer.compute_pantsd_invalidation_globs(
build_root, bootstrap_options
),
pantsd_pidfile=pidfile,
union_membership=union_membership,
)
pailgun_service = PailgunService(
(bootstrap_options.pantsd_pailgun_host, bootstrap_options.pantsd_pailgun_port),
DaemonPantsRunner,
scheduler_service,
should_shutdown_after_run,
)
store_gc_service = StoreGCService(legacy_graph_scheduler.scheduler)
return PantsServices(
services=(fs_event_service, scheduler_service, pailgun_service, store_gc_service),
port_map=dict(pailgun=pailgun_service.pailgun_port),
)
def __init__(
self,
native,
build_root,
work_dir,
log_level,
services,
metadata_base_dir,
bootstrap_options=None,
):
"""
:param Native native: A `Native` instance.
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param string log_level: The log level to use for daemon logging.
:param PantsServices services: A registry of services to use in this run.
:param string metadata_base_dir: The ProcessManager metadata base dir.
:param Options bootstrap_options: The bootstrap options, if available.
"""
super().__init__(name="pantsd", metadata_base_dir=metadata_base_dir)
self._native = native
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._services = services
self._bootstrap_options = bootstrap_options
self._log_show_rust_3rdparty = (
bootstrap_options.for_global_scope().log_show_rust_3rdparty
if bootstrap_options
else True
)
self._log_dir = os.path.join(work_dir, self.name)
self._logger = logging.getLogger(__name__)
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
@memoized_property
def watchman_launcher(self):
return WatchmanLauncher.create(self._bootstrap_options.for_global_scope())
@property
def is_killed(self):
return self._kill_switch.is_set()
@property
def options_fingerprint(self):
return OptionsFingerprinter.combined_options_fingerprint_for_scope(
GLOBAL_SCOPE, self._bootstrap_options, fingerprint_key="daemon", invert=True
)
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
with self._services.lifecycle_lock:
for service, service_thread in service_thread_map.items():
self._logger.info(f"terminating pantsd service: {service}")
service.terminate()
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
self._logger.info("terminating pantsd")
self._kill_switch.set()
@staticmethod
def _close_stdio():
"""Close stdio streams to avoid output in the tty that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
@contextmanager
def _pantsd_logging(self):
"""A context manager that runs with pantsd logging.
Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that we can
safely reuse those fd numbers.
"""
# Ensure that stdio is closed so that we can safely reuse those file descriptors.
for fd in (0, 1, 2):
try:
os.fdopen(fd)
raise AssertionError(f"pantsd logging cannot initialize while stdio is open: {fd}")
except OSError:
pass
# Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
# for further forks.
with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
# Reinitialize logging for the daemon context.
init_rust_logger(self._log_level, self._log_show_rust_3rdparty)
result = setup_logging(
self._log_level,
log_dir=self._log_dir,
log_name=self.LOG_NAME,
native=self._native,
warnings_filter_regexes=self._bootstrap_options.for_global_scope(),
)
self._native.override_thread_logging_destination_to_just_pantsd()
# Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
# TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
# for `1,2`, and allow them to be used via `stdio_as`.
sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_handler)
sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, result.log_handler)
self._logger.debug("logging initialized")
yield (result.log_handler.stream, result.log_handler.native_filename)
def _setup_services(self, pants_services):
for service in pants_services.services:
self._logger.info(f"setting up service {service}")
service.setup(self._services)
@staticmethod
def _make_thread(service):
name = f"{service.__class__.__name__}Thread"
def target():
Native().override_thread_logging_destination_to_just_pantsd()
service.run()
t = threading.Thread(target=target, name=name)
t.daemon = True
return t
def _run_services(self, pants_services):
"""Service runner main loop."""
if not pants_services.services:
self._logger.critical("no services to run, bailing!")
return
service_thread_map = {
service: self._make_thread(service) for service in pants_services.services
}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info(f"starting service {service}")
try:
service_thread.start()
except (RuntimeError, FSEventService.ServiceError):
self.shutdown(service_thread_map)
raise PantsDaemon.StartupFailure(
f"service {service} failed to start, shutting down!"
)
# Once all services are started, write our pid.
self.write_pid()
self.write_metadata_by_name(
"pantsd", self.FINGERPRINT_KEY, ensure_text(self.options_fingerprint)
)
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise PantsDaemon.RuntimeFailure(
f"service failure for {service}, shutting down!"
)
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def run_sync(self):
"""Synchronously run pantsd."""
os.environ.pop("PYTHONPATH")
# Switch log output to the daemon's log stream from here forward.
# Also, register an exiter using os._exit to ensure we only close stdio streams once.
self._close_stdio()
with self._pantsd_logging() as (log_stream, log_filename), ExceptionSink.exiter_as(
lambda _: Exiter(exiter=os._exit)
):
# We don't have any stdio streams to log to anymore, so we log to a file.
# We don't override the faulthandler destination because the stream we get will proxy things
# via the rust logging code, and faulthandler needs to be writing directly to a real file
# descriptor. When pantsd logging was originally initialised, we already set up faulthandler
# to log to the correct file descriptor, so don't override it.
#
# We can get tracebacks of the pantsd process by tailing the pantsd log and sending it
# SIGUSR2.
ExceptionSink.reset_interactive_output_stream(
log_stream, override_faulthandler_destination=False,
)
# Reset the log location and the backtrace preference from the global bootstrap options.
global_bootstrap_options = self._bootstrap_options.for_global_scope()
ExceptionSink.reset_should_print_backtrace_to_terminal(
global_bootstrap_options.print_exception_stacktrace
)
ExceptionSink.reset_log_location(global_bootstrap_options.pants_workdir)
self._native.set_panic_handler()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title(f"pantsd [{self._build_root}]")
# Write service socket information to .pids.
self._write_named_sockets(self._services.port_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
spawn_control_env = dict(
PANTS_ENTRYPOINT=f"{__name__}:launch",
# The daemon should run under the same sys.path as us; so we ensure
# this. NB: It will scrub PYTHONPATH once started to avoid infecting
# its own unrelated subprocesses.
PYTHONPATH=os.pathsep.join(sys.path),
)
exec_env = {**os.environ, **spawn_control_env}
# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
cmd = [sys.executable] + sys.argv
spawn_control_env_vars = " ".join(f"{k}={v}" for k, v in spawn_control_env.items())
cmd_line = " ".join(cmd)
self._logger.debug(f"cmd is: {spawn_control_env_vars} {cmd_line}")
# TODO: Improve error handling on launch failures.
os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
def needs_launch(self):
"""Determines if pantsd needs to be launched.
N.B. This should always be called under care of the `lifecycle_lock`.
:returns: True if the daemon needs launching, False otherwise.
:rtype: bool
"""
new_fingerprint = self.options_fingerprint
self._logger.debug(
"pantsd: is_alive={self.is_alive()} new_fingerprint={new_fingerprint} current_fingerprint={self.fingerprint}"
)
return self.needs_restart(new_fingerprint)
def launch(self):
"""Launches pantsd in a subprocess.
N.B. This should always be called under care of the `lifecycle_lock`.
:returns: A Handle for the pantsd instance.
:rtype: PantsDaemon.Handle
"""
self.terminate(include_watchman=False)
self.watchman_launcher.maybe_launch()
self._logger.debug("launching pantsd")
self.daemon_spawn()
# Wait up to 60 seconds for pantsd to write its pidfile.
pantsd_pid = self.await_pid(60)
listening_port = self.read_named_socket("pailgun", int)
self._logger.debug(f"pantsd is running at pid {self.pid}, pailgun port is {listening_port}")
return self.Handle(pantsd_pid, listening_port, self._metadata_base_dir)
def terminate(self, include_watchman=True):
"""Terminates pantsd and watchman.
N.B. This should always be called under care of the `lifecycle_lock`.
"""
super().terminate()
if include_watchman:
self.watchman_launcher.terminate()
def needs_restart(self, option_fingerprint):
"""Overrides ProcessManager.needs_restart, to account for the case where pantsd is running
but we want to shutdown after this run.
:param option_fingerprint: A fingeprint of the global bootstrap options.
:return: True if the daemon needs to restart.
"""
should_shutdown_after_run = (
self._bootstrap_options.for_global_scope().shutdown_pantsd_after_run
)
return super().needs_restart(option_fingerprint) or (
self.is_alive() and should_shutdown_after_run
)
def launch():
"""An external entrypoint that spawns a new pantsd instance."""
PantsDaemon.Factory.create(OptionsBootstrapper.create()).run_sync()
|
webtransport_h3_server.py
|
import asyncio
import logging
import os
import ssl
import threading
import traceback
from enum import IntEnum
from urllib.parse import urlparse
from typing import Any, Dict, List, Optional, Tuple
# TODO(bashi): Remove import check suppressions once aioquic dependency is resolved.
from aioquic.buffer import UINT_VAR_MAX_SIZE, Buffer # type: ignore
from aioquic.asyncio import QuicConnectionProtocol, serve # type: ignore
from aioquic.asyncio.client import connect # type: ignore
from aioquic.h3.connection import H3_ALPN, FrameType, H3Connection # type: ignore
from aioquic.h3.events import H3Event, HeadersReceived, WebTransportStreamDataReceived, DatagramReceived # type: ignore
from aioquic.quic.configuration import QuicConfiguration # type: ignore
from aioquic.quic.connection import stream_is_unidirectional # type: ignore
from aioquic.quic.events import QuicEvent, ProtocolNegotiated, ConnectionTerminated # type: ignore
from aioquic.tls import SessionTicket # type: ignore
from tools.wptserve.wptserve import stash # type: ignore
"""
A WebTransport over HTTP/3 server for testing.
The server interprets the underlying protocols (WebTransport, HTTP/3 and QUIC)
and passes events to a particular webtransport handler. From the standpoint of
test authors, a webtransport handler is a Python script which contains some
callback functions. See handler.py for available callbacks.
"""
SERVER_NAME = 'webtransport-h3-server'
_logger: logging.Logger = logging.getLogger(__name__)
_doc_root: str = ""
class CapsuleType(IntEnum):
# Defined in
# https://www.ietf.org/archive/id/draft-ietf-webtrans-http3-01.html.
CLOSE_WEBTRANSPORT_SESSION = 0x2843
class H3Capsule:
"""
Represents the Capsule concept defined in
https://ietf-wg-masque.github.io/draft-ietf-masque-h3-datagram/draft-ietf-masque-h3-datagram.html#name-capsules.
"""
def __init__(self, type: int, data: bytes) -> None:
self.type = type
self.data = data
@staticmethod
def decode(data: bytes) -> Any:
"""
Returns an H3Capsule representing the given bytes.
"""
buffer = Buffer(data=data)
type = buffer.pull_uint_var()
length = buffer.pull_uint_var()
return H3Capsule(type, buffer.pull_bytes(length))
def encode(self) -> bytes:
"""
        Encodes this H3Capsule and returns the bytes.
"""
buffer = Buffer(capacity=len(self.data) + 2 * UINT_VAR_MAX_SIZE)
buffer.push_uint_var(self.type)
buffer.push_uint_var(len(self.data))
buffer.push_bytes(self.data)
return buffer.data
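# Illustrative round trip through the capsule encoding above:
#
#     capsule = H3Capsule(CapsuleType.CLOSE_WEBTRANSPORT_SESSION, b"\x00\x00\x00\x00")
#     decoded = H3Capsule.decode(capsule.encode())
#     assert decoded.type == capsule.type and decoded.data == capsule.data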
class WebTransportH3Protocol(QuicConnectionProtocol):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._handler: Optional[Any] = None
self._http: Optional[H3Connection] = None
self._session_stream_id: Optional[int] = None
self._session_stream_data: bytes = b""
self._allow_calling_session_closed = True
def quic_event_received(self, event: QuicEvent) -> None:
if isinstance(event, ProtocolNegotiated):
self._http = H3Connection(self._quic, enable_webtransport=True)
if self._http is not None:
for http_event in self._http.handle_event(event):
self._h3_event_received(http_event)
if isinstance(event, ConnectionTerminated):
self._call_session_closed(close_info=None, abruptly=True)
def _h3_event_received(self, event: H3Event) -> None:
if isinstance(event, HeadersReceived):
# Convert from List[Tuple[bytes, bytes]] to Dict[bytes, bytes].
# Only the last header will be kept when there are duplicate
# headers.
headers = {}
for header, value in event.headers:
headers[header] = value
method = headers.get(b":method")
protocol = headers.get(b":protocol")
if method == b"CONNECT" and protocol == b"webtransport":
self._handshake_webtransport(event, headers)
else:
self._send_error_response(event.stream_id, 400)
self._session_stream_id = event.stream_id
if self._session_stream_id == event.stream_id and\
isinstance(event, WebTransportStreamDataReceived):
self._session_stream_data += event.data
if event.stream_ended:
close_info = None
if len(self._session_stream_data) > 0:
capsule: H3Capsule =\
H3Capsule.decode(self._session_stream_data)
close_info = (0, b"")
if capsule.type == CapsuleType.CLOSE_WEBTRANSPORT_SESSION:
                        buffer = Buffer(data=capsule.data)
                        code = buffer.pull_uint32()
                        reason = buffer.pull_bytes(len(capsule.data) - 4)
# TODO(yutakahirano): Make sure `reason` is a
# UTF-8 text.
close_info = (code, reason)
self._call_session_closed(close_info, abruptly=False)
elif self._handler is not None:
if isinstance(event, WebTransportStreamDataReceived):
self._handler.stream_data_received(
stream_id=event.stream_id,
data=event.data,
stream_ended=event.stream_ended)
elif isinstance(event, DatagramReceived):
self._handler.datagram_received(data=event.data)
def _send_error_response(self, stream_id: int, status_code: int) -> None:
assert self._http is not None
headers = [(b"server", SERVER_NAME.encode()),
(b":status", str(status_code).encode())]
self._http.send_headers(stream_id=stream_id,
headers=headers,
end_stream=True)
def _handshake_webtransport(self, event: HeadersReceived,
request_headers: Dict[bytes, bytes]) -> None:
assert self._http is not None
path = request_headers.get(b":path")
if path is None:
# `:path` must be provided.
self._send_error_response(event.stream_id, 400)
return
# Create a handler using `:path`.
try:
self._handler = self._create_event_handler(
session_id=event.stream_id,
path=path,
request_headers=event.headers)
except IOError:
self._send_error_response(event.stream_id, 404)
return
response_headers = [
(b"server", SERVER_NAME.encode()),
]
self._handler.connect_received(response_headers=response_headers)
status_code = None
for name, value in response_headers:
if name == b":status":
status_code = value
break
if not status_code:
response_headers.append((b":status", b"200"))
self._http.send_headers(stream_id=event.stream_id,
headers=response_headers)
if status_code is None or status_code == b"200":
self._handler.session_established()
def _create_event_handler(self, session_id: int, path: bytes,
request_headers: List[Tuple[bytes, bytes]]) -> Any:
parsed = urlparse(path.decode())
file_path = os.path.join(_doc_root, parsed.path.lstrip("/"))
callbacks = {"__file__": file_path}
with open(file_path) as f:
exec(compile(f.read(), path, "exec"), callbacks)
session = WebTransportSession(self, session_id, request_headers)
return WebTransportEventHandler(session, callbacks)
def _call_session_closed(
self, close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
allow_calling_session_closed = self._allow_calling_session_closed
self._allow_calling_session_closed = False
if self._handler and allow_calling_session_closed:
self._handler.session_closed(close_info, abruptly)
class WebTransportSession:
"""
A WebTransport session.
"""
def __init__(self, protocol: WebTransportH3Protocol, session_id: int,
request_headers: List[Tuple[bytes, bytes]]) -> None:
self.session_id = session_id
self.request_headers = request_headers
self._protocol: WebTransportH3Protocol = protocol
self._http: H3Connection = protocol._http
        # Use a shared default path for all handlers so that different
# WebTransport sessions can access the same store easily.
self._stash_path = '/webtransport/handlers'
self._stash: Optional[stash.Stash] = None
self._dict_for_handlers: Dict[str, Any] = {}
@property
def stash(self) -> stash.Stash:
"""A Stash object for storing cross-session state."""
if self._stash is None:
address, authkey = stash.load_env_config()
self._stash = stash.Stash(self._stash_path, address, authkey)
return self._stash
@property
def dict_for_handlers(self) -> Dict[str, Any]:
"""A dictionary that handlers can attach arbitrary data."""
return self._dict_for_handlers
def stream_is_unidirectional(self, stream_id: int) -> bool:
"""Return True if the stream is unidirectional."""
return stream_is_unidirectional(stream_id)
def close(self, close_info: Optional[Tuple[int, bytes]]) -> None:
"""
Close the session.
        :param close_info: The close information to send.
"""
self._protocol._allow_calling_session_closed = False
assert self._protocol._session_stream_id is not None
session_stream_id = self._protocol._session_stream_id
if close_info is not None:
code = close_info[0]
reason = close_info[1]
buffer = Buffer(capacity=len(reason) + 4)
buffer.push_uint32(code)
buffer.push_bytes(reason)
capsule =\
H3Capsule(CapsuleType.CLOSE_WEBTRANSPORT_SESSION, buffer.data)
self.send_stream_data(session_stream_id, capsule.encode())
self.send_stream_data(session_stream_id, b'', end_stream=True)
self._protocol.transmit()
# TODO(yutakahirano): Reset all other streams.
# TODO(yutakahirano): Reject future stream open requests
# We need to wait for the stream data to arrive at the client, and then
# we need to close the connection. At this moment we're relying on the
# client's behavior.
# TODO(yutakahirano): Implement the above.
def create_unidirectional_stream(self) -> int:
"""
Create a unidirectional WebTransport stream and return the stream ID.
"""
return self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=True)
def create_bidirectional_stream(self) -> int:
"""
Create a bidirectional WebTransport stream and return the stream ID.
"""
stream_id = self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=False)
# TODO(bashi): Remove this workaround when aioquic supports receiving
# data on server-initiated bidirectional streams.
stream = self._http._get_or_create_stream(stream_id)
assert stream.frame_type is None
assert stream.session_id is None
stream.frame_type = FrameType.WEBTRANSPORT_STREAM
stream.session_id = self.session_id
return stream_id
def send_stream_data(self,
stream_id: int,
data: bytes,
end_stream: bool = False) -> None:
"""
Send data on the specific stream.
:param stream_id: The stream ID on which to send the data.
:param data: The data to send.
:param end_stream: If set to True, the stream will be closed.
"""
self._http._quic.send_stream_data(stream_id=stream_id,
data=data,
end_stream=end_stream)
def send_datagram(self, data: bytes) -> None:
"""
Send data using a datagram frame.
:param data: The data to send.
"""
self._http.send_datagram(flow_id=self.session_id, data=data)
def stop_stream(self, stream_id: int, code: int) -> None:
"""
Send a STOP_SENDING frame to the given stream.
        :param code: the reason for the error.
"""
self._http._quic.stop_stream(stream_id, code)
def reset_stream(self, stream_id: int, code: int) -> None:
"""
Send a RESET_STREAM frame to the given stream.
        :param code: the reason for the error.
"""
self._http._quic.reset_stream(stream_id, code)
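# Illustrative handler-side usage of the session API above (a sketch assuming a handler
# callback that received `session`):
#
#     stream_id = session.create_unidirectional_stream()
#     session.send_stream_data(stream_id, b"hello", end_stream=True)
#     session.send_datagram(b"ping")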
class WebTransportEventHandler:
def __init__(self, session: WebTransportSession,
callbacks: Dict[str, Any]) -> None:
self._session = session
self._callbacks = callbacks
def _run_callback(self, callback_name: str,
*args: Any, **kwargs: Any) -> None:
if callback_name not in self._callbacks:
return
try:
self._callbacks[callback_name](*args, **kwargs)
except Exception as e:
            _logger.warning(str(e))
traceback.print_exc()
def connect_received(self, response_headers: List[Tuple[bytes,
bytes]]) -> None:
self._run_callback("connect_received", self._session.request_headers,
response_headers)
def session_established(self) -> None:
self._run_callback("session_established", self._session)
def stream_data_received(self, stream_id: int, data: bytes,
stream_ended: bool) -> None:
self._run_callback("stream_data_received", self._session, stream_id,
data, stream_ended)
def datagram_received(self, data: bytes) -> None:
self._run_callback("datagram_received", self._session, data)
def session_closed(
self,
close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
self._run_callback(
"session_closed", self._session, close_info, abruptly=abruptly)
class SessionTicketStore:
"""
Simple in-memory store for session tickets.
"""
def __init__(self) -> None:
self.tickets: Dict[bytes, SessionTicket] = {}
def add(self, ticket: SessionTicket) -> None:
self.tickets[ticket.ticket] = ticket
def pop(self, label: bytes) -> Optional[SessionTicket]:
return self.tickets.pop(label, None)
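# Illustrative: tickets are keyed by their raw ticket bytes, and the store is wired into
# aioquic's serve() below via session_ticket_handler=store.add and
# session_ticket_fetcher=store.pop:
#
#     store = SessionTicketStore()
#     store.add(ticket)                   # called when a new ticket is issued
#     resumed = store.pop(ticket.ticket)  # returns the ticket once, or None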
class WebTransportH3Server:
"""
    A WebTransport over HTTP/3 server for testing.
:param host: Host from which to serve.
:param port: Port from which to serve.
:param doc_root: Document root for serving handlers.
:param cert_path: Path to certificate file to use.
:param key_path: Path to key file to use.
:param logger: a Logger object for this server.
"""
def __init__(self, host: str, port: int, doc_root: str, cert_path: str,
key_path: str, logger: Optional[logging.Logger]) -> None:
self.host = host
self.port = port
self.doc_root = doc_root
self.cert_path = cert_path
self.key_path = key_path
self.started = False
global _doc_root
_doc_root = self.doc_root
global _logger
if logger is not None:
_logger = logger
def start(self) -> None:
"""Start the server."""
self.server_thread = threading.Thread(
target=self._start_on_server_thread, daemon=True)
self.server_thread.start()
self.started = True
def _start_on_server_thread(self) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=False,
max_datagram_frame_size=65536,
)
_logger.info("Starting WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
configuration.load_cert_chain(self.cert_path, self.key_path)
ticket_store = SessionTicketStore()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(
serve(
self.host,
self.port,
configuration=configuration,
create_protocol=WebTransportH3Protocol,
session_ticket_fetcher=ticket_store.pop,
session_ticket_handler=ticket_store.add,
))
self.loop.run_forever()
def stop(self) -> None:
"""Stop the server."""
if self.started:
asyncio.run_coroutine_threadsafe(self._stop_on_server_thread(),
self.loop)
self.server_thread.join()
_logger.info("Stopped WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
self.started = False
async def _stop_on_server_thread(self) -> None:
self.loop.stop()
def server_is_running(host: str, port: int, timeout: float) -> bool:
"""
    Check whether the WebTransport over HTTP/3 server is running at the given `host` and
`port`.
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(_connect_server_with_timeout(host, port, timeout))
async def _connect_server_with_timeout(host: str, port: int, timeout: float) -> bool:
try:
await asyncio.wait_for(_connect_to_server(host, port), timeout=timeout)
except asyncio.TimeoutError:
_logger.warning("Failed to connect WebTransport over HTTP/3 server")
return False
return True
async def _connect_to_server(host: str, port: int) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=True,
verify_mode=ssl.CERT_NONE,
)
async with connect(host, port, configuration=configuration) as protocol:
await protocol.ping()
|
__init__.py
|
import os
import sched, time
import threading
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.label import Label
from kivy.uix.screenmanager import Screen
from kivy_garden.mapview import MapMarker, MapLayer
_app_name_ = 'navigation'
class navigationApp(Screen):
def __init__(self, **kwargs):
super(navigationApp, self).__init__(**kwargs)
self.name = _app_name_
self.root = App.get_running_app().root
        self.dir = os.path.dirname(__file__)
self.sched = sched.scheduler(time.time, time.sleep)
self.sched.enter(1, 10, self.update_coord)
layout = Builder.load_file("{}/navigation.kv".format(self.dir))
self.map = layout.ids.map
self.add_widget(layout)
def update_coord(self):
#print("UPDATE!")
self.car.lat += 0.001
self.map.center_on(self.map.lat+0.001, self.map.lon)
self.sched.enter(1, 10, self.update_coord)
def on_enter(self):
# Setup map
self.map.center_on(48.856614,2.3522219)
self.car = MapMarker(lat=48.856614, lon=2.3522219)
self.map.add_marker(self.car)
# Run auto-update
self.thread = threading.Thread(target=self.sched.run)
self.thread.start()
    def on_stop(self):
        if hasattr(self, 'thread'):
            # Best-effort shutdown: cancel pending scheduler events so sched.run() can return.
            for event in self.sched.queue:
                self.sched.cancel(event)
|
_channel.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import sys
import threading
import time
import logging
import grpc
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_UNARY_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,)
_UNARY_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,)
_STREAM_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,)
_STREAM_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,)
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
def _deadline(timeout):
if timeout is None:
return None, _INFINITE_FUTURE
else:
deadline = time.time() + timeout
return deadline, cygrpc.Timespec(deadline)
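# Illustrative:
#
#     deadline, deadline_timespec = _deadline(0.5)   # ~time.time() + 0.5 and its Timespec
#     deadline, deadline_timespec = _deadline(None)  # (None, _INFINITE_FUTURE)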
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
if until is None:
condition.wait()
else:
remaining = until - time.time()
if remaining < 0:
raise grpc.FutureTimeoutError()
else:
condition.wait(timeout=remaining)
_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = (
'Internal gRPC call error %d. ' +
'Please report to https://github.com/grpc/grpc/issues')
def _check_call_error(call_error, metadata):
if call_error == cygrpc.CallError.invalid_metadata:
raise ValueError('metadata was invalid: %s' % metadata)
elif call_error != cygrpc.CallError.ok:
raise ValueError(_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
def _call_error_set_RPCstate(state, call_error, metadata):
if call_error == cygrpc.CallError.invalid_metadata:
_abort(state, grpc.StatusCode.INTERNAL,
'metadata was invalid: %s' % metadata)
else:
_abort(state, grpc.StatusCode.INTERNAL,
_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
class _RPCState(object):
def __init__(self, due, initial_metadata, trailing_metadata, code, details):
self.condition = threading.Condition()
# The cygrpc.OperationType objects representing events due from the RPC's
# completion queue.
self.due = set(due)
self.initial_metadata = initial_metadata
self.response = None
self.trailing_metadata = trailing_metadata
self.code = code
self.details = details
# The semantics of grpc.Future.cancel and grpc.Future.cancelled are
# slightly wonky, so they have to be tracked separately from the rest of the
# result of the RPC. This field tracks whether cancellation was requested
# prior to termination of the RPC.
self.cancelled = False
self.callbacks = []
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
callbacks = []
for batch_operation in event.batch_operations:
operation_type = batch_operation.type
state.due.remove(operation_type)
if operation_type == cygrpc.OperationType.receive_initial_metadata:
state.initial_metadata = batch_operation.received_metadata
elif operation_type == cygrpc.OperationType.receive_message:
serialized_response = batch_operation.received_message.bytes()
if serialized_response is not None:
response = _common.deserialize(serialized_response,
response_deserializer)
if response is None:
details = 'Exception deserializing response!'
_abort(state, grpc.StatusCode.INTERNAL, details)
else:
state.response = response
elif operation_type == cygrpc.OperationType.receive_status_on_client:
state.trailing_metadata = batch_operation.received_metadata
if state.code is None:
code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
batch_operation.received_status_code)
if code is None:
state.code = grpc.StatusCode.UNKNOWN
state.details = _unknown_code_details(
batch_operation.received_status_code,
batch_operation.received_status_details)
else:
state.code = code
state.details = batch_operation.received_status_details
callbacks.extend(state.callbacks)
state.callbacks = None
return callbacks
def _event_handler(state, call, response_deserializer):
def handle_event(event):
with state.condition:
callbacks = _handle_event(event, state, response_deserializer)
state.condition.notify_all()
done = not state.due
for callback in callbacks:
callback()
return call if done else None
return handle_event
def _consume_request_iterator(request_iterator, state, call,
request_serializer):
event_handler = _event_handler(state, call, None)
def consume_request_iterator():
while True:
try:
request = next(request_iterator)
except StopIteration:
break
except Exception: # pylint: disable=broad-except
logging.exception("Exception iterating requests!")
call.cancel()
_abort(state, grpc.StatusCode.UNKNOWN,
"Exception iterating requests!")
return
serialized_request = _common.serialize(request, request_serializer)
with state.condition:
if state.code is None and not state.cancelled:
if serialized_request is None:
call.cancel()
details = 'Exception serializing request!'
_abort(state, grpc.StatusCode.INTERNAL, details)
return
else:
operations = (cygrpc.operation_send_message(
serialized_request, _EMPTY_FLAGS),)
call.start_client_batch(operations, event_handler)
state.due.add(cygrpc.OperationType.send_message)
while True:
state.condition.wait()
if state.code is None:
if cygrpc.OperationType.send_message not in state.due:
break
else:
return
else:
return
with state.condition:
if state.code is None:
operations = (
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),)
call.start_client_batch(operations, event_handler)
state.due.add(cygrpc.OperationType.send_close_from_client)
def stop_consumption_thread(timeout): # pylint: disable=unused-argument
with state.condition:
if state.code is None:
call.cancel()
state.cancelled = True
_abort(state, grpc.StatusCode.CANCELLED, 'Cancelled!')
state.condition.notify_all()
consumption_thread = _common.CleanupThread(
stop_consumption_thread, target=consume_request_iterator)
consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
def __init__(self, state, call, response_deserializer, deadline):
super(_Rendezvous, self).__init__()
self._state = state
self._call = call
self._response_deserializer = response_deserializer
self._deadline = deadline
def cancel(self):
with self._state.condition:
if self._state.code is None:
self._call.cancel()
self._state.cancelled = True
_abort(self._state, grpc.StatusCode.CANCELLED, 'Cancelled!')
self._state.condition.notify_all()
return False
def cancelled(self):
with self._state.condition:
return self._state.cancelled
def running(self):
with self._state.condition:
return self._state.code is None
def done(self):
with self._state.condition:
return self._state.code is not None
def result(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return self._state.response
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
raise self
def exception(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
return self
def traceback(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
try:
raise self
except grpc.RpcError:
return sys.exc_info()[2]
def add_done_callback(self, fn):
with self._state.condition:
if self._state.code is None:
self._state.callbacks.append(lambda: fn(self))
return
fn(self)
def _next(self):
with self._state.condition:
if self._state.code is None:
event_handler = _event_handler(self._state, self._call,
self._response_deserializer)
self._call.start_client_batch(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),),
event_handler)
self._state.due.add(cygrpc.OperationType.receive_message)
elif self._state.code is grpc.StatusCode.OK:
raise StopIteration()
else:
raise self
while True:
self._state.condition.wait()
if self._state.response is not None:
response = self._state.response
self._state.response = None
return response
elif cygrpc.OperationType.receive_message not in self._state.due:
if self._state.code is grpc.StatusCode.OK:
raise StopIteration()
elif self._state.code is not None:
raise self
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def is_active(self):
with self._state.condition:
return self._state.code is None
def time_remaining(self):
if self._deadline is None:
return None
else:
return max(self._deadline - time.time(), 0)
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def initial_metadata(self):
with self._state.condition:
while self._state.initial_metadata is None:
self._state.condition.wait()
return self._state.initial_metadata
def trailing_metadata(self):
with self._state.condition:
while self._state.trailing_metadata is None:
self._state.condition.wait()
return self._state.trailing_metadata
def code(self):
with self._state.condition:
while self._state.code is None:
self._state.condition.wait()
return self._state.code
def details(self):
with self._state.condition:
while self._state.details is None:
self._state.condition.wait()
return _common.decode(self._state.details)
def _repr(self):
with self._state.condition:
if self._state.code is None:
return '<_Rendezvous object of in-flight RPC>'
else:
return '<_Rendezvous of RPC that terminated with ({}, {})>'.format(
self._state.code, _common.decode(self._state.details))
def __repr__(self):
return self._repr()
def __str__(self):
return self._repr()
def __del__(self):
with self._state.condition:
if self._state.code is None:
self._call.cancel()
self._state.cancelled = True
self._state.code = grpc.StatusCode.CANCELLED
self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
deadline, deadline_timespec = _deadline(timeout)
serialized_request = _common.serialize(request, request_serializer)
if serialized_request is None:
state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
'Exception serializing request!')
rendezvous = _Rendezvous(state, None, None, deadline)
return deadline, deadline_timespec, None, rendezvous
else:
return deadline, deadline_timespec, serialized_request, None
def _end_unary_response_blocking(state, call, with_call, deadline):
if state.code is grpc.StatusCode.OK:
if with_call:
rendezvous = _Rendezvous(state, call, None, deadline)
return state.response, rendezvous
else:
return state.response
else:
raise _Rendezvous(state, None, None, deadline)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def _prepare(self, request, timeout, metadata):
deadline, deadline_timespec, serialized_request, rendezvous = (
_start_unary_request(request, timeout, self._request_serializer))
if serialized_request is None:
return None, None, None, None, rendezvous
else:
state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
operations = (
cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
cygrpc.operation_receive_message(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
return state, operations, deadline, deadline_timespec, None
def _blocking(self, request, timeout, metadata, credentials):
state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
request, timeout, metadata)
if rendezvous:
raise rendezvous
else:
completion_queue = cygrpc.CompletionQueue()
call = self._channel.create_call(None, 0, completion_queue,
self._method, None,
deadline_timespec)
if credentials is not None:
call.set_credentials(credentials._credentials)
call_error = call.start_client_batch(operations, None)
_check_call_error(call_error, metadata)
_handle_event(completion_queue.poll(), state,
self._response_deserializer)
return state, call, deadline
def __call__(self, request, timeout=None, metadata=None, credentials=None):
state, call, deadline = self._blocking(request, timeout, metadata,
credentials)
return _end_unary_response_blocking(state, call, False, deadline)
def with_call(self, request, timeout=None, metadata=None, credentials=None):
state, call, deadline = self._blocking(request, timeout, metadata,
credentials)
return _end_unary_response_blocking(state, call, True, deadline)
def future(self, request, timeout=None, metadata=None, credentials=None):
state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
request, timeout, metadata)
if rendezvous:
return rendezvous
else:
call, drive_call = self._managed_call(None, 0, self._method, None,
deadline_timespec)
if credentials is not None:
call.set_credentials(credentials._credentials)
event_handler = _event_handler(state, call,
self._response_deserializer)
with state.condition:
call_error = call.start_client_batch(operations, event_handler)
if call_error != cygrpc.CallError.ok:
_call_error_set_RPCstate(state, call_error, metadata)
return _Rendezvous(state, None, None, deadline)
drive_call()
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __call__(self, request, timeout=None, metadata=None, credentials=None):
deadline, deadline_timespec, serialized_request, rendezvous = (
_start_unary_request(request, timeout, self._request_serializer))
if serialized_request is None:
raise rendezvous
else:
state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
call, drive_call = self._managed_call(None, 0, self._method, None,
deadline_timespec)
if credentials is not None:
call.set_credentials(credentials._credentials)
event_handler = _event_handler(state, call,
self._response_deserializer)
with state.condition:
call.start_client_batch(
(cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
event_handler)
operations = (
cygrpc.operation_send_initial_metadata(
metadata, _EMPTY_FLAGS), cygrpc.operation_send_message(
serialized_request, _EMPTY_FLAGS),
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
call_error = call.start_client_batch(operations, event_handler)
if call_error != cygrpc.CallError.ok:
_call_error_set_RPCstate(state, call_error, metadata)
return _Rendezvous(state, None, None, deadline)
drive_call()
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def _blocking(self, request_iterator, timeout, metadata, credentials):
deadline, deadline_timespec = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
completion_queue = cygrpc.CompletionQueue()
call = self._channel.create_call(None, 0, completion_queue,
self._method, None, deadline_timespec)
if credentials is not None:
call.set_credentials(credentials._credentials)
with state.condition:
call.start_client_batch(
(cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
None)
operations = (
cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
cygrpc.operation_receive_message(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
call_error = call.start_client_batch(operations, None)
_check_call_error(call_error, metadata)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer)
while True:
event = completion_queue.poll()
with state.condition:
_handle_event(event, state, self._response_deserializer)
state.condition.notify_all()
if not state.due:
break
return state, call, deadline
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
state, call, deadline = self._blocking(request_iterator, timeout,
metadata, credentials)
return _end_unary_response_blocking(state, call, False, deadline)
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
state, call, deadline = self._blocking(request_iterator, timeout,
metadata, credentials)
return _end_unary_response_blocking(state, call, True, deadline)
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
deadline, deadline_timespec = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
call, drive_call = self._managed_call(None, 0, self._method, None,
deadline_timespec)
if credentials is not None:
call.set_credentials(credentials._credentials)
event_handler = _event_handler(state, call, self._response_deserializer)
with state.condition:
call.start_client_batch(
(cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
event_handler)
operations = (
cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
cygrpc.operation_receive_message(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
call_error = call.start_client_batch(operations, event_handler)
if call_error != cygrpc.CallError.ok:
_call_error_set_RPCstate(state, call_error, metadata)
return _Rendezvous(state, None, None, deadline)
drive_call()
_consume_request_iterator(request_iterator, state, call,
self._request_serializer)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
deadline, deadline_timespec = _deadline(timeout)
state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
call, drive_call = self._managed_call(None, 0, self._method, None,
deadline_timespec)
if credentials is not None:
call.set_credentials(credentials._credentials)
event_handler = _event_handler(state, call, self._response_deserializer)
with state.condition:
call.start_client_batch(
(cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),),
event_handler)
operations = (
cygrpc.operation_send_initial_metadata(metadata, _EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
call_error = call.start_client_batch(operations, event_handler)
if call_error != cygrpc.CallError.ok:
_call_error_set_RPCstate(state, call_error, metadata)
return _Rendezvous(state, None, None, deadline)
drive_call()
_consume_request_iterator(request_iterator, state, call,
self._request_serializer)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.completion_queue = cygrpc.CompletionQueue()
self.managed_calls = None
def _run_channel_spin_thread(state):
def channel_spin():
while True:
event = state.completion_queue.poll()
completed_call = event.tag(event)
if completed_call is not None:
with state.lock:
state.managed_calls.remove(completed_call)
if not state.managed_calls:
state.managed_calls = None
return
def stop_channel_spin(timeout): # pylint: disable=unused-argument
with state.lock:
if state.managed_calls is not None:
for call in state.managed_calls:
call.cancel()
channel_spin_thread = _common.CleanupThread(
stop_channel_spin, target=channel_spin)
channel_spin_thread.start()
def _channel_managed_call_management(state):
def create(parent, flags, method, host, deadline):
"""Creates a managed cygrpc.Call and a function to call to drive it.
If operations are successfully added to the returned cygrpc.Call, the
returned function must be called. If operations are not successfully added
to the returned cygrpc.Call, the returned function must not be called.
Args:
parent: A cygrpc.Call to be used as the parent of the created call.
flags: An integer bitfield of call flags.
method: The RPC method.
host: A host string for the created call.
deadline: A cygrpc.Timespec to be the deadline of the created call.
Returns:
A cygrpc.Call with which to conduct an RPC and a function to call if
operations are successfully started on the call.
"""
call = state.channel.create_call(parent, flags, state.completion_queue,
method, host, deadline)
def drive():
with state.lock:
if state.managed_calls is None:
state.managed_calls = set((call,))
_run_channel_spin_thread(state)
else:
state.managed_calls.add(call)
return call, drive
return create
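# Illustrative use of the management function above, mirroring the multi-callables later
# in this module:
#
#     create = _channel_managed_call_management(state)
#     call, drive_call = create(None, 0, method, host, deadline_timespec)
#     call_error = call.start_client_batch(operations, event_handler)
#     if call_error == cygrpc.CallError.ok:
#         drive_call()  # drive only after operations were added successfully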
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
connectivity = initial_connectivity
callbacks = initial_callbacks
while True:
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
connectivity)
with state.lock:
callbacks = _deliveries(state)
if callbacks:
connectivity = state.connectivity
else:
state.delivering = False
return
def _spawn_delivery(state, callbacks):
delivering_thread = threading.Thread(
target=_deliver, args=(state, state.connectivity, callbacks,))
delivering_thread.start()
state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
try_to_connect = initial_try_to_connect
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
callbacks = tuple(callback
for callback, unused_but_known_to_be_none_connectivity
in state.callbacks_and_connectivities)
for callback_and_connectivity in state.callbacks_and_connectivities:
callback_and_connectivity[1] = state.connectivity
if callbacks:
_spawn_delivery(state, callbacks)
completion_queue = cygrpc.CompletionQueue()
while True:
channel.watch_connectivity_state(connectivity,
cygrpc.Timespec(time.time() + 0.2),
completion_queue, None)
event = completion_queue.poll()
with state.lock:
if not state.callbacks_and_connectivities and not state.try_to_connect:
state.polling = False
state.connectivity = None
break
try_to_connect = state.try_to_connect
state.try_to_connect = False
if event.success or try_to_connect:
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
if not state.delivering:
# NOTE(nathaniel): The field is only ever used as a
# sequence so it's fine that both lists and tuples are
# assigned to it.
callbacks = _deliveries(state) # pylint: disable=redefined-variable-type
if callbacks:
_spawn_delivery(state, callbacks)
def _moot(state):
with state.lock:
del state.callbacks_and_connectivities[:]
def _subscribe(state, callback, try_to_connect):
with state.lock:
if not state.callbacks_and_connectivities and not state.polling:
polling_thread = _common.CleanupThread(
lambda timeout: _moot(state),
target=_poll_connectivity,
args=(state, state.channel, bool(try_to_connect)))
polling_thread.start()
state.polling = True
state.callbacks_and_connectivities.append([callback, None])
elif not state.delivering and state.connectivity is not None:
_spawn_delivery(state, (callback,))
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append(
[callback, state.connectivity])
else:
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity
) in enumerate(state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _options(options):
return list(options) + [
(cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)
]
class Channel(grpc.Channel):
"""A cygrpc.Channel-backed implementation of grpc.Channel."""
def __init__(self, target, options, credentials):
"""Constructor.
Args:
target: The target to which to connect.
options: Configuration options for the channel.
credentials: A cygrpc.ChannelCredentials or None.
"""
self._channel = cygrpc.Channel(
_common.encode(target),
_common.channel_args(_options(options)), credentials)
self._call_state = _ChannelCallState(self._channel)
self._connectivity_state = _ChannelConnectivityState(self._channel)
# TODO(https://github.com/grpc/grpc/issues/9884)
# Temporary work around UNAVAILABLE issues
# Remove this once c-core has retry support
_subscribe(self._connectivity_state, lambda *args: None, None)
def subscribe(self, callback, try_to_connect=None):
_subscribe(self._connectivity_state, callback, try_to_connect)
def unsubscribe(self, callback):
_unsubscribe(self._connectivity_state, callback)
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryUnaryMultiCallable(
self._channel,
_channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryStreamMultiCallable(
self._channel,
_channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamUnaryMultiCallable(
self._channel,
_channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamStreamMultiCallable(
self._channel,
_channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def __del__(self):
_moot(self._connectivity_state)
|
runtests.py
|
#!/usr/bin/env python
from __future__ import print_function
import atexit
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from collections import defaultdict
except ImportError:
class defaultdict(object):
def __init__(self, default_factory=lambda : None):
self._dict = {}
self.default_factory = default_factory
def __getitem__(self, key):
if key not in self._dict:
self._dict[key] = self.default_factory()
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __contains__(self, key):
return key in self._dict
def __repr__(self):
return repr(self._dict)
def __nonzero__(self):
return bool(self._dict)
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
CY3_DIR = None
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
from distutils import ccompiler
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
# late import to accommodate for setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see http://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:numpy_old': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
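# Hedged, illustrative example of the def_to_cdef() transformation above; the
# helper below is hypothetical and never called by the test runner.
def _example_def_to_cdef():
    src = (
        'def double(int x):\n'
        '    """\n'
        '    >>> double(2)\n'
        '    4\n'
        '    """\n'
        '    return 2 * x\n'
    )
    # Roughly yields a plain "def double(x)" wrapper that forwards to a
    # generated "cdef double_c(int x)" implementation holding the body.
    return def_to_cdef(src)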
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_old_numpy_extension(ext):
update_numpy_extension(ext, set_api17_macro=False)
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro:
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
    """
    Updates C++11 extensions so that they build with gcc > 4.8 or clang.
    """
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
if float(compiler_version) > 4.8:
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
    """
    Finds the C or C++ compiler's version banner by running it via Popen.
    """
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
    """
    Gcc 4.2 supports OpenMP 2.5, and gcc 4.4 implements OpenMP 3.0.  We don't
    (currently) check for other compilers.
    Returns a two-tuple of (CFLAGS, LDFLAGS) needed to build the OpenMP extension.
    """
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
compiler_version = gcc_version.group(1)
    # Compare version components numerically; a plain string comparison would
    # sort e.g. '10' before '4'.
    if compiler_version and tuple(map(int, compiler_version.split('.'))) >= (4, 2):
return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:numpy_old' : update_old_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
(2,7) : (operator.lt, lambda x: x in ['run.withstat_py27', # multi context with statement
'run.yield_inside_lambda',
'run.test_dictviews',
'run.pyclass_special_methods',
'run.set_literals',
]),
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2'
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
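# Minimal usage sketch of memoize() (hypothetical helper, never called by the
# runner): repeated calls with the same argument return the cached result.
# parse_tags() and list_unchanging_dir below rely on exactly this behaviour.
def _example_memoize_usage():
    @memoize
    def _read_file(path):
        with io_open(path, encoding='ISO-8859-1') as f:
            return f.read()
    return _read_file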
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
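# Illustrative sketch of the tag header that parse_tags() reads from the top
# of a test file (hypothetical contents):
#
#   # mode: run
#   # tag: numpy, openmp
#   # ticket: 123
#
# would yield {'mode': ['run'], 'tag': ['numpy', 'openmp'], 'ticket': ['123']}.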
list_unchanging_dir = memoize(lambda x: os.listdir(x))
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self):
self.output = []
self.write = self.output.append
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
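# Hedged example of the compiler output lines ErrorWriter understands; the
# helper is hypothetical and never called by the runner.
def _example_error_writer():
    writer = ErrorWriter()
    writer.write("errors.pyx:10:4: undeclared name not builtin: foo\n")
    writer.write("warning: errors.pyx:12:0: Unused entry 'bar'\n")
    # geterrors()   -> ["10:4: undeclared name not builtin: foo"]
    # getwarnings() -> ["12:0: Unused entry 'bar'"]
    return writer.geterrors(), writer.getwarnings()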
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items()):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
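# Minimal usage sketch of Stats (hypothetical helper, never called by the
# runner): the test classes below wrap each phase in ``stats.time(...)`` and
# the accumulated totals are printed at the end of the run.
def _example_stats_usage():
    stats = Stats()
    with stats.time('run.example_test', 'c', 'cython'):
        pass  # ... compile or run the test here ...
    return stats.test_times['cython']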
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if sys.platform not in ['win32'] and self.add_embedded_test:
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir, self.cleanup_workdir, stats=self.stats))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if skip_c(tags) and 'c' in languages:
languages = list(languages)
languages.remove('c')
elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
languages = list(languages)
languages.remove('cpp')
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
from pythran import __version__ as pythran_version
pythran_ext = (
pythran.config.make_extension(python=True)
if pythran_version >= '0.9' or pythran_version >= '0.8.7'
else pythran.config.make_extension()
)
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list ]
return tests
def build_test(self, test_class, path, workdir, module, tags, language,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % str(preparse)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
fork=self.fork,
language_level=self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
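# Hedged example of what skip_c() reacts to (hypothetical helper, never called
# by the runner): a "# distutils: language = c++" header marks a test as
# C++-only, so the plain C backend is skipped for it.
def _example_skip_c():
    tags = defaultdict(list)
    tags['distutils'].append('language = c++')
    return skip_c(tags)  # True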
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
        # MSVC: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s) %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in ('warning_errors', 'clear_to_none', 'error_on_unknown_names', 'error_on_uninitialized')
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
if not cleanup_c_files:
if (rmfile[-2:] in (".c", ".h") or
rmfile[-4:] == ".cpp" or
rmfile.endswith(".html") and rmfile.startswith(self.module)):
continue
is_shared_obj = rmfile.endswith(".so") or rmfile.endswith(".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter()
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter()
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Main import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Main import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def shortDescription(self):
if self.cython_only:
return CythonCompileTestCase.shortDescription(self)
else:
return "compiling (%s%s) and running %s" % (self.language, "/pythran" if self.pythran_dir is not None else "", self.name)
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
# Py2.6 lacks "_TextTestResult.skipped"
failures, errors, skipped = len(result.failures), len(result.errors), len(getattr(result, 'skipped', []))
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
output = None
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
output = open(result_file, 'wb')
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
try:
if output is not None:
output.close()
except:
pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
if result_code & 255:
raise Exception("Tests in module '%s' were unexpectedly killed by signal %d"%
(module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
input = open(result_file, 'rb')
try:
PartialTestResult.join_results(result, pickle.load(input))
finally:
input.close()
if result_code:
raise Exception("Tests in module '%s' exited with status %d" %
(module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run((
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
))
if mypy_result[2]:
self.fail(mypy_result[0])
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
try:
self.skipped
except AttributeError:
self.skipped = [] # Py2.6
def strip_error_results(self, results):
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
try:
result.skipped.extend(skipped)
except AttributeError:
pass # Py2.6
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.name)
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "tox.ini")
paths = glob.glob(os.path.join(self.cython_dir, "**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in blacklist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
commands = (self.commands
.replace("CYTHON", "PYTHON %s" % os.path.join(self.cython_root, 'cython.py'))
.replace("PYTHON", sys.executable))
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
env['PYTHONPATH'] = self.cython_syspath + os.pathsep + (old_path or '')
cmd = []
out = []
err = []
for command_no, command in enumerate(filter(None, commands.splitlines()), 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if ' setup.py ' in command else 'etoe-run'):
p = subprocess.Popen(command,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
env=env)
_out, _err = p.communicate()
cmd.append(command)
out.append(_out)
err.append(_err)
res = p.returncode
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = 'cython.py'
if sys.version_info[0] >=3 and CY3_DIR:
cython = os.path.join(CY3_DIR, cython)
cython = os.path.abspath(os.path.join('..', '..', cython))
self.assertEqual(0, os.system(
"make PYTHON='%s' CYTHON='%s' LIBDIR1='%s' test > make.output" % (sys.executable, cython, libdir)))
try:
os.remove('make.output')
except OSError:
pass
class MissingDependencyExcluder(object):
def __init__(self, deps):
# deps: { matcher func : module name }
self.exclude_matchers = []
for matcher, mod in deps.items():
try:
__import__(mod)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
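# Hedged sketch of how this excluder pairs with EXT_DEP_MODULES defined above
# (hypothetical helper, never called by the runner): tests tagged 'tag:numpy'
# are silently dropped when numpy cannot be imported.
def _example_missing_dependency_excluder():
    exclude = MissingDependencyExcluder({'tag:numpy': 'numpy'})
    # True only if numpy is missing and the test actually carries the tag.
    return exclude('run.numpy_test', {'tag': ['numpy']})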
class VersionDependencyExcluder(object):
def __init__(self, deps):
# deps: { version : matcher func }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = (testname in self.excludes
or testname.split('.')[-1] in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
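# Hedged example of the exclude-list file format FileListExcluder reads
# (hypothetical contents): one test name per line, '#' starts a comment line.
#
#   # tests that hang on this platform
#   run.parallel_example
#   build.slow_example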
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
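# Illustrative examples of the two selector forms string_selector() returns
# (hypothetical patterns):
#
#   string_selector('tag:openmp')  # TagsSelector: matches tests tagged 'openmp'
#   string_selector('run.dict')    # RegExSelector: case-insensitive search on the name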
class ShardExcludeSelector(object):
    # This is an exclude selector, so it can override the (include) selectors.
    # It may not provide a uniform distribution (in time or count), but it is a
    # deterministic partition of the tests, which is important.
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=sys.version_info[0] < 3):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
hashval = _hash(testname) & 0x7fffffff if _is_py2 else _hash(testname.encode())
return hashval % self.shard_count != self.shard_num
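# Minimal sketch of the sharding behaviour (hypothetical helper, never called
# by the runner): every worker applies the same CRC32-based partition, so each
# test name lands in exactly one shard.
def _example_shard_selector():
    selector = ShardExcludeSelector(shard_num=0, shard_count=4)
    # True means "excluded from shard 0", i.e. the test belongs to another shard.
    return selector('run.some_test')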
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.currentThread()
blocking_threads = []
for t in threading.enumerate():
if not t.isAlive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.isAlive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
global DISTDIR, WITH_CYTHON
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
    # The code style checks rely on recursive glob ('**'), which requires Python 3.5+.
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.shard_count > 1 and options.shard_num == -1:
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
errors = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread():
for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
errors.append(shard_num)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if errors:
sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread():
_, stats, return_code = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
from datetime import datetime
from time import sleep
interval = _xrange(interval * 4)
now = datetime.now
write = sys.__stderr__.write
stop = False
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2.6 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
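# Illustrative sketch, not part of the original test runner: how the
# time_stamper_thread() context manager above is meant to be used.  The body
# of the `with` block stands in for a long-running test phase.
def _example_time_stamper_usage():
    with time_stamper_thread(interval=10):
        # While this block runs, a '#### <timestamp>' line is written to
        # stderr every 10 seconds, which makes slow phases visible in the
        # build logs.
        pass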
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Main import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Main import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CFLAGS.append("-DCYTHON_REFNANNY=1")
if xml_output_dir and options.fork:
# doesn't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
    # which depend on them (by prefix).
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'travis_macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
try:
import pycodestyle
except ImportError:
# Hack to make the exclusion visible.
missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
else:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
if sys.version_info < (2, 7):
sys.stderr.write("--failfast not supported with Python < 2.7\n")
else:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
if options.exit_ok:
return options.shard_num, stats, 0
else:
return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
test_bigquery.py
import unittest
import os
import json
from unittest.mock import patch
import threading
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from google.cloud import bigquery
from google.auth.exceptions import DefaultCredentialsError
from google.cloud.bigquery._http import Connection
from kaggle_gcp import KaggleKernelCredentials, PublicBigqueryClient, _DataProxyConnection
import kaggle_secrets
class TestBigQuery(unittest.TestCase):
API_BASE_URL = "http://127.0.0.1:2121"
def _test_integration(self, client):
class HTTPHandler(BaseHTTPRequestHandler):
called = False
bearer_header_found = False
def do_HEAD(self):
self.send_response(200)
def do_GET(self):
HTTPHandler.called = True
HTTPHandler.bearer_header_found = any(
k for k in self.headers if k == "authorization" and self.headers[k] == "Bearer secret")
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
sample_dataset = {
"id": "bigqueryproject:datasetname",
"datasetReference": {
"datasetId": "datasetname",
"projectId": "bigqueryproject"
}
}
self.wfile.write(json.dumps({"kind": "bigquery#datasetList", "datasets": [sample_dataset]}).encode("utf-8"))
server_address = urlparse(self.API_BASE_URL)
with HTTPServer((server_address.hostname, server_address.port), HTTPHandler) as httpd:
threading.Thread(target=httpd.serve_forever).start()
for dataset in client.list_datasets():
self.assertEqual(dataset.dataset_id, "datasetname")
httpd.shutdown()
self.assertTrue(
HTTPHandler.called, msg="Fake server was not called from the BQ client, but should have been.")
self.assertTrue(
HTTPHandler.bearer_header_found, msg="authorization header was missing from the BQ request.")
def _setup_mocks(self, api_url_mock):
api_url_mock.__str__.return_value = self.API_BASE_URL
@patch.object(Connection, 'API_BASE_URL')
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_connected_account(self, mock_access_token, ApiUrlMock):
self._setup_mocks(ApiUrlMock)
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
with env:
client = bigquery.Client(
project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
self._test_integration(client)
@patch.object(Connection, 'API_BASE_URL')
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_empty_integrations(self, mock_access_token, ApiUrlMock):
self._setup_mocks(ApiUrlMock)
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', '')
with env:
client = bigquery.Client(
project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
self._test_integration(client)
@patch.object(Connection, 'API_BASE_URL')
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_connected_account_unrelated_integrations(self, mock_access_token, ApiUrlMock):
self._setup_mocks(ApiUrlMock)
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'GCS:ANOTHER_ONE')
with env:
client = bigquery.Client(
project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
self._test_integration(client)
@patch.object(Connection, 'API_BASE_URL')
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_connected_account_default_credentials(self, mock_access_token, ApiUrlMock):
self._setup_mocks(ApiUrlMock)
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
with env:
client = bigquery.Client(project='ANOTHER_PROJECT')
self._test_integration(client)
@patch.object(Connection, 'API_BASE_URL')
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_project_with_env_var_project_default_credentials(self, mock_access_token, ApiUrlMock):
self._setup_mocks(ApiUrlMock)
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
env.set('GOOGLE_CLOUD_PROJECT', 'ANOTHER_PROJECT')
with env:
client = bigquery.Client()
self._test_integration(client)
@patch.object(Connection, 'API_BASE_URL')
@patch.object(kaggle_secrets.UserSecretsClient, 'get_bigquery_access_token', return_value=('secret',1000))
def test_simultaneous_clients(self, mock_access_token, ApiUrlMock):
self._setup_mocks(ApiUrlMock)
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
with env:
proxy_client = bigquery.Client()
bq_client = bigquery.Client(
project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
self._test_integration(bq_client)
# Verify that proxy client is still going to proxy to ensure global Connection
# isn't being modified.
self.assertNotEqual(type(proxy_client._connection), KaggleKernelCredentials)
self.assertEqual(type(proxy_client._connection), _DataProxyConnection)
def test_no_project_with_connected_account(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
with env:
with self.assertRaises(DefaultCredentialsError):
# TODO(vimota): Handle this case, either default to Kaggle Proxy or use some default project
# by the user or throw a custom exception.
client = bigquery.Client()
self._test_integration(client)
def test_magics_with_connected_account_default_credentials(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'BIGQUERY')
with env:
import sitecustomize
sitecustomize.init()
from google.cloud.bigquery import magics
self.assertEqual(type(magics.context._credentials), KaggleKernelCredentials)
magics.context.credentials = None
def test_magics_without_connected_account(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
with env:
import sitecustomize
sitecustomize.init()
from google.cloud.bigquery import magics
self.assertIsNone(magics.context._credentials)
tpu_estimator.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import _tpu_estimator_embedding
from tensorflow.python.tpu import error_handling
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_config
from tensorflow.python.tpu import tpu_context
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu import util as util_lib
from tensorflow.python.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow.python.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow.python.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
from tensorflow.python.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
if ops.get_to_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)) is None:
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
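# Illustrative sketch, not part of the original module: _is_iterable() above
# simply defers to iter(), so containers and generators count as iterable
# while plain scalars do not.
def _example_is_iterable_checks():
  assert _is_iterable([1, 2, 3])
  assert _is_iterable({'a': 1})
  assert _is_iterable(x for x in range(3))
  assert not _is_iterable(42)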
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Use tf.contrib.summary inside of host_calls.')
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
  The purpose of using a variable, rather than a constant, is to allow
  TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
  - 1st TPU execution: iterations_per_loop = 4
  - 2nd TPU execution: iterations_per_loop = 4
  - 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
    RuntimeError: If multiple iterations_per_loop variables were found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
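# Illustrative sketch, not part of the original module: the schedule described
# in the docstring above.  Before each TPU execution the variable is loaded
# with min(remaining_steps, iterations_per_loop), so steps=10 with
# iterations_per_loop=4 yields 4, 4 and finally 2.
def _example_iterations_per_loop_schedule(total_steps=10, iterations_per_loop=4):
  """Returns the per-execution iteration counts, e.g. [4, 4, 2]."""
  schedule = []
  remaining = total_steps
  while remaining > 0:
    schedule.append(min(remaining, iterations_per_loop))
    remaining -= schedule[-1]
  return schedule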
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
    An operation that advances the eval step by `iterations_per_loop - 1`.
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
  # Estimator's evaluation loop increases the eval step by 1 per run by
  # default, so we add the remaining difference here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
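# Illustrative sketch, not part of the original module: the arithmetic behind
# _increase_eval_step_op() above.  The Estimator evaluation loop itself adds 1
# per Session.run, so adding (iterations_per_loop - 1) makes the eval step
# advance by the full number of TPU-side iterations.
def _example_eval_step_advance(iterations_per_loop=4):
  estimator_increment = 1
  hook_increment = iterations_per_loop - 1
  return estimator_increment + hook_increment  # == iterations_per_loop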
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
logging.info(msg, *args, **kw)
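# Illustrative sketch, not part of the original module: PeriodicLogger above
# rate-limits a message that would otherwise be emitted on every iteration.
def _example_periodic_logger_usage(num_steps=1000):
  status_logger = PeriodicLogger(seconds=60)
  for step in range(num_steps):
    # Called on every step, but actually logged at most once per minute.
    status_logger.log('Processed step %d', step)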
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
  All reserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
  update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
  `host_call` is a tuple of a `function` and a list or dictionary of `tensors`
  to pass to that function; the function returns a list of Tensors. `host_call`
  currently works for train() and evaluate(). The function is executed on the
  CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(ops.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
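# Illustrative sketch, not part of the original module: the shape of the
# `eval_metrics` argument described in the TPUEstimatorSpec docstring above.
# The metric_fn runs on the CPU host and receives the concatenated tensors
# listed next to it.  The trivial stand-in metric below only keeps the sketch
# self-contained; it is not a real evaluation metric.
def _example_tpu_estimator_spec(mode, loss, train_op, labels, logits):
  def metric_fn(labels, logits):
    # Must return a dict mapping metric names to (metric_tensor, update_op).
    mean_logit = math_ops.reduce_mean(logits)
    return {'mean_logit': (mean_logit, control_flow_ops.no_op())}
  return TPUEstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      eval_metrics=(metric_fn, [labels, logits]))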
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
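# Illustrative sketch, not part of the original module: how the hooks below
# drive an _OpQueueContext.  A worker loops over read_iteration_counts() until
# join() (via stop()) enqueues the _SIGNAL.STOP sentinel.
def _example_op_queue_context_usage():
  def worker(queue_ctx):
    for iterations in queue_ctx.read_iteration_counts():
      for _ in xrange(iterations):
        pass  # session.run(enqueue_ops) would go here
  ctx = _OpQueueContext(name='ExampleController', target=worker, args=())
  ctx.send_next_batch_signal(4)  # ask the worker to run 4 iterations
  ctx.join()  # sends STOP and waits for the thread to exit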
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
  1. initializes and shuts down the TPU system.
  2. launches and joins the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here for
    # MeshTensorFlow, since it places variables on the TPU directly.
    # Reinitializing the TPU would cause variable corruption, since the
    # previously allocated memory might be overwritten for other purposes.
if (ctx.model_parallelism_enabled and
(ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST)):
self._should_initialize_tpu = False
else:
self._should_initialize_tpu = True
self._tpu_compile_op = tpu_compile_op
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
logging.error('Compilation failed: {}'.format(proto.status_error_message))
coord.request_stop()
else:
logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
logging.info('Init TPU system')
start = time.time()
with ops.Graph().as_default():
with tf_session.Session(
self._master, config=self._session_config) as sess:
sess.run(
tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
logging.info('Initialized TPU in %d seconds', time.time() - start)
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
logging.info('Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
rendezvous=None, master=None, session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
  This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with the
following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
    # in side threads for the prediction model. But it makes
    # TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
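# Illustrative sketch, not part of the original module: the prediction data
# flow described in the comment above.  The final batch exists only to carry
# the stop signal and is dropped before results reach the user; plain Python
# pairs stand in for the real batches here.
def _example_drop_stopping_batch(batches):
  """`batches` is an iterable of (predictions, stop_signal) pairs."""
  results = []
  for predictions, stop in batches:
    if stop:
      break  # the TPUEstimator-appended signalling batch is discarded
    results.append(predictions)
  return results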
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment yet. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
    By providing this as a function, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=ctx.num_of_replicas_per_host)
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
num_replicas_per_host = ctx.num_of_replicas_per_host
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
          # All the replicas share replica 0's stopping signal.
          # This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
enqueue_datas_list.append(enqueue_data)
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
def device_function_impl(replica_id):
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
core_id = 0
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcasted to other replicas(including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
array_ops.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
# For each core, slice out that core's share of the flattened inputs.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dicts, tuples, namedtuples, or any nested
structure of such containing Tensors). `labels` may also be `None`.
These are flattened before they are passed to the infeed/outfeed library,
as that expects flattened lists.
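For illustration only, each of the following hypothetical `input_fn` return
values is accepted (`x`, `a`, `b`, and `y` stand for real Tensors):
```
return {'x': x}                        # form 1: features only
return ({'x': x}, y)                   # form 2: (features, labels)
return ({'x': x, 'meta': (a, b)}, y)   # form 3: nested features, labels
```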
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims,
label_dims_names, label_names, has_labels):
"""Flatten input dims with the same order as flattened input tensors."""
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[feature_dims[name] for name in feature_dims_names])
else:
flattened_input_dims.append(feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[label_dims[name] for name in label_dims_names])
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([label_dims] * num_tensors_in_label)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
self._feature_dims, feature_dims_names, self._label_dims,
label_dims_names, label_names, has_labels)
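# Illustrative sketch only (hypothetical partition dims and tensors; not
# executed here):
#   recorder = _InputPipeline.InputsStructureRecorder(
#       input_partition_dims=[{'x': [1, 2], 'y': [1, 1]}, None])
#   recorder.validate_and_record_structure({'x': x, 'y': y}, labels)
#   recorder.flattened_input_dims  # -> [[1, 2], [1, 1], None]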
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have the same structure (single tensor vs.
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
return _Inputs(
unflattened_inputs['features'],
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
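# Round-trip sketch, for illustration only (hypothetical tensors): the
# recorded structure lets the flattened list be restored exactly.
#   recorder = _InputPipeline.InputsStructureRecorder()
#   flat = recorder.flatten_features_and_labels({'x': x}, y)
#   inputs = recorder.unflatten_features_and_labels(flat)
#   features, labels = inputs.features_and_labels()  # ({'x': x}, y)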
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# While tf.while_loop is called, the body function, which invokes
# `enqueue_fn` passed in, is called to construct the graph. So, input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# `captured_infeed_queue.get()` must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call iterator.get_next()
# inside tf.while_loop. This should always be safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we use a
# Python loop to enqueue the data into the TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only things it needs for
# dequeue are the dtypes and shapes, which are identical across queues, so
# any one of them can be used. Here, grab the first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Performs some sanity checks and logs user-friendly information. Ideally we
would error out to give users a better error message, but if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior) we cannot break
user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
def call_computation(computation_inputs,
computation,
experimental_export_device_assignment,
batch_config=None):
"""Call computation.
Args:
computation_inputs: A tensor or dict of tensors, the inputs to the
computation.
computation: A Python function that takes the computation inputs and builds
the computation graph. If `computation` returns m outputs, this function
will return a list of m Tensors.
experimental_export_device_assignment: If `True`, use user-provided device
assignment. If `False`, round-robin computation among all TPU cores
visible to the host.
batch_config: A BatchConfig named tuple specifying the batching
configuration to use for inference batching.
Returns:
A list of output tensors.
"""
if experimental_export_device_assignment:
return computation(computation_inputs)
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
def tpu_partitioned_call(partition_inputs):
# capture_resource_var_by_value enables variables to be mirrored on TPU
# to avoid fetching from CPU, since variables do not change during
# inference.
@function.Defun(capture_resource_var_by_value=False)
def tpu_subgraph():
return computation(partition_inputs)
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
# Not using Batching Function but use TPUPartitionedCall/all cores.
if not batch_config:
return tpu_partitioned_call(computation_inputs)
# Use Batching Function and TPUPartitionedCall/all cores.
# Note that BatchingFunction requires a list of tensors and doesn't support
# a dict of tensors. So we preserve the structure by deterministically
# flattening the dict before batching and then recomposing it after batching
# to feed into the computation.
ordered_inputs_list = nest.flatten(computation_inputs)
@batch_ops.batch_function(
num_batch_threads=batch_config.num_batch_threads,
max_batch_size=batch_config.max_batch_size,
batch_timeout_micros=batch_config.batch_timeout_micros,
allowed_batch_sizes=batch_config.allowed_batch_sizes,
max_enqueued_batches=batch_config.max_enqueued_batches,
autograph=False)
def batched_tpu_computation(*tensor_args):
"""Recompose the input feature dict and calls the TPU computation."""
computation_feature_input = nest.pack_sequence_as(computation_inputs,
tensor_args)
return tpu_partitioned_call(computation_feature_input)
return batched_tpu_computation(*ordered_inputs_list)
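# Illustrative usage sketch (hypothetical `my_computation` and `x`; the
# BatchConfig values are arbitrary):
#   outputs = call_computation(
#       {'x': x},
#       computation=my_computation,
#       experimental_export_device_assignment=False,
#       batch_config=BatchConfig(
#           num_batch_threads=1, max_batch_size=8,
#           batch_timeout_micros=1000, allowed_batch_sizes=[8]))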
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent and
performs necessary check and mutation required by TPU training and evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input should be taken from TPU infeed rather
than input pipeline (input_fn) directly. To fit TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of (train_step, host_call, captured_scaffold_fn,
captured_training_hooks), where `train_step` represents the train step for
TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_)
)
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(scaled_gradients)
]
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
host_call.record({
'host_call': (lambda loss_t: loss_t,
[array_ops.reshape(loss, [1])])
})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, a eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of (eval_step, host_calls, captured_scaffold_fn,
captured_eval_hooks), where `eval_step` represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks), where `predict_step` represents the predict step
for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
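# For illustration, a predictions dict that passes the check above needs a
# static leading (batch) dimension, e.g. (hypothetical tensor):
#   predictions = {'probabilities': probs}  # probs.shape == [8, 10]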
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Make deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format('training_chief_hooks') + 'If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
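# A valid host_calls entry, for illustration (hypothetical tensors and
# host_fn):
#   host_calls = {
#       'host_call': (lambda gs, loss: my_summary_fn(gs, loss),
#                     [global_step_tensor, loss_tensor]),
#   }
#   _OutfeedHostCall.validate(host_calls)  # callable + two matching tensors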
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated function is passed to the host_fn and
executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
# It is assumed that evaluation always happens on a single-host TPU system,
# so place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
# If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with ops.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = array_ops.identity(dequeue_ops[i][0])
else:
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
# force all dequeue operations to be run if not consumed by the host calls
ret['__force_dequeue'] = control_flow_ops.group(*flat_dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
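# For illustration: batch_size=1024 with elapsed_steps=100 over
# elapsed_time=40s gives global_step_per_sec=2.5 and examples_per_sec=2560.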
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class ExportSavedModelApiVersion(enum.Enum):
V1 = 1
V2 = 2
class BatchConfig(
collections.namedtuple('BatchConfig', [
'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
'allowed_batch_sizes', 'max_enqueued_batches'
])):
"""Class to handle config inputs into the batching function."""
def __new__(cls,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches=10):
"""Creates an EmbeddingConfigSpec instance.
Args:
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to
10.
Returns:
A BatchConfig instance.
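For illustration, a possible configuration (values are arbitrary):
```
batch_config = BatchConfig(
    num_batch_threads=2,
    max_batch_size=8,
    batch_timeout_micros=5000,
    allowed_batch_sizes=[2, 4, 8])
```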
"""
return super(BatchConfig, cls).__new__(
cls,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=max_enqueued_batches)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size (a minimal sketch of this handshake
follows below).
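A minimal sketch of this handshake (hypothetical `my_model_fn`,
`my_run_config` and `my_dataset`; batch sizes are arbitrary):
```
est = TPUEstimator(
    model_fn=my_model_fn,
    config=my_run_config,
    train_batch_size=1024)  # global batch size
def train_input_fn(params):
  # params['batch_size'] is the per-shard batch size derived from 1024.
  return my_dataset.batch(params['batch_size'], drop_remainder=True)
est.train(train_input_fn, max_steps=1000)
```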
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.compat.v1.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_savedmodel` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
There are two versions of the API: ExportSavedModelApiVersion.V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] =
export_output_lib.ClassificationOutput(classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_savedmodel()` sets up `params['use_tpu']` flag to let the user
know if the code is exporting to TPU (or not). When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()`. Alternatively use `inference_on_tpu()` which is a
convenience wrapper of the three.
```
def model_fn(features, labels, mode, config, params):
...
# This could be some pre-processing on CPU like calls to input layer with
# embedding columns.
x2 = features['x'] * 2
def computation(input_tensor):
return layers.dense(
input_tensor, 1, kernel_initializer=init_ops.zeros_initializer())
inputs = [x2]
if params['use_tpu']:
predictions = array_ops.identity(
tpu_estimator.inference_on_tpu(computation, inputs,
num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100),
name='predictions')
else:
predictions = array_ops.identity(
computation(*inputs), name='predictions')
key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
export_outputs = {
key: export_lib.PredictOutput({'prediction': predictions})
}
...
```
TIP: V2 is recommended as it is more flexible (eg: batching, etc).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
experimental_export_device_assignment=False,
embedding_config_spec=None,
export_saved_model_api_version=ExportSavedModelApiVersion.V1):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same. If
both are `None`, a temporary directory will be used.
config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently:
- TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval. See below.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported.
Currently, export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_savedmodel()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
experimental_export_device_assignment: Whether to include the device
assignment in the exported model. Doing so is useful in case of model
parallel inference but will tie the exported model to the TPU topology
used to export the model.
embedding_config_spec: Optional EmbeddingConfigSpec instance
to support using TPU embedding.
export_saved_model_api_version: ExportSavedModelApiVersion, V1 or V2.
With V1, `export_savedmodel()` adds rewrite() and TPUPartitionedCallOp()
for the user; while in V2, the user is expected to add rewrite(),
TPUPartitionedCallOp(), etc. in their model_fn.
A helper function `inference_on_tpu` is provided for V2.
brn_tpu_estimator.py includes examples for both versions
i.e. TPUEstimatorExportTest and TPUEstimatorExportV2Test.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
if embedding_config_spec:
if (config.tpu_config.per_host_input_for_training !=
tpu_config.InputPipelineConfig.PER_HOST_V2):
raise ValueError('Only PER_HOST_V2 is supported when using TPU '
'Embedding; got {}.'.format(
config.tpu_config.per_host_input_for_training))
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to prevent TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Pass non-None params, since the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu, embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
self._experimental_export_device_assignment = (
experimental_export_device_assignment)
if not isinstance(export_saved_model_api_version,
ExportSavedModelApiVersion):
raise ValueError('export_saved_model_api_version should be of type '
'ExportSavedModelApiVersion; got {}.'.format(
export_saved_model_api_version))
self._export_saved_model_api_version = export_saved_model_api_version
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
logging.warning('TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _INFERENCE_ON_TPU_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if self._export_saved_model_api_version == ExportSavedModelApiVersion.V1:
if mode == _INFERENCE_ON_TPU_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _INFERENCE_ON_TPU_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
return model_fn_inference_on_tpu(
self._model_fn,
features,
labels,
config,
self._params,
batch_config=None,
experimental_export_device_assignment=self
._experimental_export_device_assignment,
call_context=self._ctx)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Setting the batch size in params first. This helps the user to have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_savedmodel()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if self._log_every_n_steps is not None:
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if self._log_every_n_steps is not None:
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters
)
)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after variables for TPU embedding have
# been created on CPU, as user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops),
InstallSignalHandlerHook()
])
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if self._log_every_n_steps is not None:
logging_hook_frequency = ( # Divide and round up
(self._log_every_n_steps +
self._config.tpu_config.iterations_per_loop - 1) //
self._config.tpu_config.iterations_per_loop)
hooks.append(
training.LoggingTensorHook({
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=logging_hook_frequency))
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict)
)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after variables for TPU embedding have
# been created on CPU, as user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects all metrics in `eval_metric_ops` have update_op and calls
# them one by one. The real metric update_ops are invoked in a
# separate thread. So, here we give Estimator the dummy op for all
# metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold_fn, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the element (via a generator) to the call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue where we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = [] # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_train_step,
[_INITIAL_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(compile_op, dummy_predict_op,) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
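# A minimal usage sketch (hypothetical `my_enqueue_ops`; not part of the original
# module). `op_fn` is expected to return a list of ops that should run once per
# loop iteration:
#
#   def _enqueue_fn():
#     return my_enqueue_ops  # list of ops built elsewhere
#
#   looped_op = _wrap_computation_in_while_loop('/device:CPU:0', _enqueue_fn)
#
# With parallel_iterations=1 the ops run iterations_per_loop times, strictly one
# iteration after another.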
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
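# Illustrative sketch of how a model_fn typically satisfies this check, assuming
# the standard tf.contrib.tpu.CrossShardOptimizer wrapper (not part of this
# module):
#
#   optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
#   optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
#   train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
#
# The wrapper inserts cross-replica sums on the gradients, which is the op type
# this validation looks for.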
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can only be captured once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file a bug.')
return self._object
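# A minimal sketch of the intended capture pattern (hypothetical names, for
# illustration only):
#
#   captured_hooks = _CapturedObject()
#
#   def tpu_body():                      # built inside the control flow
#     captured_hooks.capture(my_hooks)
#     return some_ops
#
#   # ... build the TPU loop around tpu_body() ...
#   hooks = captured_hooks.get()         # read back outside the control flow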
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = dataset_ops.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches to exhaust the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure of the dataset elements, such that the return
value is now a dictionary with `features`, `labels`, and `signals` as three
distinct keys. This provides a better structure, which makes it easier to
decompose the inputs later (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
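# Hedged usage sketch (hypothetical `user_dataset` and `batch_size`; see
# _InputsWithStoppingSignals.__init__ for the real call sites):
#
#   signaled = user_dataset.map(
#       _InputsWithStoppingSignals.insert_stopping_signal(
#           stop=False, batch_size=batch_size, add_padding=True))
#
# Each element is then a dict with a 'features' key, an optional 'labels' key,
# and a 'signals' key holding the _StopSignals dict.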
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For non Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. By that, the sliced padding mask should have all 0's.
# If this assertion fails, the slice logic here does not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
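# Worked example (illustration only): with real_batch_size=3, missing_count=2
# and batch_size=5 the mask is [0, 0, 0, 1, 1] -- zeros mark real rows, ones
# mark the rows appended by pad_features_and_labels.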
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
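# Minimal illustration for the plain-dict case (the HParams case goes through
# add_hparam/set_hparam instead; `ctx` is a hypothetical value):
#
#   params = {'batch_size': 128}
#   _add_item_to_params(params, _CTX_KEY, ctx)   # stored under params[_CTX_KEY]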
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path)
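# A hedged usage sketch (hypothetical estimator, receiver_fn and path; not part
# of the original module):
#
#   export_dir = export_estimator_savedmodel(
#       estimator=my_cpu_estimator,
#       export_dir_base='/tmp/tpu_export',
#       serving_input_receiver_fn=my_serving_input_receiver_fn)
#
# The returned string is the timestamped SavedModel directory.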
def model_fn_inference_on_tpu(model_fn,
features,
labels=None,
config=None,
params=None,
batch_config=None,
experimental_export_device_assignment=False,
call_context=None):
"""Convenience wrapper for export_saved_model API v2 for a model_fn.
It attempts to execute the entire model function on the TPU for prediction.
Note that this does not support features which are SparseTensors. If you have
SparseTensor features, consider partitioning your model function further and
using inference_on_tpu instead.
Args:
model_fn: the model_fn for which we want to inference on TPU.
features: a tensor or dict of tensors, serves as the feature inputs to the
model.
labels: a tensor or dict of tensors, serves as the labels inputs to the
model.
config: auxiliary config to the Estimator.
params: hparams that we want to pass to the model_fn.
batch_config: a named tuple to wrap the inference batching configuration
inputs.
experimental_export_device_assignment: Whether to include the device
assignment in the exported model. Doing so is useful in case of model
parallel inference but will tie the exported model to the TPU topology
used to export the model.
call_context: an optional TPUContext under which the TPU run configuration
is stored.
Returns:
An EstimatorSpec containing the outputs in export_outputs and predictions.
"""
computation, capture = _build_computation_for_inference(
model_fn, labels, config, params, experimental_export_device_assignment,
call_context)
tensors = call_computation(
features,
computation,
experimental_export_device_assignment=
experimental_export_device_assignment,
batch_config=batch_config)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(model_fn,
labels,
config,
params,
experimental_export_device_assignment,
call_context=None):
"""Builds the computation with calls the model_fn for inference."""
capture = _CapturedObject()
def computation(computation_input):
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
model_fn, computation_input, labels, config, params)
if experimental_export_device_assignment and call_context:
# Export the device assignment as part of the model. This is useful for
# model parallel usecases where the model relies on the mapping between
# logical and physical devices.
with call_context.with_mode(_INFERENCE_ON_TPU_MODE) as ctx:
device_assignment = ctx.device_assignment
else:
device_assignment = None
if experimental_export_device_assignment:
tensors_on_cpu = tpu.rewrite_for_inference(
tpu_computation, device_assignment=device_assignment)
else:
tensors_on_cpu = tpu.rewrite(
tpu_computation, device_assignment=device_assignment)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture(
(estimator_spec, export_outputs_dict, predictions_dict, none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
params):
"""Builds the TPU computation for inference on TPU."""
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
model_fn_args = function_utils.fn_args(model_fn)
kwargs = {}
# Makes a deep copy of `config` and `params` in case the user mutates them.
if 'labels' in model_fn_args:
kwargs['labels'] = labels
if 'mode' in model_fn_args:
kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
estimator_spec = model_fn(features, **kwargs)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def inference_on_tpu(computation,
inputs_to_tpu,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
max_enqueued_batches=10):
"""Convenient wrapper for export_saved_model API v2 to wrap TPU computation.
It puts computation on TPU, add batching around it and round robin computation
between TPU cores.
See tpu_estimator_test.py for an example.
Args:
computation: computation to be put on TPU, which takes inputs_to_tpu as
arguments.
inputs_to_tpu: a list of tensors as input to computation.
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op to
pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
Returns:
The unbatched computation output Tensors.
"""
@batch_ops.batch_function(num_batch_threads, max_batch_size,
batch_timeout_micros, allowed_batch_sizes,
max_enqueued_batches)
def batched_tpu_computation(*args):
@function.Defun(capture_resource_var_by_value=False)
def tpu_computation():
return tpu.rewrite(computation, args)
return tpu_functional.TPUPartitionedCall(
args=tpu_computation.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
f=tpu_computation)
return batched_tpu_computation(*inputs_to_tpu)
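# A hedged usage sketch (hypothetical `my_computation`, tensors and batching
# parameters; see the Args section above for their meaning):
#
#   outputs = inference_on_tpu(
#       my_computation,
#       inputs_to_tpu=[features_tensor],
#       num_batch_threads=4,
#       max_batch_size=128,
#       batch_timeout_micros=5000,
#       allowed_batch_sizes=[32, 64, 128])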
maxinet.py
#!/usr/bin/python
"""MaxiNet main file
This file holds the main components of MaxiNet and is intended to be the
only part of MaxiNet which needs to be used by the user or third-party
applications.
Classes in this file:
Experiment: Use this class to specify an experiment. Experiments are
created for one-time usage and have to be stopped in the end. One
cluster instance can run several experiments in sequence.
Cluster: Manage a set of Workers via this class. A cluster can run one
Experiment at a time. If you've got several Experiments to run do
not destroy/recreate this class but define several Experiment
instances and run them sequentially.
NodeWrapper: Wrapper that allows most commands that can be used in
mininet to be used in MaxiNet as well. Whenever you call for example
> exp.get("h1")
you'll get an instance of NodeWrapper which will forward calls to
the respective mininet node.
TunHelper: Helper class to manage tunnel interface names.
Worker: A Worker is part of a Cluster and runs a part of the emulated
network.
"""
import atexit
import functools
import logging
import random
import re
import subprocess
import sys
import time
import warnings
import threading
from mininet.node import RemoteController, UserSwitch
from mininet.link import TCIntf, Intf, Link, TCLink
import Pyro4
from MaxiNet.Frontend.cli import CLI
from MaxiNet.tools import Tools, MaxiNetConfig, SSH_Tool
from MaxiNet.Frontend.partitioner import Partitioner
logger = logging.getLogger(__name__)
# the following block is to support deprecation warnings. this is really
# not solved nicely and should probably be somewhere else
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
@functools.wraps(func)
def new_func(*args, **kwargs):
logger.warn("Call to deprecated function {}.".format(func.__name__))
warnings.warn_explicit(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
filename=func.func_code.co_filename,
lineno=func.func_code.co_firstlineno + 1)
return func(*args, **kwargs)
return new_func
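# Usage sketch (hypothetical function; each call then emits a DeprecationWarning):
#
#   @deprecated
#   def old_api():
#       return 42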
def run_cmd(cmd):
"""Run cmd on frontend machine.
See also: run_cmd_shell(cmd)
Args:
cmd: Either a string of program name or sequence of program
arguments.
Returns:
Stdout of cmd call as string.
"""
return subprocess.check_output(cmd, shell=False)
def run_cmd_shell(cmd):
"""Run cmd on frontend machine.
See also: run_cmd(cmd)
Args:
cmd: Either a string of program name and arguments or sequence
of program name and arguments.
Returns:
Stdout of cmd call as string.
"""
return subprocess.check_output(cmd, shell=True)
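# Illustration of the two helpers (commands are examples only):
#
#   run_cmd(["ls", "-l", "/tmp"])        # argument list, executed without a shell
#   run_cmd_shell("ls -l /tmp | wc -l")  # single string, interpreted by a shell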
class Worker(object):
"""Worker class used to manage an individual Worker host.
A Worker is part of a Cluster and runs a part of the emulated
network. A Worker is identified by its hostname.
The Worker class is instantiated when a Worker is added to a Cluster.
Attributes:
config: instance of class MaxiNetConfig
mininet: remote instance of class MininetManager which is used to
create and manage mininet on the Worker machine.
server: remote instance of class WorkerServer which is used to run
commands on the Worker machine.
switch: default mininet switch class to use in mininet instances.
ssh: instance of class SSH_Manager used to configure the ssh daemon
on the worker.
sshtool: instance of class SSH_Tool used to manage the ssh client on
the frontend machine.
"""
def __init__(self, nameserver, pyroname, pyropw, sshtool, switch=UserSwitch):
"""Init Worker class."""
self.server = Pyro4.Proxy(nameserver.lookup(pyroname))
self.server._pyroHmacKey=pyropw
self.mininet = Pyro4.Proxy(nameserver.lookup(pyroname+".mnManager"))
self.mininet._pyroHmacKey=pyropw
self.ssh = Pyro4.Proxy(nameserver.lookup(pyroname+".sshManager"))
self.ssh._pyroHmacKey=pyropw
self.config = Pyro4.Proxy(nameserver.lookup("config"))
self.config._pyroHmacKey=pyropw
if(not self.config.run_with_1500_mtu()):
self._fix_mtus()
self.switch = switch
self.sshtool = sshtool
self.sshtool.add_known_host(self.ip())
self._x11tunnels = []
self.run_script("load_tunneling.sh")
self._add_ssh_key()
def _add_ssh_key(self):
"""add ssh key of frontend machine to worker ssh daemon"""
k = self.sshtool.get_pub_ssh_key()
self.ssh.add_key(k)
def hn(self):
"""Get hostname of worker machine."""
return self.server.get_hostname()
def set_switch(self, switch):
"""Set default switch class."""
self.switch = switch
def configLinkStatus(self, src, dst, status):
"""Wrapper for configLinkStatus method on remote mininet.
Used to enable and disable links.
Args:
src: name of source node
dst: name of destination node
status: string {up|down}
"""
self.mininet.configLinkStatus(src, dst, status)
def ip(self, classifier=None):
"""Get public ip adress of worker machine.
Args:
classifier: if multiple ip addresses are configured for a worker
a classifier can be used to hint which ip address should be used.
"""
return self.config.get_worker_ip(self.hn(), classifier)
def start(self, topo, tunnels, controller=None):
"""Start mininet instance on worker machine.
Start mininet emulating the topology specified by the topo argument.
If controller is not specified, mininet will start its own
controller for this net.
Args:
topo: Topology to emulate on this worker.
tunnels: List of tunnels in format: [[tunnelname, switch,
options],].
controller: optional mininet controller class to use in this
network.
"""
STT = self.config.use_stt_tunneling()
if controller:
self.mininet.create_mininet(topo=topo, tunnels=tunnels,
controller=controller,
switch=self.switch, STT=STT)
else:
self.mininet.create_mininet(topo=topo, tunnels=tunnels,
switch=self.switch, STT=STT)
def daemonize(self, cmd):
"""run command in background and terminate when MaxiNet is shut
down."""
self.server.daemonize(cmd)
def daemonize_script(self, script, args):
"""run script from script folder in background and terminate when MaxiNet is shut
down.
Args:
script: Script name to call
args: string of args which will be appended to script name call
"""
self.server.daemonize_script(script, args)
def tunnelX11(self, node):
"""Create X11 tunnel from Frontend to node on worker to make
x-forwarding work.
This is used in CLI class to allow calls to wireshark etc.
For each node only one tunnel will be created.
Args:
node: nodename
Returns:
boolean whether tunnel was successfully created.
"""
if(not node in self._x11tunnels):
try:
display = subprocess.check_output(
self.sshtool.get_ssh_cmd(targethostname=self.hn(),
cmd="env | grep DISPLAY",
opts=["-Y"]))[8:]
self.mininet.tunnelX11(node, display)
self._x11tunnels.append(node)
except subprocess.CalledProcessError:
return False
return True
def run_cmd_on_host(self, host, cmd):
"""Run cmd in context of host and return output.
Args:
host: nodename
cmd: string of program name and arguments to call.
Returns:
Stdout of program call.
"""
return self.mininet.runCmdOnHost(host, cmd)
def run_cmd(self, cmd):
"""run cmd on worker machine and return output.
Args:
cmd: string of program name and arguments to call.
Returns:
Stdout of program call.
"""
return self.server.check_output(cmd)
def run_script(self, cmd):
"""Run MaxiNet script on worker machine and return output.
Args:
cmd: String of name of MaxiNet script and arguments.
Returns:
Stdout of program call.
"""
return self.server.script_check_output(cmd)
def rpc(self, host, cmd, *params1, **params2):
"""Do rpc call to mininet node.
MaxiNet uses this function to do rpc calls on remote nodes in
NodeWrapper class.
Args:
host: Nodename
cmd: Method of node to call.
*params1: Unnamed parameters to call.
**params2: Named parameters to call.
Returns:
Return of host.cmd(*params1,**params2).
WARNING: if returned object is not serializable this might
crash.
"""
return self.mininet.rpc(host, cmd, *params1, **params2)
def rattr(self, host, name):
"""Get attributes of mininet node.
MaxiNet uses this function to get attributes of remote nodes in
NodeWrapper class.
Args:
host: Nodename
Returns:
host.name
WARNING: if the attribute is not serializable this might
crash.
"""
return self.mininet.attr(host, name)
def _fix_mtus(self):
"""If mtu of Worker is lower than 1600 set it to 1600.
In order to transfer 1500 byte long packets over GRE tunnels
the MTU of the interface which "transfers" the tunnel has to be
set to 1600.
This method tries to determine the correct network interface and
sets its MTU. This method is not needed if MaxiNet is configured
to use MTUs lower than 1451 on the mininet nodes.
"""
if self.ip(classifier="backend") is None:
logger.warn("no ip configured - can not fix MTU ")
return 0
intf = self.run_cmd("ip addr show to " + self.ip(classifier="backend") +
"/24 | head -n1 | cut -d' ' -f2 | tr -d :").strip()
if intf == "":
logger.warn("could not find eth device - can not fix MTU")
return 0
mtu = int(self.run_cmd("ip li show dev " + intf +
" | head -n1 | cut -d ' ' -f5"))
if(mtu < 1600):
self.run_cmd("ip li se dev " + intf + " mtu 1600")
def stop(self):
"""Stop mininet instance on this worker."""
return self.mininet.destroy_mininet()
def get_file(self, src, dst):
"""Transfer file specified by src on worker to dst on Frontend.
Transfers file src to filename or folder dst on Frontend machine
via scp.
Args:
src: string of path to file on Worker
dst: string of path to file or folder on Frontend
"""
cmd_get = self.sshtool.get_scp_get_cmd(targethostname=self.hn(),
remote=src,
local=dst)
subprocess.call(cmd_get)
def put_file(self, src, dst):
"""transfer file specified by src on Frontend to dst on worker.
Transfers file src to filename or folder dst on Worker machine
via scp.
Args:
src: string of path to file on Frontend
dst: string of path to file or folder on Worker
"""
cmd_put = self.sshtool.get_scp_put_cmd(targethostname=self.hn(),
local=src,
remote=dst)
subprocess.call(cmd_put)
def addHost(self, name, cls=None, **params):
"""Add host at runtime.
You probably want to use Experiment.addHost as this does some
bookkeeping of nodes etc.
Args:
name: Nodename to add. Must not already exist on Worker.
cls: Node class to use.
**params: Additional parameters for cls instantiation.
Returns:
nodename
"""
return self.mininet.addHost(name, cls, **params)
def addSwitch(self, name, cls=None, **params):
"""Add switch at runtime.
You probably want to use Experiment.addSwitch as this does some
bookkeeping on nodes etc.
Args:
name: switchname to add. Must not already exist on Worker.
cls: Node class to use.
**params: Additional parameters for cls instantiation.
Returns:
nodename
"""
return self.mininet.addSwitch(name, cls, **params)
def addController(self, name="c0", controller=None, **params):
"""Add controller at runtime.
You probably want to use Experiment.addController as this does
some bookkeeping on nodes etc.
Args:
name: controllername to add. Must not already exist on Worker.
controller: mininet controller class to use.
**params: Additional parameters for cls instantiation.
Returns:
controllername
"""
return self.mininet.addController(name, controller, **params)
def addTunnel(self, name, switch, port, intf, **params):
"""Add tunnel at runtime.
You probably want to use Experiment.addLink as this does some
bookkeeping on tunnels etc.
Args:
name: tunnelname (must be unique on Worker)
switch: name of switch to which tunnel will be connected.
port: port number to use on switch.
intf: Intf class to use when creating the tunnel.
"""
self.mininet.addTunnel(name, switch, port, intf, **params)
def addLink(self, node1, node2, port1=None, port2=None,
cls=None, **params):
"""Add link at runtime.
You probably want to use Experiment.addLink as this does some
bookkeeping.
Args:
node1: nodename
node2: nodename
port1: optional port number to use on node1.
port2: optional port number to use on node2.
cls: optional class to use when creating the link.
Returns:
Tuple of the following form: ((node1,intfname1),
(node2,intfname2)) where intfname1 and intfname2 are the
names of the interfaces which where created for the link.
"""
return self.mininet.addLink(node1, node2, port1, port2, cls,
**params)
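# Hedged usage sketch (added for clarity, not part of the original MaxiNet module):
# Worker objects are normally obtained through a Cluster rather than constructed
# directly. The hostname "worker1" and the file paths are assumptions for illustration.
def _worker_usage_example(cluster):
    worker = cluster.get_worker("worker1")        # Worker instance managed by the Cluster
    print(worker.hn())                            # hostname of the worker machine
    print(worker.run_cmd("uname -a"))             # arbitrary shell command on the worker
    worker.put_file("/tmp/local.conf", "/tmp/")   # copy a file from Frontend to Worker via scp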
class TunHelper:
"""Class to manage tunnel interface names.
This class is used by MaxiNet to make sure that tunnel interface
names are unique.
WARNING: This class is not designed for concurrent use!
Attributes:
tunnr: counter which increases with each tunnel.
keynr: counter which increases with each tunnel.
"""
def __init__(self):
"""Inits TunHelper"""
self.tunnr = 0
self.keynr = 0
def get_tun_nr(self):
"""Get tunnel number.
Returns a number to use when creating a new tunnel.
This number will only be returned once by this method.
(see get_last_tun_nr)
Returns:
Number to use for tunnel creation.
"""
self.tunnr = self.tunnr + 1
return self.tunnr - 1
def get_key_nr(self):
"""Get key number.
Returns a number to use when creating a new tunnel.
This number will only be returned once by this method.
(see get_last_key_nr)
Returns:
Number to use for key in tunnel creation.
"""
self.keynr = self.keynr + 1
return self.keynr - 1
def get_last_tun_nr(self):
"""Get last tunnel number.
Returns the last number returned by get_tun_nr.
Returns:
Number to use for tunnel creation.
"""
return self.tunnr - 1
def get_last_key_nr(self):
"""Get last key number.
Returns the last number returned by get_key_nr.
Returns:
Number to use for key in tunnel creation.
"""
return self.keynr - 1
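# Illustrative sketch (added, not original): TunHelper just hands out monotonically
# increasing numbers so tunnel interface names and GRE keys stay unique per Cluster.
def _tunhelper_usage_example():
    th = TunHelper()
    assert th.get_tun_nr() == 0        # first tunnel number
    assert th.get_tun_nr() == 1        # each number is issued exactly once
    assert th.get_last_tun_nr() == 1   # re-reads the most recently issued number
    assert th.get_key_nr() == 0        # key counter is independent of the tunnel counter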
class Cluster(object):
"""Class used to manage a cluster of Workers.
Manage a set of Workers via this class. A cluster can run one
Experiment at a time. If you've got several Experiments to run do
not destroy/recreate this class but define several Experiment
instances and run them sequentially.
Attributes:
config: Instance of Tools.Config to query MaxiNet configuration.
frontend: Instance of MaxiNet.Frontend.client.Frontend which
is used to manage the pyro Server.
hostname_to_worker: dictionary which translates hostnames into Worker
instances
hosts: List of worker hostnames.
ident: random integer which identifies this cluster instance on the
FrontendServer.
localIP: IPv4 address of the Frontend.
logger: Logging instance.
nameserver: pyro nameserver
nsport: Nameserver port number.
manager: MaxiNetManager instance hosted by FrontendServer which manages
Workers.
sshtool: SSH_Tool instance which is used to manage ssh client on frontend
machine.
tunhelper: Instance of TunHelper to enumerate tunnel instances.
worker: List of worker instances. Index of worker instance in
sequence must be equal to worker id.
"""
def __init__(self, ip=None, port=None, password=None, minWorkers=None, maxWorkers=None):
"""Inits Cluster class.
Args:
ip: IP address of FrontendServer nameserver.
port: port of FrontendServer nameserver.
password: password of FrontendServer nameserver.
maxWorkers: number of workers to allocate to this cluster; None for "all you can get".
minWorkers: minimum number of workers to allocate to this cluster; None for "at least 1"
"""
self.logger = logging.getLogger(__name__)
self.tunhelper = TunHelper()
self.config = MaxiNetConfig()
if(ip is None):
ip = self.config.get_nameserver_ip()
if(port is None):
port = self.config.get_nameserver_port()
if(password is None):
password = self.config.get_nameserver_password()
self.nameserver = Pyro4.locateNS(host=ip, port=port, hmac_key=password)
self.config = Pyro4.Proxy(self.nameserver.lookup("config"))
self.config._pyroHmacKey=password
self.manager = Pyro4.Proxy(self.nameserver.lookup("MaxiNetManager"))
self.manager._pyroHmacKey=password
self.sshtool = SSH_Tool(self.config)
self.hostname_to_worker={}
self._create_ident()
logging.basicConfig(level=self.config.get_loglevel())
self.hosts = []
self.worker = []
atexit.register(self._stop)
#register this Cluster to the nameserver as key self.ident:
myIP = subprocess.check_output("ip route get %s | cut -d' ' -f1" % ip, shell=True)
if (myIP.strip() == "local"):
myIP = "127.0.0.1"
else:
myIP = subprocess.check_output("ip route get %s" % ip, shell=True).split("src")[1].split()[0]
self._pyrodaemon = Pyro4.Daemon(host=myIP)
self._pyrodaemon._pyroHmacKey=password
uri = self._pyrodaemon.register(self)
self.nameserver.register(self.ident, uri)
self._pyro_daemon_thread = threading.Thread(target=self._pyrodaemon.requestLoop)
self._pyro_daemon_thread.daemon = True
self._pyro_daemon_thread.start()
if(maxWorkers == None):
self.add_workers()
else:
for i in range(0,maxWorkers):
self.add_worker()
if ((minWorkers != None) and (self.num_workers() < minWorkers)):
raise Exception("Not enough free workers to run this experiment (got %s, required %s). " % (self.num_workers(), minWorkers))
def _create_ident(self):
"""Create and register identifier to use when communicating with the
FrontendServer"""
self.ident = None
hn = run_cmd("hostname").strip()
ident = "%s:%s" % (hn, sys.argv[0])
if not self.manager.register_ident(ident):
for i in range(2, 10000):
ident = "%s:%s-%d" % (hn, sys.argv[0], i)
if self.manager.register_ident(ident):
self.ident = ident
break
else:
self.ident = ident
@Pyro4.expose
def get_status_is_alive(self):
"""Get the status of this Cluster object.
Returns True if the object is still alive.
This function is periodically called from the FrontendServer to check whether the
cluster still exists; otherwise its allocated resources (workers) are freed for
future use by other clusters.
"""
return True
@Pyro4.expose
def get_available_workers(self):
"""Get list of worker hostnames which are not reserved.
Returns:
list of hostnames of workers which are registered on the FrontendServer
but not reserved by this or another Cluster instance.
"""
return self.manager.get_free_workers()
@Pyro4.expose
def add_worker_by_hostname(self, hostname):
"""Add worker by hostname
Reserves a Worker for this Cluster on the FrontendServer and adds it to
the Cluster instance. Fails if Worker is reserved by other Cluster or
no worker with that hostname exists.
Args:
hostname: hostname of Worker
Returns:
True if worker was successfully added, False if not.
"""
pyname = self.manager.reserve_worker(hostname, self.ident)
if(pyname):
self.worker.append(Worker(self.nameserver, pyname, self.config.get_nameserver_password(), self.sshtool))
self.hostname_to_worker[hostname] = self.worker[-1]
self.logger.info("added worker %s" % hostname)
return True
else:
self.logger.warn("adding worker %s failed" % hostname)
return False
@Pyro4.expose
def add_worker(self):
"""Add worker
Reserves a Worker for this Cluster on the FrontendServer and adds it to
the Cluster instance. Fails if no unreserved Worker is available on the
FrontendServer.
Returns:
True if worker was successfully added, False if not.
"""
hns = self.get_available_workers().keys()
for hn in hns:
if(self.add_worker_by_hostname(hn)):
return True
return False
@Pyro4.expose
def add_workers(self):
"""Add all available workers
Reserves all unreserved Workers for this Cluster on the FrontendServer
and adds them to the Cluster instance.
Returns:
Number of workers added.
"""
i = 0
while self.add_worker():
i = i + 1
return i
@Pyro4.expose
def remove_worker(self, worker):
"""Remove worker from Cluster
Removes a Worker from the Cluster and makes it available for other
Cluster instances on the FrontendServer.
Args:
worker: hostname or Worker instance of Worker to remove.
"""
if(not isinstance(worker, Worker)):
worker = self.hostname_to_worker[worker]
del self.hostname_to_worker[worker.hn()]
self.worker.remove(worker)
worker.run_script("delete_tunnels.sh")
hn = worker.hn()
self.manager.free_worker(worker.hn(), self.ident)
self.logger.info("removed worker %s" % hn)
@Pyro4.expose
def remove_workers(self):
"""Remove all workers from this cluster
Removes all Workers from the Cluster and makes them available for other
Cluster instances on the FrontendServer.
"""
while(len(self.worker) > 0):
self.remove_worker(self.worker[0])
def _stop(self):
"""Stop Cluster and shut it down.
Removes all Workers from the Cluster and makes them available for other
Cluster instances on the FrontendServer.
"""
self.remove_workers()
self.manager.unregister_ident(self.ident)
self.nameserver.remove(self.ident)
self._pyrodaemon.unregister(self)
self._pyrodaemon.shutdown()
@Pyro4.expose
def num_workers(self):
"""Return number of worker nodes in this Cluster."""
return len(self.workers())
@Pyro4.expose
def workers(self):
"""Return sequence of worker instances for this cluster.
Returns:
Sequence of worker instances.
"""
return self.worker
@Pyro4.expose
def get_worker(self, hostname):
"""Return worker instance of worker with hostname hostname.
Args:
hostname: worker hostname
Returns:
Worker instance
"""
return self.hostname_to_worker[hostname]
@Pyro4.expose
def get_tunnel_metadata(self, w1, w2):
"""Get metadata needed for tunnel creation
Args:
w1: Worker instance
w2: Worker instance
Returns:
Tuple of (ip1,ip2,tunnel id, tunnel key, tunnel interface name). Except
for ips all values are unique to that instance.
"""
tid = self.tunhelper.get_tun_nr()
tkey = self.tunhelper.get_key_nr()
intf = "mn_tun" + str(tid)
ip1 = w1.ip(classifier="backend")
ip2 = w2.ip(classifier="backend")
#use multiple IP addresses for the workers:
#modern NICs have multiple queues with own IRQs. This is called RSS. The queue a packet is enqueued in is determined by a hashing algorithm using the IP headers.
#unfortunately, most RSS implementations ignore the GRE headers.
#on GRE, most RSS hashing algorithms only use src-dest IP addresses to assign packets to queues, which makes it necessary to provide multiple IP combinations per worker pair.
#otherwise, all packets between a pair of workers would be assigned to the same queue.
if self.config.getint("all", "useMultipleIPs") > 1:
ip1_int = [int(a) for a in ip1.split(".")]
ip2_int = [int(a) for a in ip2.split(".")]
ip1_int[3] += random.randint(0, self.config.getint("all", "useMultipleIPs")-1)
ip2_int[3] += random.randint(0, self.config.getint("all", "useMultipleIPs")-1)
ip1 = "%d.%d.%d.%d" % tuple(ip1_int)
ip2 = "%d.%d.%d.%d" % tuple(ip2_int)
return (ip1, ip2, tid, tkey, intf)
@Pyro4.expose
def create_tunnel(self, w1, w2):
"""Create GRE tunnel between workers.
Create GRE tunnel connecting worker machines w1 and w2 and return the
name of the created network interface. Queries the TunHelper instance
to create the tunnel name.
Args:
w1: Worker instance.
w2: Worker instance.
Returns:
Network interface name of created tunnel.
"""
ip1, ip2, tid, tkey, intf = self.get_tunnel_metadata(w1, w2)
self.logger.debug("invoking tunnel create commands on " + ip1 +
" and " + ip2)
w1.run_script("create_tunnel.sh " + ip1 + " " + ip2 + " " + intf +
" " + str(tkey))
w2.run_script("create_tunnel.sh " + ip2 + " " + ip1 + " " + intf +
" " + str(tkey))
self.logger.debug("tunnel " + intf + " created.")
return intf
@Pyro4.expose
def remove_all_tunnels(self):
"""Shut down all tunnels on all workers."""
for worker in self.workers():
worker.run_script("delete_tunnels.sh")
#replace tunhelper instance as tunnel names/keys can be reused now.
self.tunhelper = TunHelper()
class Experiment(object):
"""Class to manage MaxiNet Experiment.
Use this class to specify an experiment. Experiments are created for
one-time-usage and have to be stopped in the end. One cluster
instance can run several experiments in sequence.
Attributes:
cluster: Cluster instance which will be used by this Experiment.
config: Config instance to query the config file.
controller: Controller class to use in Experiment.
hostname_to_workerid: Dict to map hostnames of workers to workerids
hosts: List of host NodeWrapper instances.
isMonitoring: True if monitoring is in use.
logger: Logging instance.
nodemapping: optional dict to map nodes to specific worker ids.
nodes: List of NodeWrapper instances.
node_to_worker: Dict to map node name (string) to worker instance.
node_to_wrapper: Dict to map node name (string) to NodeWrapper
instance.
origtopology: Unpartitioned topology if topology was partitioned
by MaxiNet.
shares: list to map worker ids to workload shares. shares[x] is used to
obtain the share of worker id x.
starttime: Time at which Experiment was instantiated. Used for
logfile creation.
switch: Default mininet switch class to use.
switches: List of switch NodeWrapper instances.
topology: instance of MaxiNet.Frontend.partitioner.Clustering
tunnellookup: Dict to map tunnel tuples (switchname1,switchname2)
to tunnel names. Order of switchnames can be ignored as both
directions are covered.
workerid_to_hostname: dict to map worker ids to hostnames of workers.
"""
def __init__(self, cluster, topology, controller=None,
is_partitioned=False, switch=UserSwitch,
nodemapping=None, hostnamemapping=None, sharemapping=None):
"""Inits Experiment.
Args:
cluster: Cluster instance.
topology: mininet.topo.Topo (is_partitioned==False) or
MaxiNet.Frontend.partitioner.Clustering
(is_partitioned==True) instance.
controller: Optional IPv4 address of OpenFlow controller.
If not set controller IP from MaxiNet configuration will
be used.
is_partitioned: Optional flag to indicate whether topology
is already partitioned or not. Default is unpartitioned.
switch: Optional Switch class to use in Experiment. Default
is mininet.node.UserSwitch.
nodemapping: Optional dict to map nodes to specific worker
ids (nodename->workerid). If given needs to hold worker
ids for every node in topology.
hostnamemapping: Optional dict to map workers by hostname to
worker ids. If provided every worker hostname has to be mapped
to exactly one id. If the cluster consists of N workers valid ids
are 0 to N-1.
sharemapping: Optional list to map worker ids to workload shares.
sharemapping[x] is used to obtain the share of worker id x. Takes
precedence over shares configured in config file. If given needs
to hold share for every worker.
"""
self.cluster = cluster
self.logger = logging.getLogger(__name__)
self.topology = None
self.config = self.cluster.config
self.starttime = time.localtime()
self._printed_log_info = False
self.isMonitoring = False
self.shares = sharemapping
self.nodemapping = nodemapping
if is_partitioned:
self.topology = topology
else:
self.origtopology = topology
self.node_to_worker = {}
self.node_to_wrapper = {}
if(self.is_valid_hostname_mapping(hostnamemapping)):
self.hostname_to_workerid = hostnamemapping
else:
if(not hostnamemapping is None):
self.logger.error("invalid hostnamemapping!")
self.hostname_to_workerid = self.generate_hostname_mapping()
self.workerid_to_hostname = {}
for hn in self.hostname_to_workerid:
self.workerid_to_hostname[self.hostname_to_workerid[hn]] = hn
self._update_shares()
self.nodes = []
self.hosts = []
self.tunnellookup = {}
self.switches = []
self.switch = switch
if controller:
contr = controller
else:
contr = self.config.get_controller()
if contr.find(":") >= 0:
(host, port) = contr.split(":")
else:
host = contr
port = "6633"
self.controller = functools.partial(RemoteController, ip=host,
port=int(port))
def _update_shares(self):
"""helper function which reads workload shares per worker from
config file. Has no effect if shares are already configured"""
if(self.shares is None):
ts = [1] * self.cluster.num_workers()
for i in range(0, self.cluster.num_workers()):
if(self._get_config_share(i)):
ts[i] = self._get_config_share(i)
s = sum(ts)
self.shares = []
for i in range(0, self.cluster.num_workers()):
self.shares.append(float(ts[i])/float(s))
def _get_config_share(self, wid):
"""get workload share of worker with worker id wid"""
hn = self.workerid_to_hostname[wid]
if(self.config.has_section(hn) and self.config.has_option(hn, "share")):
return self.config.getint(hn, "share")
return None
def generate_hostname_mapping(self):
"""generates a hostname-> workerid mapping dictionary"""
i = 0
d = {}
for w in self.cluster.workers():
d[w.hn()] = i
i += 1
return d
def is_valid_hostname_mapping(self, d):
"""checks whether hostname -> workerid mappign is valid
(every worker has exactly one workerid, workerids are contiguos from 0
upwards)"""
if(d is None):
return False
if(len(d) != len(self.cluster.workers())):
return False
for w in self.cluster.workers():
if(not w.hn() in d.keys()):
return False
for i in range(0, len(self.cluster.workers())):
if (list(d.values()).count(i) != 1):
return False
return True
def configLinkStatus(self, src, dst, status):
"""Change status of link.
Change status (up/down) of link between two nodes.
Args:
src: Node name or NodeWrapper instance.
dst: Node name or NodeWrapper instance.
status: String {up, down}.
"""
ws = self.get_worker(src)
wd = self.get_worker(dst)
if(ws == wd):
# src and dst are on same worker. let mininet handle this
ws.configLinkStatus(src, dst, status)
else:
src = self.get(src)
dst = self.get(dst)
intf = self.tunnellookup[(src.name, dst.name)]
src.cmd("ifconfig " + intf + " " + status)
dst.cmd("ifconfig " + intf + " " + status)
@deprecated
def find_worker(self, node):
"""Get worker instance which emulates the specified node.
Replaced by get_worker.
Args:
node: nodename or NodeWrapper instance.
Returns:
Worker instance
"""
return self.get_worker(node)
def get_worker(self, node):
"""Get worker instance which emulates the specified node
Args:
node: Nodename or NodeWrapper instance.
Returns:
Worker instance
"""
if(isinstance(node, NodeWrapper)):
return node.worker
return self.node_to_worker[node]
def get_log_folder(self):
"""Get folder to which log files will be saved.
Returns:
Logfile folder as String.
"""
return "/tmp/maxinet_logs/" + Tools.time_to_string(self.starttime) +\
"/"
def terminate_logging(self):
"""Stop logging."""
for worker in self.cluster.workers():
worker.run_cmd("killall mpstat getRxTx.sh getMemoryUsage.sh")
#get CPU logs
worker.get_file("/tmp/maxinet_cpu_" +
str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() + ").log",
"/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/")
#get memory logs
worker.get_file("/tmp/maxinet_mem_" +
str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() + ").log",
"/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/")
#get interface logs
intf = worker.run_cmd("ip addr show to " + worker.ip(classifier="backend") + "/24 " +
"| head -n1 | cut -d' ' -f2 | tr -d :")\
.strip()
worker.get_file("/tmp/maxinet_intf_" + intf + "_" +
str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() + ").log",
"/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/")
self._print_log_info()
self._print_monitor_info()
self.isMonitoring = False
def log_cpu(self):
"""Log cpu useage of workers.
Places log files in /tmp/maxinet_logs/.
"""
for worker in self.cluster.workers():
self.log_cpu_of_worker(worker)
def log_cpu_of_worker(self, worker):
"""Log cpu usage of worker.
Places log file in /tmp/maxinet_logs/.
"""
subprocess.call(["mkdir", "-p", "/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/"])
worker.daemonize("LANG=en_EN.UTF-8 mpstat 1 | while read l; " +
"do echo -n \"`date +%s` \" ; echo \"$l \" ;" +
" done > \"/tmp/maxinet_cpu_" + str(self.hostname_to_workerid[worker.hn()]) +
"_(" + worker.hn() + ").log\"")
def log_free_memory(self):
"""Log memory usage of workers.
Places log files in /tmp/maxinet_logs.
Format is:
timestamp,FreeMemory,Buffers,Cached
"""
subprocess.call(["mkdir", "-p", "/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/"])
for worker in self.cluster.workers():
worker.daemonize_script("getMemoryUsage.sh", " > \"/tmp/maxinet_mem_" +
str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() + ").log\"")
def log_interfaces_of_node(self, node):
"""Log statistics of interfaces of node.
Places logs in /tmp/maxinet_logs.
Format is:
timestamp,received bytes,sent bytes,received packets,sent packets
"""
subprocess.call(["mkdir", "-p", "/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/"])
node = self.get(node)
worker = self.get_worker(node)
for intf in node.intfNames():
self.log_interface(worker, intf)
def log_interface(self, worker, intf):
"""Log statistics of interface of worker.
Places logs in /tmp/maxinet_logs.
Format is:
timestamp,received bytes,sent bytes,received packets,sent packets
"""
worker.daemonize_script("getRxTx.sh", " " + intf + " > \"/tmp/maxinet_intf_" +
intf + "_" + str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() +
").log\"")
def monitor(self):
"""Log statistics of worker interfaces and memory usage.
Places log files in /tmp/maxinet_logs.
"""
self.isMonitoring = True
self.log_free_memory()
self.log_cpu()
for worker in self.cluster.workers():
intf = worker.run_cmd("ip addr show to " + worker.ip(classifier="backend") + "/24 " +
"| head -n1 | cut -d' ' -f2 | tr -d :")\
.strip()
if(intf == ""):
self.logger.warn("could not find main eth interface for " +
worker.hn() + ". no logging possible.")
else:
self.log_interface(worker, intf)
def _print_log_info(self):
"""Place log info message in log if log functions where used.
Prints info one time only even if called multiple times.
"""
if(not self._printed_log_info):
self._printed_log_info = True
self.logger.info("Log files will be placed in /tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/." +
" You might want to save them somewhere else.")
def _print_monitor_info(self):
"""Place monitor info message in log if Experiment was monitored."""
self.logger.info("You monitored this experiment. To generate a graph" +
" from your logs call " +
"\"/usr/local/share/MaxiNet/maxinet_plot.py " +
"/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) +
"/ plot.png\" ")
def CLI(self, plocals, pglobals):
"""Open interactive command line interface.
Arguments are used to allow usage of python commands in the same
scope as the one where CLI was called.
Args:
plocals: Dictionary as returned by locals()
pglobals: Dictionary as returned by globals()
"""
CLI(self, plocals, pglobals)
def addNode(self, name, wid=None, pos=None):
"""Do bookkeeping to add a node at runtime.
Use wid to specify worker id or pos to specify worker of
existing node. If none is given random worker is chosen.
This does NOT actually create a Node object on the mininet
instance but is a helper function for addHost etc.
Args:
name: Node name.
wid: Optional worker id to place node.
pos: Optional existing node name whose worker should be used
as host of node.
"""
if (wid is None):
wid = random.randint(0, self.cluster.num_workers() - 1)
if (not pos is None):
wid = self.hostname_to_workerid[self.node_to_worker[pos].hn()]
self.node_to_worker[name] = self.cluster.get_worker(self.workerid_to_hostname[wid])
self.node_to_wrapper[name] = NodeWrapper(name, self.get_worker(name))
self.nodes.append(self.node_to_wrapper[name])
def addHost(self, name, cls=None, wid=None, pos=None, **params):
"""Add host at runtime.
Use wid to specify worker id or pos to specify worker of
existing node. If none is given random worker is chosen.
Args:
name: Host name.
cls: Optional mininet class to use for instantiation.
wid: Optional worker id to place node.
pos: Optional existing node name whose worker should be used
as host of node.
**params: parameters to use at mininet host class
instantiation.
"""
self.addNode(name, wid=wid, pos=pos)
self.get_worker(name).addHost(name, cls=cls, **params)
self.hosts.append(self.get(name))
#deactivate TSO
if (self.config.deactivateTSO()):
for intf in self.get_node(name).intfNames():
self.get_node(name).cmd("sudo ethtool -K %s tso off" % intf)
#set MTU if necessary
if (self.config.run_with_1500_mtu()):
self.setMTU(self.get_node(name), 1450)
return self.get(name)
def addSwitch(self, name, cls=None, wid=None, pos=None, **params):
"""Add switch at runtime.
Use wid to specify worker id or pos to specify worker of
existing node. If none is given random worker is chosen.
Args:
name: Switch name.
cls: Optional mininet class to use for instantiation.
wid: Optional worker id to place node.
pos: Optional existing node name whose worker should be used
as host of node.
**params: parameters to use at mininet switch class
instantiation.
"""
self.addNode(name, wid=wid, pos=pos)
self.get_worker(name).addSwitch(name, cls, **params)
self.switches.append(self.get(name))
#set MTU if necessary
if (self.config.run_with_1500_mtu()):
self.setMTU(self.get_node(name), 1450)
return self.get(name)
def addController(self, name="c0", controller=None, wid=None, pos=None,
**params):
"""Add controller at runtime.
Use wid to specify worker id or pos to specify worker of
existing node. If none is given random worker is chosen.
Args:
name: Controller name.
controller: Optional mininet class to use for instantiation.
wid: Optional worker id to place node.
pos: Optional existing node name whose worker should be used
as host of node.
**params: parameters to use at mininet controller class
instantiation.
"""
self.addNode(name, wid=wid, pos=pos)
self.get_worker(name).addController(name, controller, **params)
return self.get(name)
def name(self, node):
"""Get name of network node.
Args:
node: Node name or NodeWrapper instance.
Returns:
String of node name.
"""
if(isinstance(node, NodeWrapper)):
return node.nn
return node
def addLink(self, node1, node2, port1=None, port2=None, cls=None,
autoconf=False, **params):
"""Add link at runtime.
Add link at runtime and create tunnels between workers if
necessary. Will not work for mininet.node.UserSwitch switches.
Be aware that tunnels will only work between switches so if you
want to create a link using a host at one side make sure that
both nodes are located on the same worker.
autoconf parameter handles attach() and config calls on switches and
hosts.
Args:
node1: Node name or NodeWrapper instance.
node2: Node name or NodeWrapper instance.
port1: Optional port number of link on node1.
port2: Optional port number of link on node2.
cls: Optional class to use on Link creation. Be aware that
only mininet.link.Link and mininet.link.TCLink are
supported for tunnels.
autoconf: mininet requires some calls to make newly added
tunnels work. If autoconf is set to True MaxiNet will
issue these calls automatically.
Raises:
RuntimeError: If cls is not None or Link or TCLink and
tunneling is needed.
"""
w1 = self.get_worker(node1)
w2 = self.get_worker(node2)
if(not isinstance(node1, NodeWrapper)):
node1 = self.get(node1)
if(not isinstance(node2, NodeWrapper)):
node2 = self.get(node2)
if(w1 == w2):
self.logger.debug("no tunneling needed")
l = w1.addLink(self.name(node1), self.name(node2), port1, port2,
cls, **params)
else:
self.logger.debug("tunneling needed")
if(not ((node1 in self.switches) and (node2 in self.switches))):
self.logger.error("We cannot create tunnels between switches" +
" and hosts. Sorry.")
raise RuntimeError("Can't create tunnel between switch and" +
"host")
if(not ((cls is None) or issubclass(cls, Link) or
issubclass(cls, TCLink))):
self.logger.error("Only Link or TCLink instances are " +
"supported by MaxiNet")
raise RuntimeError("Only Link or TCLink instances are " +
"supported by MaxiNet")
intfn = self.cluster.create_tunnel(w1, w2)
if((cls is None) or issubclass(cls, TCLink)):
intf = TCIntf
else:
intf = Intf
w1.addTunnel(intfn, self.name(node1), port1, intf, **params)
w2.addTunnel(intfn, self.name(node2), port2, intf, **params)
l = ((self.name(node1), intfn), (self.name(node2), intfn))
if(autoconf):
if(node1 in self.switches):
node1.attach(l[0][1])
else:
node1.configDefault()
if(node2 in self.switches):
node2.attach(l[1][1])
else:
node2.configDefault()
if(self.config.run_with_1500_mtu()):
self.setMTU(node1, 1450)
self.setMTU(node2, 1450)
def get_node(self, node):
"""Return NodeWrapper instance that is specified by nodename.
Args:
node: Nodename or nodewrapper instance.
Returns:
NodeWrapper instance with name nodename or None if none is
found.
"""
if(node in self.node_to_wrapper):
return self.node_to_wrapper[node]
else:
return None
def get(self, node):
"""Return NodeWrapper instance that is specified by nodename.
Alias for get_node.
Args:
node: Nodename or nodewrapper instance.
Returns:
NodeWrapper instance with name nodename or None if none is
found.
"""
return self.get_node(node)
def setup(self):
"""Start experiment.
Partition topology (if needed), assign topology parts to workers and
start mininet instances on workers.
Raises:
RuntimeError: If Cluster is too small.
"""
self.logger.info("Clustering topology...")
# partition topology (if needed)
if(not self.topology):
parti = Partitioner()
parti.loadtopo(self.origtopology)
if(self.nodemapping):
self.topology = parti.partition_using_map(self.nodemapping)
else:
self.topology = parti.partition(self.cluster.num_workers(),
shares=self.shares)
self.logger.debug("Tunnels: " + str(self.topology.getTunnels()))
subtopos = self.topology.getTopos()
if(len(subtopos) > self.cluster.num_workers()):
raise RuntimeError("Cluster does not have enough workers for " +
"given topology")
# initialize internal bookkeeping
for subtopo in subtopos:
for node in subtopo.nodes():
self.node_to_worker[node] = self.cluster.get_worker(self.workerid_to_hostname[subtopos.index(subtopo)])
self.nodes.append(NodeWrapper(node, self.get_worker(node)))
self.node_to_wrapper[node] = self.nodes[-1]
if (not subtopo.isSwitch(node)):
self.hosts.append(self.nodes[-1])
else:
self.switches.append(self.nodes[-1])
# create tunnels
tunnels = [[] for x in range(len(subtopos))]
stt_tunnels = []
for tunnel in self.topology.getTunnels():
w1 = self.get_worker(tunnel[0])
w2 = self.get_worker(tunnel[1])
if not self.config.use_stt_tunneling():
intf = self.cluster.create_tunnel(w1, w2)
else:
ip1, ip2, tid, tkey, intf = self.cluster.get_tunnel_metadata(w1, w2)
stt_tunnels.append((w1, w2, ip1, ip2, tid, tkey, intf))
self.tunnellookup[(tunnel[0], tunnel[1])] = intf
self.tunnellookup[(tunnel[1], tunnel[0])] = intf
for i in range(0, 2):
# Assumes that workerid = subtopoid
tunnels[self.hostname_to_workerid[self.node_to_worker[tunnel[i]].hn()]].append([intf,
tunnel[i],
tunnel[2]])
# start mininet instances
for topo in subtopos:
wid = subtopos.index(topo)
worker = self.cluster.get_worker(self.workerid_to_hostname[wid])
worker.set_switch(self.switch)
# cache hostname for possible error message
thn = worker.hn()
try:
if(self.controller):
worker.start(
topo=topo,
tunnels=tunnels[subtopos.index(topo)],
controller=self.controller)
else:
worker.start(
topo=topo,
tunnels=tunnels[wid])
except Pyro4.errors.ConnectionClosedError:
self.logger.error("Remote " + thn + " exited abnormally. " +
"This is probably due to mininet not" +
" starting up. You might want to have a look"+
" at the output of the MaxiNetWorker calls on"+
" the Worker machines.")
raise
# configure network if needed
if (self.config.run_with_1500_mtu()):
for topo in subtopos:
for host in topo.nodes():
self.setMTU(self.get(host), 1450)
#deactivate TSO if needed
if (self.config.deactivateTSO()):
for topo in subtopos:
for host in topo.nodes():
for intf in self.get(host).intfNames():
self.get(host).cmd("sudo ethtool -K %s tso off" % intf)
for (w1, w2, ip1, ip2, tid, tkey, intf) in stt_tunnels:
w1.run_cmd("ovs-vsctl -- set interface %s type=stt options=\"remote_ip=%s,local_ip=%s,key=%i\"" % (intf, ip2, ip1, tkey))
w2.run_cmd("ovs-vsctl -- set interface %s type=stt options=\"remote_ip=%s,local_ip=%s,key=%i\"" % (intf, ip1, ip2, tkey))
def setMTU(self, host, mtu):
"""Set MTUs of all Interfaces of mininet host.
Args:
host: NodeWrapper instance or nodename.
mtu: MTU value.
"""
if(not isinstance(host, NodeWrapper)):
host = self.get(host)
for intf in host.intfNames():
host.cmd("ifconfig %s mtu %i" % (intf, mtu))
@deprecated
def run_cmd_on_host(self, host, cmd):
"""Run cmd on mininet host.
Run cmd on emulated host specified by host and return
output.
This function is deprecated and will be removed in a future
version of MaxiNet. Use Experiment.get(node).cmd() instead.
Args:
host: Hostname or NodeWrapper instance.
cmd: Command to run as String.
"""
return self.get_worker(host).run_cmd_on_host(host, cmd)
def stop(self):
"""Stop experiment and shut down emulation on workers."""
if self.isMonitoring:
self.terminate_logging()
for worker in self.cluster.workers():
worker.stop()
self.cluster.remove_all_tunnels()
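# Hedged workflow sketch (added for clarity, not part of the original file): one
# long-lived Cluster can run several Experiments in sequence, but each Experiment
# is single-use. The passed-in mininet topology and the host name "h1" are assumptions.
def _experiment_workflow_example(topo):
    cluster = Cluster()               # reserves all currently free workers
    exp = Experiment(cluster, topo)   # defaults: UserSwitch, controller from config
    exp.setup()                       # partitions the topology and starts the workers
    try:
        print(exp.get("h1").cmd("ifconfig"))   # assumes the topology defines a host "h1"
    finally:
        exp.stop()                    # shuts down emulation; the Cluster stays usable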
class NodeWrapper(object):
"""Wrapper that allows most commands that can be used in mininet to be
used in MaxiNet as well.
Whenever you call for example
> exp.get("h1")
you'll get an instance of NodeWrapper which will forward calls to
the respective mininet node.
Mininet method calls that SHOULD work:
"IP", "MAC", "attach", "cfsInfo", "cgroupGet", "cgroupDel",
"cgroupSet", "cleanup", "cmd", "cmdPrint", "config",
"configDefault", "connected", "controllerUUIDs", "defaultDpid",
"detach", "dpctl", "intfIsUp", "intfNames", "monitor", "newPort",
"pexec", "read", "readline", "rtInfo", "sendCmd", "sendInt",
"setARP", "setCPUFrac", "setCPUs", "setIP", "setup", "start",
"stop", "terminate", "waitOutput", "waitReadable", "write"
Mininet attributes that SHOULD be gettable:
"inNamespace", "name", "params", "waiting"
Containernet Docker Host method calls that SHOULD work:
"updateCpuLimit", "updateMemoryLimit", "cgroupSet", "cgroupGet",
"update_resources"
Containernet Docker Host attributes that SHOULD be gettable:
"dimage", "resources", "volumes"
Attributes:
nn: Node name as String.
worker: Worker instance on which node is hosted.
"""
# this feels like doing rpc via rpc...
def __init__(self, nodename, worker):
"""Inits NodeWrapper.
The NodeWrapper does not create a node on the worker. For this
reason the node should already exist on the Worker when
NodeWrapper.__init__ gets called.
Args:
nodename: Node name as String
worker: Worker instance
"""
self.nn = nodename
self.worker = worker
def is_docker(self):
"""Checks if the node wrapper belongs to a docker host."""
return self._get("__class__").__name__ == "Docker"
def _call(self, cmd, *params1, **params2):
"""Send method call to remote mininet instance and get return.
Args:
cmd: method name as String.
*params1: unnamed parameters for call.
**params2: named parameters for call.
"""
return self.worker.rpc(self.nn, cmd, *params1, **params2)
def _get(self, name):
"""Return attribut name of remote node."""
return self.worker.rattr(self.nn, name)
def __getattr__(self, name):
def method(*params1, **params2):
return self._call(name, *params1, **params2)
# The following commands and attributes did NOT work, when last tested.
# They are deactivated to avoid confusion. This is mostly caused by
# Pyro4 related serialization problems.
if name in [
# methods:
"addIntf", "checkListening", "chrt", "connectionsTo",
"defaultIntf", "deleteIntfs", "intf"
# attributes:
"nameToIntf"
]:
raise NotImplementedError(
str(name)
+ ": Explicitly disabled due to serialization problems. "
+ "To force access use the _call or _get methods manually. "
+ "Use at own risk."
)
# the following attributes and methods SHOULD work. no guarantee given
if name in [
"IP", "MAC", "attach", "cfsInfo", "cgroupGet", "cgroupDel",
"cgroupSet", "cleanup", "cmd", "cmdPrint", "config",
"configDefault", "connected", "controllerUUIDs", "defaultDpid",
"detach", "dpctl", "intfIsUp", "intfNames", "monitor", "newPort",
"pexec", "read", "readline", "rtInfo", "sendCmd", "sendInt",
"setARP", "setCPUFrac", "setCPUs", "setIP", "setup", "start",
"stop", "terminate", "waitOutput", "waitReadable", "write"
]:
return method
elif name in ["inNamespace", "name", "params", "waiting"]:
return self._get(name)
# Containernet specific
elif self.is_docker():
if name in ["updateCpuLimit", "updateMemoryLimit", "cgroupSet",
"cgroupGet", "update_resources"]:
return method
elif name in ["dimage", "resources", "volumes"]:
return self._get(name)
else:
raise AttributeError(name)
def __repr__(self):
return "NodeWrapper (" + self.nn + " at " + str(self.worker) + ")"
|
camera_feeder.py
|
import logging
import threading
import time
from typing import Optional
import cv2
import numpy as np
from custom_exceptions import CameraFeederInitException
logger = logging.getLogger(__name__)
class CameraFeeder:
CAPTURE_RETRY_MAX: int = 30
def __init__(self, src: str) -> None:
self._src: str = src
self._cap = cv2.VideoCapture(self._src, cv2.CAP_GSTREAMER)
self._frame: Optional[np.ndarray] = None
self._fail_count: int = 0
self._cap_thread = threading.Thread(target=self._update)
self._cap_thread_started: bool = False
self._read_lock = threading.Lock()
def _update(self) -> None:
while self._cap_thread_started:
ret, frame = self._cap.read()
with self._read_lock:
if ret:
self._frame = frame
self._fail_count = 0
else:
self._frame = None
self._fail_count += 1
if self._fail_count > CameraFeeder.CAPTURE_RETRY_MAX:
logger.warning("VideoCapture retry over, set capture again")
self._cap.release()
time.sleep(1)
self._cap = cv2.VideoCapture(self._src, cv2.CAP_GSTREAMER)
if not self._cap.isOpened():
logger.error("VideoCapture is not opened")
raise CameraFeederInitException()
def start(self) -> None:
if self._cap.isOpened():
self._cap_thread_started = True
self._cap_thread.start()
else:
logger.error("VideoCapture is not opened")
raise CameraFeederInitException()
def read(self) -> Optional[np.ndarray]:
with self._read_lock:
if self._frame is None:
frame = None
else:
frame = self._frame.copy()
return frame
def stop(self) -> None:
self._cap_thread_started = False
self._cap_thread.join()
self._cap.release()
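# Hedged usage sketch (added, not part of the original module). The GStreamer
# pipeline string is only an assumed example; any source accepted by
# cv2.VideoCapture works.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    feeder = CameraFeeder("videotestsrc ! videoconvert ! appsink")
    feeder.start()
    try:
        for _ in range(50):
            frame = feeder.read()        # copy of the latest frame, or None if none yet
            if frame is not None:
                logger.info("frame shape: %s", frame.shape)
            time.sleep(0.1)
    finally:
        feeder.stop()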
|
hipster_recon.py
|
#!/usr/bin/env python3
"""
Program: hipster_recon.py
Date: 01/27/2022
Author: Travis Phillips
Purpose: A pre-engagement script for confirming scope. Features include:
* Converts a CIDR notation IP into a range/list of IP addresses,
* Can dump a raw list and exit for creating target list.
* Performs ARIN WHOIS lookup on one of the IPs to get org
name information and subnet records
* Optionally, runs a scan looking for SSL ports (443, 8443) and
extracts issuer, common name, and alternative names from
their certs.
"""
import sys
import argparse
import ipaddress
import socket
import threading
import ssl
from queue import Queue
import requests
from cryptography import x509
from cryptography.x509.oid import NameOID
class PortScanner:
""" A port scanner class for multi-thread port scanning. """
def __init__(self, hosts: list, ports: list, verbose=False, timeout=1) -> None:
""" Initalize the port scanner class. """
self.hosts = hosts
self.ports = ports
self.verbose = verbose
self.timeout = timeout
self.results = {}
self.queue = Queue()
def _print(self, message: str) -> None:
""" Verbose print function for scanning. """
if self.verbose:
print(message)
def scan_host(self, host: str) -> None:
"""
Thread job that will scan the host for self.ports. If an open
port is found, then it will be added to the dictionary
self.results[host], which is a list of open ports.
"""
for port in self.ports:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
result = sock.connect_ex((host,port))
if result == 0:
if host not in self.results:
self.results[host] = []
self._print(f" [*] {host}:{port} is open!")
self.results[host].append(port)
sock.close()
def _worker(self) -> None:
""" Thread worker function. Will pull a host from queue and run the scan. """
while True:
# Get a host from the queue and scan it
host = self.queue.get()
self.scan_host(host)
self.queue.task_done()
def run(self, nthreads=100) -> None:
""" Run the port scanner. This is a blocking call until scan is complete. """
print(" [*] Scanning for open SSL ports...")
for _x in range(nthreads):
thread = threading.Thread(target=self._worker)
thread.daemon = True
thread.start()
for host in self.hosts:
self.queue.put(host)
# block until all tasks are done
self.queue.join()
self._print(" [*] Scan Complete!\n")
########################################################################
# TABLE DRAWING FUNCTIONS
########################################################################
def row_has_lists(row: list) -> bool:
""" Returns if the dataset has list or not. """
for item in row:
if isinstance(item, list):
return True
return False
def normalize_dataset(dataset: list) -> list:
"""
Converts all row cells into lists containing the same
number of elements.
"""
new_dataset = []
for row in dataset:
new_row = []
if row_has_lists(row):
max_array_count = get_max_elements_in_row(row)
for item in row:
if isinstance(item, str):
new_item = [item]
for _idx in range(max_array_count-1):
new_item.append("")
else:
new_item = item.copy()
while len(new_item) < max_array_count:
new_item.append("")
new_row.append(new_item)
else:
for item in row:
new_row.append([item])
new_dataset.append(new_row)
return new_dataset
def get_max_elements_in_row(row: list) -> int:
"""
Loops through the row and returns the maximum number of elements found
in any list cell, or 1 if all cells are strings.
"""
max_array_count = 0
# get the max height of the cells.
for item in row:
if isinstance(item, list) and len(item) > max_array_count:
max_array_count = len(item)
elif isinstance(item, str) and max_array_count == 0:
max_array_count = 1
return max_array_count
def get_max_cell_widths(dataset: list) -> list:
""" get the max width of cells. """
max_widths = [0] * len(dataset[0])
for row in dataset:
for idx, cell in enumerate(row):
for item in cell:
if len(item) > max_widths[idx]:
max_widths[idx] = len(item)
return max_widths
def print_table_header(title: str, max_lengths: list) -> None:
""" Draws an ASCII table to represent a dataset. """
total_length = sum(max_lengths) + (2 * len(max_lengths)) + (len(max_lengths)-1)
print("")
row_sep(max_lengths, start="┌", sep='─', end="┐")
print(f"│{title.center(total_length)}│")
row_sep(max_lengths, sep="┬")
def row_sep(max_lengths: list, start="├", sep="┼", end="┤") -> None:
""" split row draw function. """
buf = start
for length in max_lengths:
buf += f"{'─'*(length+2)}{sep}"
buf = buf[:-1] + f"{end}"
print(buf)
def print_row(max_lengths: list, row: list, center=False) -> None:
""" Print the data row. """
max_array_count = get_max_elements_in_row(row)
buf = ""
for cell_idx in range(max_array_count):
if cell_idx == 0:
buf += "│"
else:
buf += "\n│"
for row_idx, _cell in enumerate(row):
if center:
buf += f" {row[row_idx][cell_idx].center(max_lengths[row_idx])} │"
else:
buf += f" {row[row_idx][cell_idx].ljust(max_lengths[row_idx])} │"
print(buf)
def draw_table(title: str, dataset: list) -> None:
""" Draws an ASCII table to represent a dataset. """
normalized_dataset = normalize_dataset(dataset)
max_lengths = get_max_cell_widths(normalized_dataset)
print_table_header(title, max_lengths)
for idx, row in enumerate(normalized_dataset):
# Print the row
if idx == 0:
# If idx is first row, it should be a table headers.
print_row(max_lengths, row, center=True)
else:
# This is a normal row.
print_row(max_lengths, row)
# Print a row separator.
if idx == len(dataset)-1:
row_sep(max_lengths, start="└", sep='┴', end="┘")
else:
row_sep(max_lengths)
print("")
########################################################################
# WHOIS AND CERT ENUMERATION FUNCTIONS
########################################################################
def extract_arin_org_name(record: dict) -> str:
"""
Attempts to extract the customer name or org name from the record. If
this isn't possible, it will return an empty string.
"""
org_name = ""
if 'customerRef' in record and '@name' in record['customerRef']:
org_name = record['customerRef']['@name']
if '@handle' in record['customerRef']:
org_name += f" ({record['customerRef']['@handle']})"
elif 'orgRef' in record:
org_name = record['orgRef']['@name']
return org_name
def extract_arin_netblocks(netblock: dict) -> tuple:
"""
Extracts the start address, end address, and CIDR notation from a
netBlock record (or list of records) as a (start_ip, end_ip, cidr) tuple.
"""
if isinstance(netblock, list):
start_ip = []
end_ip = []
cidr = []
for row in netblock:
start_ip.append(row['startAddress']['$'])
end_ip.append(row['endAddress']['$'])
cidr.append(f"{row['startAddress']['$']}/{row['cidrLength']['$']}")
else:
start_ip = netblock['startAddress']['$']
end_ip = netblock['endAddress']['$']
cidr = f"{start_ip}/{netblock['cidrLength']['$']}"
return (start_ip, end_ip, cidr)
def get_arin_info(ip_addr: str) -> None:
""" Query the ARIN for IP information. """
records = [["CIDR", "Start", "End", "Organization"]]
url = f"https://whois.arin.net/rest/nets;q={ip_addr}?showDetails=true&showARIN=true"
headers = {"Accept": "application/json"}
res = requests.get(url, headers=headers)
if res.status_code == 200:
data = res.json()['nets']['net']
if isinstance(data, dict):
org_name = extract_arin_org_name(data)
netblock = data['netBlocks']['netBlock']
start_ip, end_ip, cidr = extract_arin_netblocks(netblock)
records.append([cidr, start_ip, end_ip, org_name])
else:
for record in data:
org_name = extract_arin_org_name(record)
netblock = record['netBlocks']['netBlock']
start_ip, end_ip, cidr = extract_arin_netblocks(netblock)
records.append([cidr, start_ip, end_ip, org_name])
draw_table("ARIN Records", records)
def get_ssl_cert(host: str, port: int) -> x509.Certificate:
""" Connect and get cert information. """
try:
ssl_info = ssl.get_server_certificate((host, port))
cert = x509.load_pem_x509_certificate(ssl_info.encode('utf-8'))
return cert
except ConnectionResetError:
return None
def get_common_name(cert: x509.Certificate) -> str:
""" Get the common name from a X509 cert. """
try:
names = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
return names[0].value
except (IndexError, x509.ExtensionNotFound):
return ""
def get_alternatives_names(cert: x509.Certificate) -> list:
""" Get the alternative names as a list from a X509 cert. """
try:
ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
return ext.value.get_values_for_type(x509.DNSName)
except x509.ExtensionNotFound:
return [""]
def get_issuer(cert: x509.Certificate) -> str:
""" Get the alternative names as a list from a X509 cert. """
try:
names = cert.issuer.get_attributes_for_oid(NameOID.COMMON_NAME)
return names[0].value
except (IndexError, x509.ExtensionNotFound):
return ""
########################################################################
# ARG PARSER & MISC FUNCTIONS
########################################################################
def print_range(ip_range: list) -> None:
""" Print the IP range. """
for ip_addr in ip_range:
print(f"{ip_addr}")
def parse_args() -> None:
""" Parse the arguments or show help. """
desc = " Converts a CIDR notation IP into a range/list of IP addresses,\n"
desc += " performs a ASIN WHOIS lookup on one of the IPs to get org name,\n"
desc += " and optionally runs a port scan for 443/8443 looking for SSL ports\n"
desc += " and extracts common and alt names and issuers from their certs."
epilog = "\n\nNOTES:\n\t---===[ Network Traffic ]===---\n"
epilog += " - IP Range listing requires no network traffic.\n"
epilog += " - ARIN will query API at https://whois.arin.net/.\n"
epilog += " - SSL Scan will send traffic to target subnet:\n"
epilog += " - Connect port scan against TCP ports 443 & 8443.\n"
epilog += " - Connect to ports for certificate extraction.\n\n"
epilog += "\n\n\t---===[ About the Name ]===---\n"
epilog += " This tool is called Hipster Recon because it does recon on CIDR...\n"
epilog += " Remember: A pun is just a joke that isn't fully groan.\n\n"
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=desc, epilog=epilog)
parser.add_argument('--list', '-l',
help="Dump a raw list of the CIDR IP range & exit. \
Useful for target list files",
action='store_true')
parser.add_argument('--sslscan', '-s',
help='Enable SSL Scan of the subnet',
action='store_true')
parser.add_argument('--timeout', default=1, type=int,
help="Socket timeout for SSL port scanning.")
parser.add_argument('--verbose', '-v',
help='Allows more output during SSL Scan',
action='store_true')
parser.add_argument('CIDR_IP',
help='CIDR Notation IP Subnet or single IP [Hostname not permitted]')
# If no arguments were provided, print help.
if len(sys.argv) == 1:
parser.print_help()
return None
# Parse arguments.
args = parser.parse_args()
return args
########################################################################
# MAIN LOGIC
########################################################################
def main() -> int:
""" Main Application Logic. """
args = parse_args()
if not args:
return 1
# Extract the IP range for the CIDR into a list
ip_range = [str(ip) for ip in ipaddress.IPv4Network(args.CIDR_IP, False).hosts()]
# Print the IP range as a list or dash range to the user.
if args.list:
print_range(ip_range)
return 0
print(f"\n [*] IP Range: {ip_range[0]} - {ip_range[-1]}")
# Query ARIN and dump a table of the netblock records.
get_arin_info(ip_range[0])
if args.sslscan:
# Scan for open SSL ports
ssl_ports = [443, 8443]
scanner = PortScanner(ip_range, ssl_ports, verbose=args.verbose,
timeout=args.timeout)
scanner.run()
# Extract SSL Certs from live hosts
certs = [["Host/Port", "Issuer", "Common Name", "Alt. Names"]]
print(" [*] Grabbing SSL Certs...")
for host, ports in scanner.results.items():
for port in ports:
cert = get_ssl_cert(host, port)
if cert:
certs.append([f"{host}:{port}",
get_issuer(cert),
get_common_name(cert),
get_alternatives_names(cert)])
print(f" [*] {len(certs)-1} SSL certs obtained!")
if len(certs) > 1:
draw_table("SSL Certificates", certs)
return 0
if __name__ == "__main__":
sys.exit(main())
|
test_motor_tail.py
|
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Motor, an asynchronous driver for MongoDB and Tornado."""
from __future__ import unicode_literals
import threading
import time
import unittest
from tornado import gen
from tornado.testing import gen_test
import test
from test import MotorTest
class MotorTailTest(MotorTest):
@gen.coroutine
def _reset(self):
yield self.db.capped.drop()
# autoIndexId catches test bugs that try to insert duplicate _id's
yield self.db.create_collection(
'capped', capped=True, size=1000, autoIndexId=True)
yield self.db.uncapped.drop()
yield self.db.uncapped.insert({})
def setUp(self):
super(MotorTailTest, self).setUp()
self.io_loop.run_sync(self._reset)
def start_insertion_thread(self, pauses):
"""A thread that gradually inserts documents into a capped collection
"""
sync_db = test.env.sync_cx.motor_test
def add_docs():
i = 0
for pause in pauses:
time.sleep(pause)
sync_db.capped.insert({'_id': i})
i += 1
t = threading.Thread(target=add_docs)
t.start()
return t
# Need at least one pause > 4.5 seconds to ensure we recover when
# getMore times out
tail_pauses = (0, 1, 0, 1, 0, 5, 0, 0)
expected_duration = sum(tail_pauses) + 10 # Add 10 sec of fudge
@gen_test(timeout=expected_duration)
def test_tail(self):
expected = [{'_id': i} for i in range(len(self.tail_pauses))]
t = self.start_insertion_thread(self.tail_pauses)
capped = self.db.capped
results = []
time = self.io_loop.time
start = time()
cursor = capped.find(tailable=True, await_data=True)
while (results != expected
and time() - start < MotorTailTest.expected_duration):
while (yield cursor.fetch_next):
doc = cursor.next_object()
results.append(doc)
# If cursor was created while capped collection had no documents
# (i.e., before the thread inserted first doc), it dies
# immediately. Just restart it.
if not cursor.alive:
cursor = capped.find(tailable=True, await_data=True)
t.join()
self.assertEqual(expected, results)
if __name__ == '__main__':
unittest.main()
|
test_operator_gpu.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys
import os
import time
import multiprocessing as mp
import unittest
import mxnet as mx
import numpy as np
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from mxnet import autograd
from numpy.testing import assert_allclose
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied
from test_operator import *
from test_optimizer import *
from test_random import *
from test_exc_handling import *
#from test_rnn import *
from test_sparse_ndarray import *
from test_sparse_operator import *
from test_ndarray import *
from test_subgraph_op import *
from test_contrib_operator import test_multibox_target_op
set_default_context(mx.gpu(0))
del test_support_vector_machine_l1_svm # noqa
del test_support_vector_machine_l2_svm # noqa
def check_countsketch(in_dim,out_dim,n):
data = mx.sym.Variable("data")
h = mx.sym.Variable("h")
s = mx.sym.Variable("s")
sym = mx.sym.contrib.count_sketch(data=data, h=h, s=s, name='countsketch',out_dim = out_dim)
shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s
arr = [mx.nd.empty(shape[i]) for i in range(3)]
arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]
x = np.random.uniform(-10, 10, shape[0])
arr[0][:] = x #input x
h = np.random.randint(0, out_dim, shape[1])
arr[1][:] = h #hash h
s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])
arr[2][:] = s #hash s
locations = {"data": x, "h": h, "s": s}
a = np.zeros((n,out_dim))
temp = np.multiply(x, s)
for num_sample in np.arange(0,n):
for idx in np.arange(0,in_dim):
a[num_sample][h[0][idx]] += temp[num_sample][idx]
check_symbolic_forward(sym, locations, [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
out_grad = mx.nd.empty((n,out_dim))
out_grad[:] = np.random.normal(-3, 3, (n,out_dim))
a = np.zeros((n,in_dim))
for j in np.arange(0,n):
for i in np.arange(0,in_dim):
a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
check_symbolic_backward(sym, locations, [out_grad], [a], rtol=1e-3, atol=1e-5, ctx=mx.gpu(0))
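# The reference computation above builds the count sketch with explicit Python
# loops. The helper below is a minimal vectorized NumPy sketch of the same
# forward projection, added for illustration only; its name and the 1-D h/s
# arguments are assumptions of this sketch, not part of the MXNet API, and it
# is not used by the tests.
def _count_sketch_forward_sketch(x, h, s, out_dim):
    # x: (n, in_dim) inputs, h: (in_dim,) hash buckets in [0, out_dim),
    # s: (in_dim,) signs in {-1, +1}. Each signed input column is scatter-added
    # into the output column selected by its hash bucket.
    n = x.shape[0]
    out = np.zeros((n, out_dim))
    np.add.at(out, (slice(None), h), x * s)
    return out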
@with_seed()
def test_countsketch():
minindim = 40
maxindim = 100
minoutdim = 5
maxoutdim = 30
maxn = 200
in_dim = np.random.randint(minindim, maxindim)
out_dim = np.random.randint(minoutdim, maxoutdim)
n = np.random.randint(1, maxn)
check_countsketch(in_dim, out_dim, n)
def check_ifft(shape):
shape_old = shape
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1]*2)
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
shape = (shape[0],shape[1],shape[2],shape[3]*2)
sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train= True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
if len(shape) == 2:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[1]):
init_complex.real[:,i] = init[0][:,2*i]
init_complex.imag[:,i] = init[0][:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
init_complex = np.zeros(shape_old,dtype = np.complex64)
for i in range(0,shape_old[3]):
init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]
init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]
a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[1]):
temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(shape_old)
out_grad[:] = np.random.normal(-3, 3, shape_old)
for exe in exe_list:
exe.backward([out_grad])
temp = exe.grad_arrays[0].asnumpy()
temp = np.zeros(shape_old)
for i in range(shape_old[3]):
temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]
a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)
assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-5)
@with_seed()
def test_ifft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_ifft(shape)
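# The contrib FFT/IFFT operators exercised here pack a complex tensor of shape
# (..., k) as real/imaginary pairs interleaved along the last axis, i.e. a real
# tensor of shape (..., 2*k). The helpers below are a minimal NumPy sketch of
# that packing, mirroring the index loops in check_ifft/check_fft; the names
# are placeholders introduced for illustration and are not part of the MXNet API.
def _interleaved_to_complex_sketch(arr):
    # Even last-axis positions hold real parts, odd positions hold imaginary parts.
    return arr[..., 0::2] + 1j * arr[..., 1::2]
def _complex_to_interleaved_sketch(carr):
    out = np.zeros(carr.shape[:-1] + (2 * carr.shape[-1],), dtype=carr.real.dtype)
    out[..., 0::2] = carr.real
    out[..., 1::2] = carr.imag
    return out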
def check_fft(shape):
sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
if len(shape) == 2:
if shape[1]%2 != 0:
lst = list(shape)
lst[1] = lst[1]*2
shape = tuple(lst)
shape_old = shape
if len(shape) == 4:
if shape[3]%2 != 0:
lst = list(shape)
lst[3] = lst[3]*2
shape = tuple(lst)
shape_old = shape
init = [np.random.normal(size=shape, scale=1.0)]
arr_grad = [mx.nd.empty(shape)]
ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': np.float32}}]
exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]
for exe in exe_list:
for arr, iarr in zip(exe.arg_arrays, init):
arr[:] = iarr.astype(arr.dtype)
# forward
for exe in exe_list:
exe.forward(is_train=True)
out1 = [exe.outputs[0].asnumpy() for exe in exe_list]
out = np.fft.fft(init, n=None, axis=-1, norm=None)
if len(shape) == 2:
out = np.reshape(out,(out.shape[1],out.shape[2]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
p = 0
for i in range(out2.shape[1]//2):
a[:,p] = out2[:,i]
a[:,p+1] = out2[:,i+out2.shape[1]//2]
p = p+2
if len(shape) == 4:
out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))
out2 = np.append(out.real, out.imag, axis = 1)
a = np.zeros(out1[0].shape)
for i in range(out1[0].shape[0]):
for j in range(out1[0].shape[1]):
p = 0
for k in range(out2.shape[3]):
a[i,j,:,p] = out2[i,j,:,k]
a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]
p = p+2
assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-5)
# backward
if len(shape) == 2:
out_grad = mx.nd.empty((shape[0],2*shape[1]))
out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[1]):
out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]
out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-5)
if len(shape) == 4:
out_grad = mx.nd.empty(out1[0].shape)
out_grad[:] = np.random.normal(-3, 3, out1[0].shape)
# out_grad_to_complex
out_grad_complex = np.zeros(shape,dtype = np.complex64)
for i in range(0,shape[3]):
out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]
out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]
for exe in exe_list:
exe.backward([out_grad])
a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-5)
@with_seed()
def test_fft():
nrepeat = 2
maxdim = 10
for repeat in range(nrepeat):
for order in [2,4]:
shape = tuple(np.random.randint(1, maxdim, size=order))
check_fft(shape)
@with_seed()
def test_batchnorm_with_type():
ctx_list_v1_2D = [
{'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
]
ctx_list_v2_2D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_1D = [
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (5, 2, 5), 'type_dict': {'norm_data': np.float64}},
]
ctx_list_v2_3D = [
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.cpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float16}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float32}},
{'ctx': mx.gpu(0), 'norm_data': (3, 2, 3, 2, 3), 'type_dict': {'norm_data': np.float64}}
]
# V1, 2D
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)
check_consistency(sym, ctx_list_v1_2D)
sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)
check_consistency(sym, ctx_list_v1_2D)
# V2, 2D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_2D)
# V2, 1D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_1D)
    # V2, 3D
sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)
check_consistency(sym, ctx_list_v2_3D)
@with_seed()
def test_batchnorm_versions():
def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):
ctx_list = []
sym_list = []
# BatchNormV1 cpu
if 'batchnorm_v1_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNormV1 gpu (organic)
if 'batchnorm_v1_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm cpu
if 'batchnorm_cpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm'))
# BatchNorm gpu (organic)
if 'batchnorm_gpu' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=True))
# BatchNorm gpu cudnn (if cudnn is enabled)
if 'batchnorm_cudnn' in batchnorm_op_list:
ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})
sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,
use_global_stats=use_global_stats,
name='batchnorm', cudnn_off=False))
check_consistency(sym_list, ctx_list)
def test_1d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 20)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_2d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 10, 10)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
'batchnorm_cpu',
'batchnorm_gpu', 'batchnorm_cudnn'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
def test_3d_batchnorm(fix_gamma, use_global_stats):
data = (2, 3, 3, 5, 5)
test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
'batchnorm_gpu'],
data=data,
fix_gamma=fix_gamma, use_global_stats=use_global_stats)
test_1d_batchnorm(True, False)
test_1d_batchnorm(False, False)
test_1d_batchnorm(False, True)
test_1d_batchnorm(True, True)
test_2d_batchnorm(True, False)
test_2d_batchnorm(False, False)
test_2d_batchnorm(False, True)
test_2d_batchnorm(True, True)
test_3d_batchnorm(True, False)
test_3d_batchnorm(False, False)
test_3d_batchnorm(False, True)
test_3d_batchnorm(True, True)
@with_seed(1234)
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_convolution_with_type():
sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')
data = mx.sym.Variable('conv_data')
w = mx.sym.Variable('conv_weight')
b = mx.sym.Variable('conv_bias')
w = mx.sym.transpose(w, axes=(0,2,3,1))
sym2 = mx.sym.transpose(data, axes=(0,2,3,1))
sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))
sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')
sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},
# NHWC
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),
'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}
]
# wider tolerance needed for true-fp16 NCHW test above
tol = {np.dtype(np.float16): 0.5,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
# Apply N symbols against each of M contexts, checking that all NxM combinations match.
def check_consistency_NxM(sym_list, ctx_list):
# e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
# sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list), scale=0.5)
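# A tiny runnable sketch of the N x M expansion described above, kept out of the
# test collector (no 'test_' prefix); the placeholder values 'a'/'b' and
# 'c1'..'c3' stand in for symbols and contexts and are assumptions of this
# illustration only.
def _nxm_expansion_sketch():
    syms = ['a', 'b']
    ctxs = ['c1', 'c2', 'c3']
    expanded_syms = list(np.repeat(syms, len(ctxs)))  # ['a','a','a','b','b','b']
    expanded_ctxs = ctxs * len(syms)                  # ['c1','c2','c3','c1','c2','c3']
    assert expanded_syms == ['a', 'a', 'a', 'b', 'b', 'b']
    assert expanded_ctxs == ['c1', 'c2', 'c3', 'c1', 'c2', 'c3']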
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/10141")
@with_seed()
def test_convolution_options():
# 1D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D convolution
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 3D convolution
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 1x1 convolution
sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')
sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Helper function to run tests in a subprocess to avoid save/restore of os.environ.
# Also avoids issues of cached environment variable lookups in the backend.
def _test_in_separate_process(func, env, *args):
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
seed = np.random.randint(0,1024*1024*1024)
for (key, value) in env.items():
os.environ[key] = str(value)
# Prepend seed as first arg
p = mpctx.Process(target=func, args=(seed,)+args)
p.start()
p.join()
assert p.exitcode == 0, "Non-zero exit code %d from %s()." % (p.exitcode, func.__name__)
def _conv_with_num_streams(seed):
with random_seed(seed):
# Try to expose timing-dependent improper workspace sharing by parallel dgrad and wgrad
num_trials = 20
for _ in range(num_trials):
size = np.random.randint(32, 128)
# The cudnn conv operator runs dgrad and wgrad in separate streams if enabled, with possible
# kernel overlap. The non-cudnn conv op doesn't do this so is used as the 'golden copy'.
ctx = {'ctx': mx.gpu(0), 'conv_data': (2, 2, size, size),
'type_dict': {'conv_data': np.float32}}
# Adding 'flip' here isolates the model from the input node (which can't use inplace store)
flipped = mx.sym.flip(axis=0, name='conv')
sym = mx.sym.Convolution(data=flipped, num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
flipped_no_cudnn = mx.sym.flip(axis=0, name='conv')
sym_no_cudnn = mx.sym.Convolution(data=flipped_no_cudnn, num_filter=3, kernel=(3,3), pad=(1,1),
cudnn_off=True, name='conv')
try:
                # tol can be pretty high; we're looking for a large diff due to a garbage-filled workspace
check_consistency([sym, sym_no_cudnn], [ctx, ctx], tol=1e-2)
except:
print('Failing conv size = {}'.format(size))
raise
@with_seed()
def test_convolution_multiple_streams():
for num_streams in [1, 2]:
for engine in ['NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice']:
_test_in_separate_process(_conv_with_num_streams,
{'MXNET_GPU_WORKER_NSTREAMS' : num_streams, 'MXNET_ENGINE_TYPE' : engine})
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_convolution_large_c():
problematic_c = 64 * 1024
# The convolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCW', num_filter=8, kernel=(2,), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (1, problematic_c, 2, width), 'type_dict': {'conv_data': np.float64}}]
sym = mx.sym.Convolution(layout='NCHW', num_filter=4, kernel=(2,2), name='conv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
# This test is designed to expose an issue with cudnn v7.1.4 algo find() when invoked with large c.
# Algos returned by find() can fail to run with grad_req='add' (wgrad kernel beta parameter == 1.0f).
@with_seed()
def test_deconvolution_large_c():
problematic_c = 64 * 1024
# The deconvolution accumulates many values, so set large tolerances.
tol = {np.dtype(np.float32): 1,
np.dtype(np.float64): 1}
def test_1D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCW', num_filter=problematic_c, kernel=(2,), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
def test_2D_with_width(width, grad_req):
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (1, 8, 2, width), 'type_dict': {'deconv_data': np.float64}}]
sym = mx.sym.Deconvolution(layout='NCHW', num_filter=problematic_c, kernel=(2,2), name='deconv')
check_consistency([sym, sym], ctx_list, tol=tol, grad_req=grad_req)
# Run with different data tensor shapes to run cudnnFind() multiple times.
# First, populate algo and op caches with models that always use cudnnFind() (req == 'write').
# Then run models that must avoid cached cudnnFind() results in some cases (req == 'add').
widths = [4, 16, 64]
for req in ['write', 'add']:
for width in widths:
test_1D_with_width(width, req)
test_2D_with_width(width, req)
@with_seed()
def test_convolution_versions():
# 2D convolution NCHW
ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')
syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# 3D convolution NCDHW
ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},
{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
syms = [conv_cudnn, conv_cpu, conv_gpu]
check_consistency(syms, ctx_list)
# More max-pooling strides and pads to test cudnn pooling implementation code paths
@with_seed()
def test_pooling_nhwc_with_convention():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for in_shape in [(3, 4, 8, 8), (2, 2, 20, 20)]:
for kernel in [(2,2), (3,3), (4,4)]:
for stride in [(1,1), (1,2), (2,1), (2,2)]:
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': in_shape,
'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='valid', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=kernel, pool_type='max', stride=stride,
pooling_convention='full', name='pool')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(kernel=(300,300), pool_type='max',
global_pool=True, name='pool')
check_consistency_NxM(symlist, ctx_list)
def test_pooling_with_type():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
def test_deconvolution_with_type():
# Test basic deconvolution without exercising stride, pad or dilation.
# 1D deconvolution
sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
# 2D deconvolution
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# wider tolerance needed for true-fp16 test above
tol = {np.dtype(np.float16): 0.3,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0}
check_consistency(sym, ctx_list, tol=tol)
check_consistency(sym, ctx_list, tol=tol, grad_req="add")
@with_seed()
def test_deconvolution_options():
# 1D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# 2D deconvolution
ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}},
{'ctx': mx.gpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float16}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float64}},
{'ctx': mx.cpu(0), 'deconv_data': (2, 8, 10, 10), 'type_dict': {'deconv_data': np.float32}}]
# Pad > 0
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Stride > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# Dilate > 1
sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')
sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # 3D deconvolution (not yet enabled)
# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]
# # Pad > 0
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
# # Stride > 1
# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')
# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')
# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
@with_seed(1234)
def test_bilinear_sampler_with_type():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym = mx.sym.BilinearSampler(data=data, grid=grid)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}},
{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float16}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),
'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_grid_generator_with_type():
data = mx.sym.Variable('data')
sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))
ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},
{'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_spatial_transformer_with_type():
data = mx.sym.Variable('data')
loc = mx.sym.Flatten(data)
loc = mx.sym.FullyConnected(data=loc, num_hidden=10)
loc = mx.sym.Activation(data=loc, act_type='relu')
loc = mx.sym.FullyConnected(data=loc, num_hidden=6)
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=True)
ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}},
{'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float64}}]
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),
transform_type="affine", sampler_type="bilinear", cudnn_off=False)
check_consistency(sym, ctx_list)
check_consistency(sym, ctx_list, grad_req="add")
@with_seed()
def test_pooling_with_type2():
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare cpu and gpu results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}},
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency(sym, ctx_list)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
@with_seed()
def test_pooling_nhwc_with_type():
def make_pooling_syms(**kwargs):
# Conventional NCHW layout pooling
sym = mx.sym.Pooling(**kwargs)
# NHWC pooling
data = mx.sym.Variable('pool_data')
sym_nhwc = mx.sym.transpose(data, axes=(0,2,3,1))
sym_nhwc = mx.sym.Pooling(sym_nhwc, layout='NHWC', **kwargs)
sym_nhwc = mx.sym.transpose(sym_nhwc, axes=(0,3,1,2), name='pool')
return [sym, sym_nhwc]
# While the float32 and float64 output is reliably consistent, float16 departs occasionally.
# We compare nhwc and nchw results only within a given precision.
for data_type in [np.float64, np.float32, np.float16]:
# NHWC pooling only enabled on GPU with CUDNN
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': data_type}}]
symlist = make_pooling_syms(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency_NxM(symlist, ctx_list)
symlist = make_pooling_syms(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
check_consistency_NxM(symlist, ctx_list)
@with_seed()
def test_pooling_versions():
# Produce the name of the 'transposed' layout, given the dimension
def transposed_layout(ndim):
if ndim < 3 or ndim > 5:
raise RuntimeError("Invalid data dim, expecting 3, 4 or 5")
return ('NWC', 'NHWC', 'NDHWC')[ndim-3]
# default padding is all zeros
def is_default_pad(pad):
return pad == (0,) * len(pad)
# default stride is all ones
def is_default_stride(stride):
return stride == (1,) * len(stride)
# returns True/False randomly with equal probability
def random_choice():
return np.random.random(1)[0] < 0.5
def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,
pooling_convention='valid', global_pool=False, p_value=2,
count_include_pad=True, tol=None, dtype=np.float32):
ctx_list = []
sym_list = []
for pool_ctx in pool_op_list:
(pool_op, ctx_type) = pool_ctx.rsplit('_', 1)
expected_ctxs = ['cpu', 'gpu', 'cudnn']
if ctx_type not in expected_ctxs:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_ctxs, ctx_type))
ctx = mx.cpu(0) if ctx_type == 'cpu' else mx.gpu(0)
ctx_list.append({'ctx': ctx, 'pool_data': data, 'type_dict': {'pool_data': dtype}})
# start with pool args present in all cases
pool_op_args = {'kernel': kernel, 'pool_type': pool_type,
'pooling_convention' : pooling_convention, 'name' : 'pool'}
# add other args as needed
if global_pool:
pool_op_args['global_pool'] = True
else:
# Add pad and stride param if needed, plus randomly when it matches the default
if not is_default_pad(pad) or random_choice():
pool_op_args.update({'pad' : pad})
if not is_default_stride(stride) or random_choice():
pool_op_args.update({'stride' : stride})
expected_pool_ops = ['pool', 'pool_transposed', 'pool_v1']
if pool_op == 'pool_v1':
sym = mx.sym.Pooling_v1(**pool_op_args)
else:
pool_op_args.update({'p_value' : p_value, 'count_include_pad' : count_include_pad})
if ctx_type != 'cpu':
pool_op_args['cudnn_off'] = ctx_type == 'gpu'
if pool_op == 'pool':
# isolate pooling input from symbol input to test shared tensor optimizations
buffered_input = mx.sym.identity(name='pool')
sym = mx.sym.Pooling(buffered_input, **pool_op_args)
elif pool_op == 'pool_transposed':
ndim = len(data)
# NCW->NWC axes=(0,2,1) NCHW->NHWC axes=(0,2,3,1) NCDHW->NDHWC axes=(0,2,3,4,1);
axes = (0,) + tuple(range(2,ndim)) + (1,)
transposed = mx.sym.transpose(axes=axes, name='pool')
pooled = mx.sym.Pooling(data=transposed, layout=transposed_layout(ndim),
**pool_op_args)
# NWC->NCW axes=(0,2,1) NHWC->NCHW axes=(0,3,1,2) NDHWC->NCDHW axes=(0,4,1,2,3);
axes = (0, ndim-1) + tuple(range(1,ndim-1))
sym = mx.sym.transpose(data=pooled, axes=axes, name='pool')
else:
raise RuntimeError('Expected one of {}, saw {}.'.format(expected_pool_ops,
pool_op))
sym_list.append(sym)
check_consistency(sym_list, ctx_list, equal_nan=(not count_include_pad), tol=tol)
def test_pooling_dim(dim, pool_type, dtype, pool_op_list, p_value=2, count_include_pad=True,
tol=None):
if dim == '1D':
data = (3, 3, 10)
kernels = [(4,), (4,), (5,)]
pads = [(0,), (2,), (2,)]
strides = [(1,), (2,), (1,)]
elif dim == '2D_no_padding':
data = (3, 2, 20, 20)
kernels = [(3, 3), (4, 5)]
pads = [(0, 0), (0, 0)]
strides = [(1, 1), (2, 1)]
elif dim == '2D':
data = (2, 2, 20, 20)
kernels = [(3, 3), (3, 5), (4, 5), (4, 5)]
pads = [(0, 0), (1, 2), (0, 0), (2, 3)]
strides = [(1, 1), (1, 1), (2, 1), (1, 1)]
elif dim == '3D':
data = (2, 3, 20, 20, 20)
kernels = [(4, 5, 3), (4, 5, 3), (3, 5, 7)]
pads = [(0, 0, 0), (2, 3, 2), (1, 2, 3)]
strides = [(1, 1, 1), (2, 3, 1), (1, 1, 1)]
else:
raise RuntimeError('Unexpected pooling test class: {}.'.format(dim))
for kernel, pad, stride in zip(kernels, pads, strides):
for pooling_convention in ['valid', 'full']:
try:
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=pad, stride=stride,
pool_type=pool_type, pooling_convention=pooling_convention,
global_pool=False, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
except:
print('pool_op_list = {}'.format(pool_op_list))
print('kernel={}, pad={}, stride={}'.format(kernel, pad, stride))
print('pool_type={}, pooling_convention={}, global_pool=False'.format(pool_type,
pooling_convention))
print('p_value={}, count_include_pad={}, dtype={}'.format(p_value,
count_include_pad, dtype))
print('environ = \n{}'.format(os.environ))
raise
# Make sure kernel is ignored during global_pool by sometimes setting it to a crazy value
kernel = kernels[0]
if random_choice():
kernel = (300,) * len(kernel)
test_pooling_versions_helper(pool_op_list=pool_op_list,
data=data, kernel=kernel, pad=None, stride=None,
pool_type=pool_type, global_pool=True, p_value=p_value,
count_include_pad=count_include_pad, tol=tol, dtype=dtype)
# The various implementations of the standard pooling operator
std_pool_op_list = ['pool_cpu', 'pool_transposed_cpu',
'pool_gpu', 'pool_transposed_gpu',
'pool_cudnn', 'pool_transposed_cudnn']
# The implementations of the 'v1' pooling operator
v1_pool_op_list = ['pool_v1_cpu', 'pool_v1_gpu']
# For those cases when all implementations should match- the combined implementation list.
combo_pool_op_list = std_pool_op_list + v1_pool_op_list
for dtype in [np.float32, np.float64, np.float16]:
# Testing of the standard (not 'v1') pooling operator is universal across all
# data dimensions, implementations and layouts.
for dim in ['1D', '2D', '3D']:
test_pooling_dim(dim, 'max', dtype, std_pool_op_list)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=True)
test_pooling_dim(dim, 'avg', dtype, std_pool_op_list, count_include_pad=False)
test_pooling_dim(dim, 'sum', dtype, std_pool_op_list)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=1)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=2)
test_pooling_dim(dim, 'lp', dtype, std_pool_op_list, p_value=3)
# Testing of the 'v1' pooling operator is over its restricted support domain of
# 2D data only and not with the 'lp' pooling type. The 'v1' cpu and gpu versions are
# always tested against each other, and sometimes against the standard operator versions.
# The slightly different 'v1' definition prevents this in the following cases:
#
# 1. In max pooling, when multiple input values are the maximum in the input window,
# the 'v1' implementation backprops the gradient to all maxima, whereas the standard
# pooling operator backprops the gradient to the lowest-indexed maximum only.
# 2. In max pooling, the 'v1' operator pads with 0's and this value can become the
# maximum output value in the case of an all-negative input. The standard pooling
# operator effectively considers the padding to be the largest negative value, so
# only input values should appear in the output.
# 3. In avg pooling, the 'v1' operator divides the sum by the same window size factor,
# even at the edges, and so does not support count_include_pad = False.
# 4. The float16 'v1' pooling operator performs forward sums and averages in
# float16, whereas the std operators perform those calculations in float32, so
# greater float16 tolerances are needed when comparing across implementations.
        # Double the float16 tol when comparing v1 and non-v1 implementations, per note 4 above.
relaxed_tol = {np.dtype(np.float16): 2e-1,
np.dtype(np.float32): 1e-3,
np.dtype(np.float64): 1e-5,
np.dtype(np.uint8): 0,
np.dtype(np.int32): 0,
np.dtype(np.int64): 0}
# Exclude std implementations due to points 1 and 2 above.
test_pooling_dim('2D', 'max', dtype, v1_pool_op_list)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'avg', dtype, combo_pool_op_list, count_include_pad=True,
tol=relaxed_tol)
# Exclude std implementations due to point 3 above.
test_pooling_dim('2D', 'avg', dtype, v1_pool_op_list, count_include_pad=False)
# The standard and 'v1' implementations match for this case.
test_pooling_dim('2D', 'sum', dtype, combo_pool_op_list, tol=relaxed_tol)
# We can compare the standard and 'v1' max pooling implementations if we eliminate padding
# (see point 2 above) and use np.float64 data so that no two random input window values are
# likely to be the same (see point 1 above).
test_pooling_dim('2D_no_padding', 'max', np.float64, combo_pool_op_list)
@with_seed()
def test_pooling_full_2d():
def test_pooling_full_2d_type(pool_type):
data = (2, 2, 10, 10)
kernel = (4, 5)
pad = (1, 2)
stride = (3, 4)
convention = 'full'
ctx_list = []
sym_list = []
# o_h = ceil((10 + 1 + 1 - 4) / 3) + 1 = 4
# o_w = ceil((10 + 2 + 2 - 5) / 4) + 1 = 4
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=convention, global_pool=False, name='pool'))
check_consistency(sym_list, ctx_list)
test_pooling_full_2d_type('max')
test_pooling_full_2d_type('avg')
test_pooling_full_2d_type('sum')
@with_seed()
def test_global_pooling():
def test_1d_pooling(pool_type, p_value=2):
data = (2, 3, 20)
kernel = (4,)
pad = (2,)
stride = (2,)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool', p_value=p_value))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
def test_2d_pooling(pool_type, p_value=2):
data = (2, 3, 20, 20)
kernel = (4, 4)
pad = (2, 2)
stride = (2, 2)
ctx_list = []
sym_list = []
pooling_convention = 'valid'
if pool_type != 'lp':
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=False, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
sym_list.append(mx.sym.Pooling(pool_type=pool_type,
pooling_convention=pooling_convention, global_pool=True, p_value=p_value, cudnn_off=True, name='pool'))
check_consistency(sym_list, ctx_list)
test_1d_pooling('max')
test_1d_pooling('avg')
test_1d_pooling('sum')
test_1d_pooling('lp', p_value=1)
test_1d_pooling('lp', p_value=2)
test_1d_pooling('lp', p_value=3)
test_2d_pooling('max')
test_2d_pooling('avg')
test_2d_pooling('sum')
test_2d_pooling('lp', p_value=1)
test_2d_pooling('lp', p_value=2)
test_2d_pooling('lp', p_value=3)
@with_seed()
def test_upsampling_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},
{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},
{'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_upsampling_bilinear_with_type():
sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)
ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},
{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},
{'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_concat_with_type():
sym = mx.sym.Concat(name='concat', num_args=2)
ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},
{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},
{'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),
'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_elementwisesum_with_type():
dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
[mx.cpu(0), [np.float64, np.float32]] ]
for num_args in range(1, 6):
ews_arg_shape = {}
for i in range(num_args):
ews_arg_shape['ews_arg'+str(i)] = (2, 10)
sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
ctx_list = []
for dev, types in dev_types:
for dtype in types:
ews_arg_dtype = {'type_dict':{}}
for i in range(num_args):
ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
ctx_elem = {'ctx': dev}
ctx_elem.update(ews_arg_shape)
ctx_elem.update(ews_arg_dtype)
ctx_list.append(ctx_elem)
check_consistency(sym, ctx_list)
@with_seed()
def test_reshape_with_type():
sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))
ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}},
{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},
{'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_blockgrad_with_type():
sym = mx.sym.BlockGrad(name='bg')
ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},
{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},
{'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_swapaxis_with_type():
sym = mx.sym.SwapAxis(name='swap', dim1=1)
ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},
{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},
{'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_fullyconnected_with_type():
sym = mx.sym.FullyConnected(num_hidden=3, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},
{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},
{'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
# Sizes are divisible by 8 to test TensorCore on Volta GPU.
sym = mx.sym.FullyConnected(num_hidden=8, name='inner')
ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},
{'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_activation_with_type():
act_types = ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']
shape = (2, 2, 10, 10)
for act_type in act_types:
sym = mx.sym.Activation(name='act', act_type=act_type)
ctx_list = [{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.gpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float64}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float32}},
{'ctx': mx.cpu(0), 'act_data': shape, 'type_dict': {'act_data': np.float16}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_lrn():
sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')
ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},
{'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]
check_consistency(sym, ctx_list)
@with_seed()
def test_embedding_with_type():
def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
NVD = [[20, 10, 20], [200, 10, 300]]
for N, V, D in NVD:
sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
ctx_list = []
for data_type in data_types:
for weight_type in weight_types:
ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
arg_params=arg_params)
data_types = [np.float16, np.float32, np.float64, np.int32]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 5, 5)
data_types = [np.uint8]
weight_types = [np.float16, np.float32, np.float64]
test_embedding_helper(data_types, weight_types, 0, 5)
@with_seed()
def test_svmoutput_with_type():
sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)
ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},
{'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]
check_consistency(sym, ctx_list, use_uniform=True)
@with_seed()
def test_take_with_type():
sym = mx.sym.take(name='take')
for data_ndim in range(2, 5):
for idx_ndim in range(1, 4):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=3, high=6), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=3, high=5), )
ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.gpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float64,
'take_a': np.float64}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float32,
'take_a': np.float32}},
{'ctx': mx.cpu(0), 'take_indices': idx_shape,
'take_a': data_shape,
'type_dict': {'take_indices': np.float16,
'take_a': np.float16}}]
arg_params = {'take_indices': np.random.randint(low=0,
high=data_shape[0],
size=idx_shape),
'take_a': np.random.normal(size=data_shape)}
check_consistency(sym, ctx_list,
grad_req={'take_indices': 'null',
'take_a': 'write'},
arg_params=arg_params)
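# Helper used by the fused-RNN tests below: unrolls both cell implementations over the same
# 5-step input, copies the (un)packed weights from one module to the other, and checks that
# their forward outputs agree on the GPU.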
def check_rnn_consistency(cell1, cell2):
dshape = (32, 5, 200)
data = mx.sym.Variable('data')
sym1, _ = cell1.unroll(5, data, merge_outputs=True)
mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)
sym2, _ = cell2.unroll(5, data, merge_outputs=True)
mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_forget_bias():
forget_bias = 2.0
fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')
dshape = (32, 1, 20)
data = mx.sym.Variable('data')
sym, _ = fused.unroll(1, data, merge_outputs=True)
mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))
mod.bind(data_shapes=[('data', dshape)], label_shapes=None)
mod.init_params()
args, auxs = mod.get_params()
args = fused.unpack_weights(args)
bias_name = next(x for x in args if x.endswith('f_bias'))
expected_bias = forget_bias * np.ones(10, )
assert_allclose(args[bias_name].asnumpy(), expected_bias)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(100, prefix='l0_'))
stack.add(mx.rnn.GRUCell(100, prefix='l1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_bidirectional():
fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',
bidirectional=True)
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l0_'),
mx.rnn.GRUCell(100, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(100, prefix='l1_'),
mx.rnn.GRUCell(100, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_unfuse():
for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
fused = mx.rnn.FusedRNNCell(
100, num_layers=2, mode=mode,
prefix='test_%s'%mode,
bidirectional=True,
dropout=0.5)
stack = fused.unfuse()
check_rnn_consistency(fused, stack)
check_rnn_consistency(stack, fused)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.gpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_psroipooling_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3,
np.dtype(np.float16): 1e-2}
arg_params = {
'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# deformable psroipooling
sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,
output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')
ctx_list = [{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,
'deformable_psroipool_trans': np.float64}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,
'deformable_psroipool_trans': np.float32}},
{'ctx': mx.gpu(0),
'deformable_psroipool_data': (1, 18, 14, 14),
'deformable_psroipool_rois': (2, 5),
'deformable_psroipool_trans': (2, 4, 3, 3),
'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,
'deformable_psroipool_trans': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_psroipool_data': 'write',
'deformable_psroipool_rois': 'null',
'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution_with_type():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 10, 10),
'deformable_conv_offset': (2, 18, 8, 8),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 10, 10),
# 'deformable_conv_offset': (2, 18, 8, 8),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},
]
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# test ability to turn off training on bias
check_consistency(sym, ctx_list, scale=0.1, tol=tol,
grad_req={'deformable_conv_data': 'write',
'deformable_conv_offset': 'write',
'deformable_conv_weight': 'write',
'deformable_conv_bias': 'null'})
@with_seed()
def test_deformable_convolution_options():
tol = {np.dtype(np.float32): 1e-1,
np.dtype(np.float64): 1e-3}
# 2D convolution
# Pad > 0
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 7, 7),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Stride > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Dilate > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 18, 3, 3),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
]
sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')
check_consistency(sym, ctx_list, scale=0.1, tol=tol)
# Deformable group > 1
# since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here
ctx_list = [{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},
{'ctx': mx.gpu(0),
'deformable_conv_data': (2, 2, 7, 7),
'deformable_conv_offset': (2, 36, 5, 5),
'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},
# {'ctx': mx.gpu(0),
# 'deformable_conv_data': (2, 2, 7, 7),
# 'deformable_conv_offset': (2, 36, 5, 5),
# 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},
]
    sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
                                               name='deformable_conv')
    check_consistency(sym, ctx_list, scale=0.1, tol=tol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_residual_fused():
cell = mx.rnn.ResidualCell(
mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
prefix='rnn_', dropout=0.5))
inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]
outputs, _ = cell.unroll(2, inputs, merge_outputs=None)
assert sorted(cell.params._params.keys()) == \
['rnn_parameters']
args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))
assert outs == [(10, 2, 50)]
outputs = outputs.eval(ctx=mx.gpu(0),
rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,
rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))
expected_outputs = np.ones((10, 2, 50))+5
assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
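# Helper: runs the same Gluon RNN layer on GPU and CPU with an identical all-ones input and
# compares the outputs and final states of the two runs.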
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def test_sequence_reverse():
check_sequence_reverse(mx.gpu(0))
@with_seed()
def test_autograd_save_memory():
x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
x.attach_grad()
with mx.autograd.record():
for i in range(200):
x = x + 1
x.wait_to_read()
x.backward()
@with_seed()
def test_cuda_rtc():
source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
y[i] += alpha * x[i];
}
extern "C" __global__ void saxpy(const float *x, float *y, float alpha) {
extern __shared__ float smem[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
smem[threadIdx.x] = x[i];
y[i] += alpha * smem[threadIdx.x];
}
'''
module = mx.rtc.CudaModule(source)
axpy = module.get_kernel("axpy", "const float *x, float *y, float alpha")
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
assert (y.asnumpy() == 3).all()
saxpy = module.get_kernel("saxpy", "const float *x, float *y, float alpha")
saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)
assert (y.asnumpy() == 7).all()
saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)
assert (y.asnumpy() == 12).all()
@with_seed()
def test_cross_device_autograd():
x = mx.nd.random.uniform(shape=(10,))
x.attach_grad()
with mx.autograd.record():
y = mx.nd.tanh(x)
y = y.copyto(mx.gpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.cpu(0))
y = mx.nd.tanh(y)
y = y.copyto(mx.gpu(0))
y = y.copyto(mx.gpu(0))
y.backward()
dx = x.grad.asnumpy()
x.grad[:] = 0
with mx.autograd.record():
y = x
for i in range(3):
y = mx.nd.tanh(y)
y.backward()
assert_almost_equal(dx, x.grad.asnumpy())
@with_seed()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
rpn_min_size = feature_stride
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
def get_new_data(batch_size, ctx):
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
dtype = np.float32
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)
im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)
cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]
np.random.shuffle(cls)
cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)
bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)
for i in range(batch_size):
im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(80, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
return cls_prob, bbox_pred, im_info
def check_proposal_consistency(op, batch_size, with_nms=False):
'''
op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal
'''
cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))
rois_cpu, score_cpu = op(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
gpu_ctx = mx.gpu(0)
# copy data to gpu from cpu
cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)
bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)
im_info_gpu = im_info.as_in_context(gpu_ctx)
rois_gpu, score_gpu = op(
cls_prob = cls_prob_gpu,
bbox_pred = bbox_pred_gpu,
im_info = im_info_gpu,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = 0.7 if with_nms else 1.0,
rpn_min_size = rpn_min_size, output_score = True)
rois_cpu_np = rois_cpu.asnumpy()
rois_gpu_np = rois_gpu.asnumpy()
score_cpu_np = score_cpu.asnumpy()
score_gpu_np = score_gpu.asnumpy()
if not with_nms:
assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)
assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)
else:
            # no 100% guarantee with nms
assert(np.sum(np.abs(score_cpu_np - score_gpu_np) < 1e-3) >= 10)
assert(np.sum(np.abs(rois_cpu_np - rois_gpu_np) < 1e-3) >= 40)
check_proposal_consistency(mx.nd.contrib.Proposal, 1)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5)
check_proposal_consistency(mx.nd.contrib.Proposal, 1, with_nms=True)
check_proposal_consistency(mx.nd.contrib.MultiProposal, 5, with_nms=True)
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
b = mx.nd.array([],ctx=mx.gpu(0))
c = (a / b).asnumpy()
def kernel_error_check_symbolic():
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
c = a / b
f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
'b':mx.nd.array([],ctx=mx.gpu(0))})
f.forward()
g = f.outputs[0].asnumpy()
def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
# if not run in a separate process (with its own address space for CUDA compatibility).
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
else:
with discard_stderr():
for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:
p = mpctx.Process(target=f)
p.start()
p.join()
assert p.exitcode != 0,\
"Expected a synchronous kernel error from %s(), none seen." % f.__name__
def test_incorrect_gpu():
# Try setting dev_id to a really big number
assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))
@with_seed()
def test_batchnorm_backwards_notrain():
for ctx in [mx.cpu(0), mx.gpu(0)]:
for cudnn_o in [False, True]:
B,C,H,W = 4,3,2,2
x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)
gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)
x.attach_grad()
with autograd.record(False):
y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),
fix_gamma=False, cudnn_off=cudnn_o)
loss=y.square().sum()
loss.backward(train_mode=False)
@with_seed()
def test_create_sparse_ndarray_gpu_to_cpu():
dim0 = 10
dim1 = 5
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
matrix = rand_ndarray(shape, 'row_sparse', density)
data = matrix.data
indices = matrix.indices
rsp_created = mx.nd.sparse.row_sparse_array((data, indices), shape=shape, ctx=mx.cpu())
assert rsp_created.stype == 'row_sparse'
assert same(rsp_created.data.asnumpy(), data.asnumpy())
assert same(rsp_created.indices.asnumpy(), indices.asnumpy())
rsp_copy = mx.nd.array(rsp_created)
assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
@with_seed()
def test_softmax_activation():
gpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.gpu(0))
cpu_a = mx.nd.array([[3., 0.5, -0.5, 2., 7.],
[2., -.4, 7., 3., 0.2]], ctx=mx.cpu())
cpu_a.attach_grad()
gpu_a.attach_grad()
with mx.autograd.record():
gpu_y = mx.nd.SoftmaxActivation(data = gpu_a)
cpu_y = mx.nd.SoftmaxActivation(data = cpu_a)
assert_almost_equal(cpu_y.asnumpy(), gpu_y.asnumpy(), atol = 1e-3, rtol = 1e-3)
gpu_y.backward()
cpu_y.backward()
assert_almost_equal(cpu_a.grad.asnumpy(), gpu_a.grad.asnumpy(),
atol = 1e-3, rtol = 1e-3)
@with_seed()
def test_bilinear_sampler_versions():
data = mx.sym.Variable('data')
grid = mx.sym.Variable('grid')
sym1 = mx.sym.BilinearSampler(data=data, grid=grid)
sym2 = mx.sym.BilinearSampler(data=data, grid=grid, cudnn_off=True)
sym3 = mx.sym.BilinearSampler(data=data, grid=grid)
test_cases = [[(1,3,15,16),(1,2,10,10)],
[(1,6,7,16),(1,2,10,4)],
[(1,7,3,16),(1,2,8,11)],
[(1,9,50,50),(1,2,50,50)]]
for item in test_cases:
data_shape, grid_shape = item
# kWriteTo
exe_cpu = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='write')
exe_gpu = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_cudnn = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='write')
exe_list = [exe_cpu, exe_gpu, exe_cudnn]
ref_idx = 0
test_data = np.random.uniform(low=-0.1, high=0.1,size=data_shape).astype(np.float32)
test_grid = np.random.uniform(low=-2, high=2, size=grid_shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
assert_almost_equal(exe_list[ref_idx].outputs[0].asnumpy(), exe.outputs[0].asnumpy(), rtol=1e-3, atol=1e-5)
out_grad = np.random.uniform(low=-0.01, high=0.01,size=data_shape[:2] + grid_shape[2:]).astype(np.float32)
for exe in exe_list:
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
data_grad = exe_list[ref_idx].grad_dict['data'].asnumpy()
grid_grad = exe_list[ref_idx].grad_dict['grid'].asnumpy()
# kAddTo
exe_cpu_addto = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req='add')
exe_gpu_addto = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_cudnn_addto = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req='add')
exe_list = [exe_cpu_addto, exe_gpu_addto, exe_cudnn_addto]
data_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['data'].shape).astype(np.float32)
grid_initial_grad = np.random.normal(size=exe_list[ref_idx].grad_dict['grid'].shape).astype(np.float32)
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.grad_dict['data'][:] = data_initial_grad
exe.grad_dict['grid'][:] = grid_initial_grad
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['data'].asnumpy(), data_grad + data_initial_grad, rtol=1e-3, atol=1e-5)
assert_almost_equal(exe_list[ref_idx].grad_dict['grid'].asnumpy(), grid_grad + grid_initial_grad, rtol=1e-3, atol=1e-5)
for req_dict in [{'data' : 'null', 'grid' : 'write'}, {'data' : 'write', 'grid' : 'null'}]:
# Mixture of kWriteTo and kNullOp
exe_cpu_mix = sym1.simple_bind(data=data_shape, grid=grid_shape, ctx=mx.cpu(), grad_req=req_dict)
exe_gpu_mix = sym2.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_cudnn_mix = sym3.simple_bind(data=data_shape, grid=grid_shape, ctx=default_context(), grad_req=req_dict)
exe_list = [exe_cpu_mix, exe_gpu_mix, exe_cudnn_mix]
for exe in exe_list:
exe.arg_dict['data'][:] = test_data
exe.arg_dict['grid'][:] = test_grid
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
                if req_dict['data'] == 'write':
                    assert_almost_equal(exe.grad_dict['data'].asnumpy(), exe_list[ref_idx].grad_dict['data'].asnumpy(), rtol=1e-3, atol=1e-5)
                if req_dict['grid'] == 'write':
                    assert_almost_equal(exe.grad_dict['grid'].asnumpy(), exe_list[ref_idx].grad_dict['grid'].asnumpy(), rtol=1e-3, atol=1e-5)
def test_context_num_gpus():
# Test that num_gpus reports at least one GPU, as the test is run on a GPU host.
assert mx.context.num_gpus() > 0
if __name__ == '__main__':
import nose
nose.runmodule()
|
test_diskutils.py
|
import functools
import gc
import os
import queue
import shutil
import subprocess
import sys
from time import sleep
from unittest import mock
import pytest
import dask
from distributed.compatibility import WINDOWS
from distributed.diskutils import WorkSpace
from distributed.metrics import time
from distributed.utils import mp_context
from distributed.utils_test import captured_logger
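# Polls the work-space directory until it contains exactly the expected entries (ignoring the
# global/purge lock files), retrying with a short sleep to tolerate asynchronous cleanup.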
def assert_directory_contents(dir_path, expected, trials=2):
expected = [os.path.join(dir_path, p) for p in expected]
for i in range(trials):
actual = [
os.path.join(dir_path, p)
for p in os.listdir(dir_path)
if p not in ("global.lock", "purge.lock")
]
if sorted(actual) == sorted(expected):
break
else:
sleep(0.5)
else:
assert sorted(actual) == sorted(expected)
def test_workdir_simple(tmpdir):
# Test nominal operation of WorkSpace and WorkDirs
base_dir = str(tmpdir)
assert_contents = functools.partial(assert_directory_contents, base_dir)
ws = WorkSpace(base_dir)
assert_contents([])
a = ws.new_work_dir(name="aa")
assert_contents(["aa", "aa.dirlock"])
b = ws.new_work_dir(name="bb")
assert_contents(["aa", "aa.dirlock", "bb", "bb.dirlock"])
ws._purge_leftovers()
assert_contents(["aa", "aa.dirlock", "bb", "bb.dirlock"])
a.release()
assert_contents(["bb", "bb.dirlock"])
del b
gc.collect()
assert_contents([])
# Generated temporary name with a prefix
a = ws.new_work_dir(prefix="foo-")
b = ws.new_work_dir(prefix="bar-")
c = ws.new_work_dir(prefix="bar-")
assert_contents(
{a.dir_path, a._lock_path, b.dir_path, b._lock_path, c.dir_path, c._lock_path}
)
assert os.path.basename(a.dir_path).startswith("foo-")
assert os.path.basename(b.dir_path).startswith("bar-")
assert os.path.basename(c.dir_path).startswith("bar-")
assert b.dir_path != c.dir_path
def test_two_workspaces_in_same_directory(tmpdir):
# If handling the same directory with two WorkSpace instances,
# things should work ok too
base_dir = str(tmpdir)
assert_contents = functools.partial(assert_directory_contents, base_dir)
ws = WorkSpace(base_dir)
assert_contents([])
a = ws.new_work_dir(name="aa")
assert_contents(["aa", "aa.dirlock"])
ws2 = WorkSpace(base_dir)
ws2._purge_leftovers()
assert_contents(["aa", "aa.dirlock"])
b = ws.new_work_dir(name="bb")
assert_contents(["aa", "aa.dirlock", "bb", "bb.dirlock"])
del ws
del b
gc.collect()
assert_contents(["aa", "aa.dirlock"], trials=5)
del a
gc.collect()
assert_contents([], trials=5)
def test_workspace_process_crash(tmpdir):
# WorkSpace should be able to clean up stale contents left by
# crashed process
base_dir = str(tmpdir)
assert_contents = functools.partial(assert_directory_contents, base_dir)
ws = WorkSpace(base_dir)
code = """if 1:
import signal
import sys
import time
from distributed.diskutils import WorkSpace
ws = WorkSpace(%(base_dir)r)
a = ws.new_work_dir(name='aa')
b = ws.new_work_dir(prefix='foo-')
print((a.dir_path, b.dir_path))
sys.stdout.flush()
time.sleep(100)
""" % dict(
base_dir=base_dir
)
p = subprocess.Popen(
[sys.executable, "-c", code],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
)
line = p.stdout.readline()
assert p.poll() is None
a_path, b_path = eval(line)
assert_contents([a_path, a_path + ".dirlock", b_path, b_path + ".dirlock"])
# The child process holds a lock so the work dirs shouldn't be removed
ws._purge_leftovers()
assert_contents([a_path, a_path + ".dirlock", b_path, b_path + ".dirlock"])
# Kill the process so it's unable to clear the work dirs itself
p.kill()
assert p.wait() # process returned with non-zero code
assert_contents([a_path, a_path + ".dirlock", b_path, b_path + ".dirlock"])
with captured_logger("distributed.diskutils", "INFO", propagate=False) as sio:
ws._purge_leftovers()
assert_contents([])
# One log line per purged directory
lines = sio.getvalue().splitlines()
assert len(lines) == 2
for p in (a_path, b_path):
assert any(repr(p) in line for line in lines)
def test_workspace_rmtree_failure(tmpdir):
base_dir = str(tmpdir)
ws = WorkSpace(base_dir)
a = ws.new_work_dir(name="aa")
shutil.rmtree(a.dir_path)
with captured_logger("distributed.diskutils", "ERROR", propagate=False) as sio:
a.release()
lines = sio.getvalue().splitlines()
# shutil.rmtree() may call its onerror callback several times
assert lines
for line in lines:
assert line.startswith("Failed to remove %r" % (a.dir_path,))
def test_locking_disabled(tmpdir):
base_dir = str(tmpdir)
with dask.config.set({"distributed.worker.use-file-locking": False}):
with mock.patch("distributed.diskutils.locket.lock_file") as lock_file:
assert_contents = functools.partial(assert_directory_contents, base_dir)
ws = WorkSpace(base_dir)
assert_contents([])
a = ws.new_work_dir(name="aa")
assert_contents(["aa"])
b = ws.new_work_dir(name="bb")
assert_contents(["aa", "bb"])
ws._purge_leftovers()
assert_contents(["aa", "bb"])
a.release()
assert_contents(["bb"])
del b
gc.collect()
assert_contents([])
lock_file.assert_not_called()
def _workspace_concurrency(base_dir, purged_q, err_q, stop_evt):
ws = WorkSpace(base_dir)
n_purged = 0
with captured_logger("distributed.diskutils", "ERROR") as sio:
while not stop_evt.is_set():
            # Repeatedly try to purge leftover work dirs created (and "forgotten") by the parent process
try:
purged = ws._purge_leftovers()
except Exception as e:
err_q.put(e)
else:
n_purged += len(purged)
lines = sio.getvalue().splitlines()
if lines:
try:
raise AssertionError("got %d logs, see stderr" % (len(lines)))
except Exception as e:
err_q.put(e)
purged_q.put(n_purged)
def _test_workspace_concurrency(tmpdir, timeout, max_procs):
"""
WorkSpace concurrency test. We merely check that no exception or
deadlock happens.
"""
base_dir = str(tmpdir)
err_q = mp_context.Queue()
purged_q = mp_context.Queue()
stop_evt = mp_context.Event()
ws = WorkSpace(base_dir)
# Make sure purging only happens in the child processes
ws._purge_leftovers = lambda: None
# Run a bunch of child processes that will try to purge concurrently
NPROCS = 2 if sys.platform == "win32" else max_procs
processes = [
mp_context.Process(
target=_workspace_concurrency, args=(base_dir, purged_q, err_q, stop_evt)
)
for i in range(NPROCS)
]
for p in processes:
p.start()
n_created = 0
n_purged = 0
try:
t1 = time()
while time() - t1 < timeout:
# Add a bunch of locks, and simulate forgetting them.
# The concurrent processes should try to purge them.
for i in range(50):
d = ws.new_work_dir(prefix="workspace-concurrency-")
d._finalizer.detach()
n_created += 1
sleep(1e-2)
finally:
stop_evt.set()
for p in processes:
p.join()
# Any errors?
try:
err = err_q.get_nowait()
except queue.Empty:
pass
else:
raise err
try:
while True:
n_purged += purged_q.get_nowait()
except queue.Empty:
pass
# We attempted to purge most directories at some point
assert n_purged >= 0.5 * n_created > 0
return n_created, n_purged
def test_workspace_concurrency(tmpdir):
if WINDOWS:
raise pytest.xfail.Exception("TODO: unknown failure on windows")
if sys.version_info < (3, 6):
raise pytest.xfail.Exception("TODO: unknown failure on Python 3.5")
_test_workspace_concurrency(tmpdir, 2.0, 6)
@pytest.mark.slow
def test_workspace_concurrency_intense(tmpdir):
n_created, n_purged = _test_workspace_concurrency(tmpdir, 8.0, 16)
assert n_created >= 100
|
client.py
|
# -*- coding=utf-8 -*-
import base64
import hashlib
import logging
import socket
import json
import platform
import time
try:
import ssl
except ImportError:
ssl = None
from multiprocessing import Process, Manager, Queue, pool
from threading import RLock, Thread
try:
# python3.6
from http import HTTPStatus
from urllib.request import Request, urlopen, ProxyHandler, build_opener
from urllib.parse import urlencode, unquote_plus, quote
from urllib.error import HTTPError, URLError
except ImportError:
# python2.7
import httplib as HTTPStatus
from urllib2 import Request, urlopen, HTTPError, URLError, ProxyHandler, build_opener
from urllib import urlencode, unquote_plus, quote
base64.encodebytes = base64.encodestring
from .commons import synchronized_with_attr, truncate, python_version_bellow
from .params import group_key, parse_key, is_valid
from .files import read_file_str, save_file, delete_file
from .exception import NacosException, NacosRequestException
from .listener import Event, SimpleListenerManager
from .timer import NacosTimer, NacosTimerManager
logging.basicConfig()
logger = logging.getLogger(__name__)
DEBUG = False
VERSION = "0.1.5"
DEFAULT_GROUP_NAME = "DEFAULT_GROUP"
DEFAULT_NAMESPACE = ""
WORD_SEPARATOR = u'\x02'
LINE_SEPARATOR = u'\x01'
DEFAULTS = {
"APP_NAME": "Nacos-SDK-Python",
"TIMEOUT": 3, # in seconds
"PULLING_TIMEOUT": 30, # in seconds
"PULLING_CONFIG_SIZE": 3000,
"CALLBACK_THREAD_NUM": 10,
"FAILOVER_BASE": "nacos-data/data",
"SNAPSHOT_BASE": "nacos-data/snapshot",
}
OPTIONS = {"default_timeout", "pulling_timeout", "pulling_config_size", "callback_thread_num", "failover_base",
"snapshot_base", "no_snapshot", "proxies"}
def process_common_config_params(data_id, group):
if not group or not group.strip():
group = DEFAULT_GROUP_NAME
else:
group = group.strip()
if not data_id or not is_valid(data_id):
raise NacosException("Invalid dataId.")
if not is_valid(group):
raise NacosException("Invalid group.")
return data_id, group
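# Illustrative behaviour (assuming is_valid() accepts the values shown):
#   process_common_config_params("my-data-id", None)    -> ("my-data-id", "DEFAULT_GROUP")
#   process_common_config_params("my-data-id", " g1 ")  -> ("my-data-id", "g1")
#   process_common_config_params("", "g1")              -> raises NacosException("Invalid dataId.")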
def parse_pulling_result(result):
if not result:
return list()
ret = list()
for i in unquote_plus(result.decode()).split(LINE_SEPARATOR):
if not i.strip():
continue
sp = i.split(WORD_SEPARATOR)
if len(sp) < 3:
sp.append("")
ret.append(sp)
return ret
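# The long-pulling endpoint returns a percent-encoded payload in which each changed config is a
# line of the form "<dataId>\x02<group>[\x02<tenant>]" and lines are separated by "\x01"; the
# helper above splits it back into [dataId, group, tenant] triples, padding a missing tenant
# with an empty string.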
class WatcherWrap:
def __init__(self, key, callback, last_md5=None):
self.callback = callback
self.last_md5 = last_md5
self.watch_key = key
class CacheData:
def __init__(self, key, client):
self.key = key
local_value = read_file_str(client.failover_base, key) or read_file_str(client.snapshot_base, key)
self.content = local_value
self.md5 = hashlib.md5(local_value.encode("UTF-8")).hexdigest() if local_value else None
self.is_init = True
if not self.md5:
logger.debug("[init-cache] cache for %s does not have local value" % key)
class SubscribedLocalInstance(object):
def __init__(self, key, instance):
self.key = key
self.instance_id = instance["instanceId"]
self.md5 = NacosClient.get_md5(str(instance))
self.instance = instance
class SubscribedLocalManager(object):
def __init__(self):
self.manager = {
# "key1": {
# "LOCAL_INSTANCES": {
# "instanceId1": None,
# "instanceId2": None,
# "instanceId3": None,
# "instanceId4": None
# },
# "LISTENER_MANAGER": None
# },
# "key2": {
# "LOCAL_INSTANCES": {
# "instanceId1": "",
# "instanceId2": "",
# "instanceId3": "",
# "instanceId4": ""
# },
# "LISTENER_MANAGER": None
# }
}
def do_listener_launch(self, key, event, slc):
listener_manager = self.get_local_listener_manager(key)
if listener_manager and isinstance(listener_manager, SimpleListenerManager):
listener_manager.do_launch(event, slc)
def get_local_listener_manager(self, key):
key_node = self.manager.get(key)
if not key_node:
return None
return key_node.get("LISTENER_MANAGER")
def add_local_listener(self, key, listener_fn):
if not self.manager.get(key):
self.manager[key] = {}
local_listener_manager = self.manager.get(key).get("LISTENER_MANAGER")
if not local_listener_manager or not isinstance(local_listener_manager, SimpleListenerManager):
self.manager.get(key)["LISTENER_MANAGER"] = SimpleListenerManager()
local_listener_manager = self.manager.get(key).get("LISTENER_MANAGER")
if not local_listener_manager:
return self
        if isinstance(listener_fn, list):
            listener_fn = tuple(listener_fn)
        if isinstance(listener_fn, tuple):
            local_listener_manager.add_listeners(*listener_fn)
        # just a single listener function
        else:
            local_listener_manager.add_listener(listener_fn)
return self
    def add_local_listener_manager(self, key, listener_manager):
        key_node = self.manager.get(key)
        if key_node is None:
            key_node = {}
            self.manager[key] = key_node
        key_node["LISTENER_MANAGER"] = listener_manager
        return self
def get_local_instances(self, key):
if not self.manager.get(key):
return None
return self.manager.get(key).get("LOCAL_INSTANCES")
def add_local_instance(self, slc):
if not self.manager.get(slc.key):
self.manager[slc.key] = {}
if not self.manager.get(slc.key).get('LOCAL_INSTANCES'):
self.manager.get(slc.key)['LOCAL_INSTANCES'] = {}
self.manager.get(slc.key)['LOCAL_INSTANCES'][slc.instance_id] = slc
return self
def remove_local_instance(self, slc):
key_node = self.manager.get(slc.key)
if not key_node:
return self
local_instances_node = key_node.get("LOCAL_INSTANCES")
if not local_instances_node:
return self
local_instance = local_instances_node.get(slc.instance_id)
if not local_instance:
return self
local_instances_node.pop(slc.instance_id)
return self
def parse_nacos_server_addr(server_addr):
sp = server_addr.split(":")
port = int(sp[1]) if len(sp) > 1 else 8848
return sp[0], port
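# Example (illustrative only):
#   parse_nacos_server_addr("127.0.0.1:8848")     -> ("127.0.0.1", 8848)
#   parse_nacos_server_addr("nacos.example.com")  -> ("nacos.example.com", 8848)  # default port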
class NacosClient:
debug = False
@staticmethod
def set_debugging():
if not NacosClient.debug:
global logger
logger = logging.getLogger("nacos")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s:%(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
NacosClient.debug = True
@staticmethod
def get_md5(content):
return hashlib.md5(content.encode("UTF-8")).hexdigest() if content is not None else None
def __init__(self, server_addresses, endpoint=None, namespace=None, ak=None, sk=None, username=None, password=None):
self.server_list = list()
try:
for server_addr in server_addresses.split(","):
self.server_list.append(parse_nacos_server_addr(server_addr.strip()))
except Exception as ex:
logger.exception("[init] bad server address for %s" % server_addresses)
raise ex
self.current_server = self.server_list[0]
self.endpoint = endpoint
self.namespace = namespace or DEFAULT_NAMESPACE or ""
self.ak = ak
self.sk = sk
self.username = username
self.password = password
self.server_list_lock = RLock()
self.server_offset = 0
self.watcher_mapping = dict()
self.subscribed_local_manager = SubscribedLocalManager()
self.subscribe_timer_manager = NacosTimerManager()
self.pulling_lock = RLock()
self.puller_mapping = None
self.notify_queue = None
self.callback_tread_pool = None
self.process_mgr = None
self.default_timeout = DEFAULTS["TIMEOUT"]
self.auth_enabled = self.ak and self.sk
self.cai_enabled = True
self.pulling_timeout = DEFAULTS["PULLING_TIMEOUT"]
self.pulling_config_size = DEFAULTS["PULLING_CONFIG_SIZE"]
self.callback_thread_num = DEFAULTS["CALLBACK_THREAD_NUM"]
self.failover_base = DEFAULTS["FAILOVER_BASE"]
self.snapshot_base = DEFAULTS["SNAPSHOT_BASE"]
self.no_snapshot = False
self.proxies = None
logger.info("[client-init] endpoint:%s, tenant:%s" % (endpoint, namespace))
def set_options(self, **kwargs):
for k, v in kwargs.items():
if k not in OPTIONS:
logger.warning("[set_options] unknown option:%s, ignored" % k)
continue
logger.debug("[set_options] key:%s, value:%s" % (k, v))
setattr(self, k, v)
def change_server(self):
with self.server_list_lock:
self.server_offset = (self.server_offset + 1) % len(self.server_list)
self.current_server = self.server_list[self.server_offset]
def get_server(self):
logger.info("[get-server] use server:%s" % str(self.current_server))
return self.current_server
def remove_config(self, data_id, group, timeout=None):
data_id, group = process_common_config_params(data_id, group)
logger.info(
"[remove] data_id:%s, group:%s, namespace:%s, timeout:%s" % (data_id, group, self.namespace, timeout))
params = {
"dataId": data_id,
"group": group,
}
if self.namespace:
params["tenant"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/cs/configs", None, None, params,
timeout or self.default_timeout, "DELETE")
c = resp.read()
logger.info("[remove] remove group:%s, data_id:%s, server response:%s" % (
group, data_id, c))
return c == b"true"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
logger.error(
"[remove] no right for namespace:%s, group:%s, data_id:%s" % (self.namespace, group, data_id))
raise NacosException("Insufficient privilege.")
else:
logger.error("[remove] error code [:%s] for namespace:%s, group:%s, data_id:%s" % (
e.code, self.namespace, group, data_id))
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[remove] exception %s occur" % str(e))
raise
def publish_config(self, data_id, group, content, app_name=None, timeout=None):
if content is None:
raise NacosException("Can not publish none content, use remove instead.")
data_id, group = process_common_config_params(data_id, group)
if type(content) == bytes:
content = content.decode("UTF-8")
logger.info("[publish] data_id:%s, group:%s, namespace:%s, content:%s, timeout:%s" % (
data_id, group, self.namespace, truncate(content), timeout))
params = {
"dataId": data_id,
"group": group,
"content": content.encode("UTF-8"),
}
if self.namespace:
params["tenant"] = self.namespace
if app_name:
params["appName"] = app_name
try:
resp = self._do_sync_req("/nacos/v1/cs/configs", None, None, params,
timeout or self.default_timeout, "POST")
c = resp.read()
logger.info("[publish] publish content, group:%s, data_id:%s, server response:%s" % (
group, data_id, c))
return c == b"true"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[publish] exception %s occur" % str(e))
raise
def get_config(self, data_id, group, timeout=None, no_snapshot=None):
no_snapshot = self.no_snapshot if no_snapshot is None else no_snapshot
data_id, group = process_common_config_params(data_id, group)
logger.info("[get-config] data_id:%s, group:%s, namespace:%s, timeout:%s" % (
data_id, group, self.namespace, timeout))
params = {
"dataId": data_id,
"group": group,
}
if self.namespace:
params["tenant"] = self.namespace
cache_key = group_key(data_id, group, self.namespace)
# get from failover
content = read_file_str(self.failover_base, cache_key)
if content is None:
logger.debug("[get-config] failover config is not exist for %s, try to get from server" % cache_key)
else:
logger.debug("[get-config] get %s from failover directory, content is %s" % (cache_key, truncate(content)))
return content
# get from server
try:
resp = self._do_sync_req("/nacos/v1/cs/configs", None, params, None, timeout or self.default_timeout)
content = resp.read().decode("UTF-8")
except HTTPError as e:
if e.code == HTTPStatus.NOT_FOUND:
logger.warning(
"[get-config] config not found for data_id:%s, group:%s, namespace:%s, try to delete snapshot" % (
data_id, group, self.namespace))
delete_file(self.snapshot_base, cache_key)
return None
elif e.code == HTTPStatus.CONFLICT:
logger.error(
"[get-config] config being modified concurrently for data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
elif e.code == HTTPStatus.FORBIDDEN:
logger.error("[get-config] no right for data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
raise NacosException("Insufficient privilege.")
else:
logger.error("[get-config] error code [:%s] for data_id:%s, group:%s, namespace:%s" % (
e.code, data_id, group, self.namespace))
if no_snapshot:
raise
except Exception as e:
logger.exception("[get-config] exception %s occur" % str(e))
if no_snapshot:
raise
if no_snapshot:
return content
if content is not None:
logger.info(
"[get-config] content from server:%s, data_id:%s, group:%s, namespace:%s, try to save snapshot" % (
truncate(content), data_id, group, self.namespace))
try:
save_file(self.snapshot_base, cache_key, content)
except Exception as e:
logger.exception("[get-config] save snapshot failed for %s, data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace, str(e)))
return content
logger.error("[get-config] get config from server failed, try snapshot, data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
content = read_file_str(self.snapshot_base, cache_key)
if content is None:
logger.warning("[get-config] snapshot is not exist for %s." % cache_key)
else:
logger.debug("[get-config] get %s from snapshot directory, content is %s" % (cache_key, truncate(content)))
return content
def get_configs(self, timeout=None, no_snapshot=None, group="", page_no=1, page_size=1000):
no_snapshot = self.no_snapshot if no_snapshot is None else no_snapshot
logger.info("[get-configs] namespace:%s, timeout:%s, group:%s, page_no:%s, page_size:%s" % (
self.namespace, timeout, group, page_no, page_size))
params = {
"dataId": "",
"group": group,
"search": "accurate",
"pageNo": page_no,
"pageSize": page_size,
}
if self.namespace:
params["tenant"] = self.namespace
cache_key = group_key("", "", self.namespace)
# get from failover
content = read_file_str(self.failover_base, cache_key)
if content is None:
logger.debug("[get-config] failover config is not exist for %s, try to get from server" % cache_key)
else:
logger.debug("[get-config] get %s from failover directory, content is %s" % (cache_key, truncate(content)))
return json.loads(content)
# get from server
try:
resp = self._do_sync_req("/nacos/v1/cs/configs", None, params, None, timeout or self.default_timeout)
content = resp.read().decode("UTF-8")
except HTTPError as e:
if e.code == HTTPStatus.CONFLICT:
logger.error(
"[get-configs] configs being modified concurrently for namespace:%s" % self.namespace)
elif e.code == HTTPStatus.FORBIDDEN:
logger.error("[get-configs] no right for namespace:%s" % self.namespace)
raise NacosException("Insufficient privilege.")
else:
logger.error("[get-configs] error code [:%s] for namespace:%s" % (e.code, self.namespace))
if no_snapshot:
raise
except Exception as e:
logger.exception("[get-config] exception %s occur" % str(e))
if no_snapshot:
raise
if no_snapshot:
return json.loads(content)
if content is not None:
logger.info(
"[get-configs] content from server:%s, namespace:%s, try to save snapshot" % (
truncate(content), self.namespace))
try:
save_file(self.snapshot_base, cache_key, content)
for item in json.loads(content).get("pageItems"):
data_id = item.get('dataId')
group = item.get('group')
item_content = item.get('content')
item_cache_key = group_key(data_id, group, self.namespace)
save_file(self.snapshot_base, item_cache_key, item_content)
except Exception as e:
logger.exception("[get-configs] save snapshot failed for %s, namespace:%s" % (
str(e), self.namespace))
return json.loads(content)
logger.error("[get-configs] get config from server failed, try snapshot, namespace:%s" % self.namespace)
content = read_file_str(self.snapshot_base, cache_key)
if content is None:
logger.warning("[get-configs] snapshot is not exist for %s." % cache_key)
else:
logger.debug("[get-configs] get %s from snapshot directory, content is %s" % (cache_key, truncate(content)))
return json.loads(content)
@synchronized_with_attr("pulling_lock")
def add_config_watcher(self, data_id, group, cb, content=None):
self.add_config_watchers(data_id, group, [cb], content)
@synchronized_with_attr("pulling_lock")
def add_config_watchers(self, data_id, group, cb_list, content=None):
if not cb_list:
raise NacosException("A callback function is needed.")
data_id, group = process_common_config_params(data_id, group)
logger.info("[add-watcher] data_id:%s, group:%s, namespace:%s" % (data_id, group, self.namespace))
cache_key = group_key(data_id, group, self.namespace)
wl = self.watcher_mapping.get(cache_key)
if not wl:
wl = list()
self.watcher_mapping[cache_key] = wl
if not content:
content = self.get_config(data_id, group)
last_md5 = NacosClient.get_md5(content)
for cb in cb_list:
wl.append(WatcherWrap(cache_key, cb, last_md5))
logger.info("[add-watcher] watcher has been added for key:%s, new callback is:%s, callback number is:%s" % (
cache_key, cb.__name__, len(wl)))
if self.puller_mapping is None:
logger.debug("[add-watcher] pulling should be initialized")
self._init_pulling()
if cache_key in self.puller_mapping:
logger.debug("[add-watcher] key:%s is already in pulling" % cache_key)
return
for key, puller_info in self.puller_mapping.items():
if len(puller_info[1]) < self.pulling_config_size:
logger.debug("[add-watcher] puller:%s is available, add key:%s" % (puller_info[0], cache_key))
puller_info[1].append(cache_key)
self.puller_mapping[cache_key] = puller_info
break
else:
logger.debug("[add-watcher] no puller available, new one and add key:%s" % cache_key)
key_list = self.process_mgr.list()
key_list.append(cache_key)
sys_os = platform.system()
if sys_os == 'Windows':
puller = Thread(target=self._do_pulling, args=(key_list, self.notify_queue))
                puller.daemon = True
else:
puller = Process(target=self._do_pulling, args=(key_list, self.notify_queue))
puller.daemon = True
puller.start()
self.puller_mapping[cache_key] = (puller, key_list)
@synchronized_with_attr("pulling_lock")
def remove_config_watcher(self, data_id, group, cb, remove_all=False):
if not cb:
raise NacosException("A callback function is needed.")
data_id, group = process_common_config_params(data_id, group)
if not self.puller_mapping:
logger.warning("[remove-watcher] watcher is never started.")
return
cache_key = group_key(data_id, group, self.namespace)
wl = self.watcher_mapping.get(cache_key)
if not wl:
logger.warning("[remove-watcher] there is no watcher on key:%s" % cache_key)
return
wrap_to_remove = list()
for i in wl:
if i.callback == cb:
wrap_to_remove.append(i)
if not remove_all:
break
for i in wrap_to_remove:
wl.remove(i)
logger.info("[remove-watcher] %s is removed from %s, remove all:%s" % (cb.__name__, cache_key, remove_all))
if not wl:
logger.debug("[remove-watcher] there is no watcher for:%s, kick out from pulling" % cache_key)
self.watcher_mapping.pop(cache_key)
puller_info = self.puller_mapping[cache_key]
puller_info[1].remove(cache_key)
if not puller_info[1]:
logger.debug("[remove-watcher] there is no pulling keys for puller:%s, stop it" % puller_info[0])
self.puller_mapping.pop(cache_key)
if isinstance(puller_info[0], Process):
puller_info[0].terminate()
def _do_sync_req(self, url, headers=None, params=None, data=None, timeout=None, method="GET"):
if self.username and self.password:
if not params:
params = {}
params.update({"username": self.username, "password": self.password})
url = "?".join([url, urlencode(params)]) if params else url
all_headers = self._get_common_headers(params, data)
if headers:
all_headers.update(headers)
logger.debug(
"[do-sync-req] url:%s, headers:%s, params:%s, data:%s, timeout:%s" % (
url, all_headers, params, data, timeout))
tries = 0
while True:
try:
server_info = self.get_server()
if not server_info:
logger.error("[do-sync-req] can not get one server.")
raise NacosRequestException("Server is not available.")
address, port = server_info
server = ":".join([address, str(port)])
server_url = "%s://%s" % ("http", server)
if python_version_bellow("3"):
req = Request(url=server_url + url, data=urlencode(data).encode() if data else None,
headers=all_headers)
req.get_method = lambda: method
else:
req = Request(url=server_url + url, data=urlencode(data).encode() if data else None,
headers=all_headers, method=method)
# build a new opener with proxy settings so that HTTP requests go through the proxy
if self.proxies:
proxy_support = ProxyHandler(self.proxies)
opener = build_opener(proxy_support)
resp = opener.open(req, timeout=timeout)
else:
# for python version compatibility
if python_version_bellow("2.7.9"):
resp = urlopen(req, timeout=timeout)
else:
resp = urlopen(req, timeout=timeout, context=None)
logger.debug("[do-sync-req] info from server:%s" % server)
return resp
except HTTPError as e:
if e.code in [HTTPStatus.INTERNAL_SERVER_ERROR, HTTPStatus.BAD_GATEWAY,
HTTPStatus.SERVICE_UNAVAILABLE]:
logger.warning("[do-sync-req] server:%s is not available for reason:%s" % (server, e.msg))
else:
raise
except socket.timeout:
logger.warning("[do-sync-req] %s request timeout" % server)
except URLError as e:
logger.warning("[do-sync-req] %s connection error:%s" % (server, e.reason))
tries += 1
if tries >= len(self.server_list):
logger.error("[do-sync-req] %s maybe down, no server is currently available" % server)
raise NacosRequestException("All servers are unavailable")
self.change_server()
logger.warning("[do-sync-req] %s maybe down, skip to next" % server)
def _do_pulling(self, cache_list, queue):
cache_pool = dict()
for cache_key in cache_list:
cache_pool[cache_key] = CacheData(cache_key, self)
while cache_list:
unused_keys = set(cache_pool.keys())
contains_init_key = False
probe_update_string = ""
for cache_key in cache_list:
cache_data = cache_pool.get(cache_key)
if not cache_data:
logger.debug("[do-pulling] new key added: %s" % cache_key)
cache_data = CacheData(cache_key, self)
cache_pool[cache_key] = cache_data
else:
unused_keys.remove(cache_key)
if cache_data.is_init:
contains_init_key = True
data_id, group, namespace = parse_key(cache_key)
probe_update_string += WORD_SEPARATOR.join(
[data_id, group, cache_data.md5 or "", self.namespace]) + LINE_SEPARATOR
for k in unused_keys:
logger.debug("[do-pulling] %s is no longer watched, remove from cache" % k)
cache_pool.pop(k)
logger.debug(
"[do-pulling] try to detected change from server probe string is %s" % truncate(probe_update_string))
headers = {"Long-Pulling-Timeout": int(self.pulling_timeout * 1000)}
# if contains_init_key:
# headers["longPullingNoHangUp"] = "true"
data = {"Listening-Configs": probe_update_string}
changed_keys = list()
try:
resp = self._do_sync_req("/nacos/v1/cs/configs/listener", headers, None, data,
self.pulling_timeout + 10, "POST")
changed_keys = [group_key(*i) for i in parse_pulling_result(resp.read())]
logger.debug("[do-pulling] following keys are changed from server %s" % truncate(str(changed_keys)))
except NacosException as e:
logger.error("[do-pulling] nacos exception: %s, waiting for recovery" % str(e))
time.sleep(1)
except Exception as e:
logger.exception("[do-pulling] exception %s occur, return empty list, waiting for recovery" % str(e))
time.sleep(1)
for cache_key, cache_data in cache_pool.items():
cache_data.is_init = False
if cache_key in changed_keys:
data_id, group, namespace = parse_key(cache_key)
content = self.get_config(data_id, group)
cache_data.md5 = NacosClient.get_md5(content)
cache_data.content = content
queue.put((cache_key, cache_data.content, cache_data.md5))
@synchronized_with_attr("pulling_lock")
def _init_pulling(self):
if self.puller_mapping is not None:
logger.info("[init-pulling] puller is already initialized")
return
self.puller_mapping = dict()
self.notify_queue = Queue()
self.callback_tread_pool = pool.ThreadPool(self.callback_thread_num)
self.process_mgr = Manager()
t = Thread(target=self._process_polling_result)
t.daemon = True
t.start()
logger.info("[init-pulling] init completed")
def _process_polling_result(self):
while True:
cache_key, content, md5 = self.notify_queue.get()
logger.debug("[process-polling-result] receive an event:%s" % cache_key)
wl = self.watcher_mapping.get(cache_key)
if not wl:
logger.warning("[process-polling-result] no watcher on %s, ignored" % cache_key)
continue
data_id, group, namespace = parse_key(cache_key)
plain_content = content
params = {
"data_id": data_id,
"group": group,
"namespace": namespace,
"raw_content": content,
"content": plain_content,
}
for watcher in wl:
if not watcher.last_md5 == md5:
logger.debug(
"[process-polling-result] md5 changed since last call, calling %s with changed params: %s"
% (watcher.callback.__name__, params))
try:
self.callback_tread_pool.apply(watcher.callback, (params,))
except Exception as e:
logger.exception("[process-polling-result] exception %s occur while calling %s " % (
str(e), watcher.callback.__name__))
watcher.last_md5 = md5
def _get_common_headers(self, params, data):
return {}
def _build_metadata(self, metadata, params):
if metadata:
if isinstance(metadata, dict):
params["metadata"] = json.dumps(metadata)
else:
params["metadata"] = metadata
def add_naming_instance(self, service_name, ip, port, cluster_name=None, weight=1.0, metadata=None,
enable=True, healthy=True, ephemeral=True, group_name=DEFAULT_GROUP_NAME):
logger.info("[add-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
ip, port, service_name, self.namespace))
params = {
"ip": ip,
"port": port,
"serviceName": service_name,
"weight": weight,
"enable": enable,
"healthy": healthy,
"clusterName": cluster_name,
"ephemeral": ephemeral,
"groupName": group_name
}
self._build_metadata(metadata, params)
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "POST")
c = resp.read()
logger.info("[add-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
ip, port, service_name, self.namespace, c))
return c == b"ok"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[add-naming-instance] exception %s occur" % str(e))
raise
def remove_naming_instance(self, service_name, ip, port, cluster_name=None, ephemeral=True,group_name=DEFAULT_GROUP_NAME):
logger.info("[remove-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
ip, port, service_name, self.namespace))
params = {
"ip": ip,
"port": port,
"serviceName": service_name,
"ephemeral": ephemeral,
"groupName":group_name
}
if cluster_name is not None:
params["clusterName"] = cluster_name
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "DELETE")
c = resp.read()
logger.info("[remove-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
ip, port, service_name, self.namespace, c))
return c == b"ok"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[remove-naming-instance] exception %s occur" % str(e))
raise
def modify_naming_instance(self, service_name, ip, port, cluster_name=None, weight=None, metadata=None,
enable=None, ephemeral=True,group_name=DEFAULT_GROUP_NAME):
logger.info("[modify-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
ip, port, service_name, self.namespace))
params = {
"ip": ip,
"port": port,
"serviceName": service_name,
"ephemeral": ephemeral,
"groupName": group_name
}
if cluster_name is not None:
params["clusterName"] = cluster_name
if enable is not None:
params["enable"] = enable
if weight is not None:
params["weight"] = weight
self._build_metadata(metadata, params)
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "PUT")
c = resp.read()
logger.info("[modify-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
ip, port, service_name, self.namespace, c))
return c == b"ok"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[modify-naming-instance] exception %s occur" % str(e))
raise
def list_naming_instance(self, service_name, clusters=None, namespace_id=None, group_name=None, healthy_only=False):
"""
:param service_name: 服务名
:param clusters: 集群名称 字符串,多个集群用逗号分隔
:param namespace_id: 命名空间ID
:param group_name: 分组名
:param healthy_only: 是否只返回健康实例 否,默认为false
"""
logger.info("[list-naming-instance] service_name:%s, namespace:%s" % (service_name, self.namespace))
params = {
"serviceName": service_name,
"healthyOnly": healthy_only
}
if clusters is not None:
params["clusters"] = clusters
namespace_id = namespace_id or self.namespace
if namespace_id:
params["namespaceId"] = namespace_id
group_name = group_name or 'DEFAULT_GROUP'
if group_name:
params['groupName'] = group_name
try:
resp = self._do_sync_req("/nacos/v1/ns/instance/list", None, params, None, self.default_timeout, "GET")
c = resp.read()
logger.info("[list-naming-instance] service_name:%s, namespace:%s, server response:%s" %
(service_name, self.namespace, c))
return json.loads(c.decode("UTF-8"))
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[list-naming-instance] exception %s occur" % str(e))
raise
def get_naming_instance(self, service_name, ip, port, cluster_name=None):
logger.info("[get-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (ip, port, service_name,
self.namespace))
params = {
"serviceName": service_name,
"ip": ip,
"port": port,
}
if cluster_name is not None:
params["cluster"] = cluster_name
params["clusterName"] = cluster_name
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance", None, params, None, self.default_timeout, "GET")
c = resp.read()
logger.info("[get-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" %
(ip, port, service_name, self.namespace, c))
return json.loads(c.decode("UTF-8"))
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[get-naming-instance] exception %s occur" % str(e))
raise
def send_heartbeat(self, service_name, ip, port, cluster_name=None, weight=1.0, metadata=None, ephemeral=True,group_name=DEFAULT_GROUP_NAME):
logger.info("[send-heartbeat] ip:%s, port:%s, service_name:%s, namespace:%s" % (ip, port, service_name,
self.namespace))
beat_data = {
"serviceName": service_name,
"ip": ip,
"port": port,
"weight": weight,
"ephemeral": ephemeral
}
if cluster_name is not None:
beat_data["cluster"] = cluster_name
if metadata is not None:
if isinstance(metadata, str):
beat_data["metadata"] = json.loads(metadata)
else:
beat_data["metadata"] = metadata
params = {
"serviceName": service_name,
"beat": json.dumps(beat_data),
"groupName": group_name
}
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance/beat", None, params, None, self.default_timeout, "PUT")
c = resp.read()
logger.info("[send-heartbeat] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" %
(ip, port, service_name, self.namespace, c))
return json.loads(c.decode("UTF-8"))
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[send-heartbeat] exception %s occur" % str(e))
raise
def subscribe(self,
listener_fn, listener_interval=7, *args, **kwargs):
"""
reference at `/nacos/v1/ns/instance/list` in https://nacos.io/zh-cn/docs/open-api.html
:param listener_fn 监听方法,可以是元组,列表,单个监听方法
:param listener_interval 监听间隔,在 HTTP 请求 OpenAPI 时间间隔
:return:
"""
service_name = kwargs.get("service_name")
if not service_name:
if len(args) > 0:
service_name = args[0]
else:
raise NacosException("`service_name` is required in subscribe")
self.subscribed_local_manager.add_local_listener(key=service_name, listener_fn=listener_fn)
# track whether this is the first subscription poll
class _InnerSubContext(object):
first_sub = True
def _compare_and_trigger_listener():
# invoke `list_naming_instance`
latest_res = self.list_naming_instance(*args, **kwargs)
latest_instances = latest_res['hosts']
# fetch the locally cached instances
local_service_instances_dict = self.subscribed_local_manager.get_local_instances(service_name)
# nothing cached locally yet, so every instance is new
if not local_service_instances_dict:
if not latest_instances or len(latest_instances) < 1:
# do not notify listeners on the first subscription poll
if _InnerSubContext.first_sub:
_InnerSubContext.first_sub = False
return
for instance in latest_instances:
slc = SubscribedLocalInstance(key=service_name, instance=instance)
self.subscribed_local_manager.add_local_instance(slc)
# do not notify listeners on the first subscription poll
if _InnerSubContext.first_sub:
_InnerSubContext.first_sub = False
return
self.subscribed_local_manager.do_listener_launch(service_name, Event.ADDED, slc)
else:
local_service_instances_dict_copy = local_service_instances_dict.copy()
for instance in latest_instances:
slc = SubscribedLocalInstance(key=service_name, instance=instance)
local_slc = local_service_instances_dict.get(slc.instance_id)
# the instance is not cached locally
if local_slc is None:
self.subscribed_local_manager.add_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.ADDED, slc)
# the instance is already cached locally
else:
local_slc_md5 = local_slc.md5
local_slc_id = local_slc.instance_id
local_service_instances_dict_copy.pop(local_slc_id)
# compare md5 values; the instance has changed
if local_slc_md5 != slc.md5:
self.subscribed_local_manager.remove_local_instance(local_slc).add_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.MODIFIED, slc)
# instances still left in the local copy were deleted on the server
if len(local_service_instances_dict_copy) > 0:
for local_slc_id, slc in local_service_instances_dict_copy.items():
self.subscribed_local_manager.remove_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.DELETED, slc)
timer_name = 'service-subscribe-timer-{key}'.format(key=service_name)
subscribe_timer = NacosTimer(name=timer_name,
interval=listener_interval,
fn=_compare_and_trigger_listener)
subscribe_timer.scheduler()
self.subscribe_timer_manager.add_timer(subscribe_timer)
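# Illustrative use of the subscribe API (a hedged sketch, not part of the original file; the
# service name is an assumption and the exact listener signature depends on
# SubscribedLocalManager.do_listener_launch, which is defined elsewhere in this module):
#
#     client.subscribe(my_listener_fn, listener_interval=10, service_name="example-service")
#     ...
#     client.unsubscribe("example-service")   # drop the listeners registered for this service
#     client.stop_subscribe()                 # stop the polling timers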
def unsubscribe(self, service_name, listener_name=None):
"""
remove listener from subscribed listener manager
:param service_name: service_name
:param listener_name: listener name
:return:
"""
listener_manager = self.subscribed_local_manager.get_local_listener_manager(key=service_name)
if not listener_manager:
return
if listener_name:
listener_manager.remove_listener(listener_name)
return
listener_manager.empty_listeners()
def stop_subscribe(self):
"""
stop subscribe timer scheduler
:return:
"""
self.subscribe_timer_manager.stop()
if DEBUG:
NacosClient.set_debugging()
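# Minimal end-to-end sketch (illustrative, not part of the original client): the constructor
# arguments and addresses below are assumptions, and a reachable Nacos server is required for
# any of these calls to succeed.
if __name__ == "__main__":
    client = NacosClient("127.0.0.1:8848", namespace="public")  # assumed constructor signature

    def on_config_change(params):
        # params carries data_id, group, namespace, raw_content and content
        # (see _process_polling_result above)
        print("config changed:", params["data_id"], params["content"])

    client.add_config_watcher("example-data-id", "DEFAULT_GROUP", on_config_change)
    client.add_naming_instance("example-service", "127.0.0.1", 8080)
    print(client.list_naming_instance("example-service", healthy_only=True))
    time.sleep(30)  # keep the process alive so the background puller can deliver changes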
|
cpumultilimit.py
|
# coding: UTF-8
import subprocess
import threading
import time
import traceback
from subprocess import check_output
#######config###########
# setting: limitpercent is the CPU cap (in percent) passed to cpulimit -l
limitpercent = 30
# setting: limitprocessname is the name of the process to run cpulimit against
limitprocessname = "bedrock_server"
########################
########functions#######
# function that launches cpulimit for the given PID
def cd_exec(pid):
lp = None
print(pid)
try:
lp = subprocess.Popen([ "cpulimit", "-l", str(limitpercent), "-p", str(pid)], shell=False)
except Exception:
traceback.print_exc()
print("limitprocessprocessException")
# function that resolves PIDs from a process name
#https://stackoverflow.com/questions/26688936/how-to-get-pid-by-process-name
def scraiping_pid(processname):
try:
# split the command output into a list -> decode as UTF-8 -> return as a set
maped = set([i.decode("utf-8") for i in check_output(["pidof", processname]).split()])
print(maped)
return maped
except Exception:
traceback.print_exc()
print("scraiping_pidException")
########################
########Main Thread#####
pidsmap = scraiping_pid(limitprocessname)
print("pidsmap:"+str(pidsmap))
# launch cpulimit for every PID in the list
for p in pidsmap:
thread_1 = threading.Thread(target=cd_exec, args=(p,))
thread_1.start()
while True:
newpid = scraiping_pid(limitprocessname)
print("/////////")
print("newpid:" + str(newpid))
print("pidsmap:" + str(pidsmap))
# occasionally raises AttributeError: 'NoneType' object has no attribute 'difference', so wrap in try/except.
try:
# https://xwave.exblog.jp/11309038/
# https://note.nkmk.me/python-list-str-num-conversion/
# iterate over the difference: PIDs present in newpid but not in pidsmap
list_ab = newpid.difference(pidsmap)
print("list_ab:" + str(list_ab))
print("*********")
for l in list_ab:
thread_2 = threading.Thread(target=cd_exec, args=(l,))
thread_2.start()
except Exception:
traceback.print_exc()
# store the new set of PIDs
pidsmap = newpid
# sleep so the pidof command is not spammed
time.sleep(10)
########################
|
object_detection_multithreading.py
|
import os
import cv2
import time
import argparse
import numpy as np
import tensorflow as tf
from socket import *
from queue import Queue
from threading import Thread
from utils.app_utils import FPS, WebcamVideoStream, draw_boxes_and_labels
from object_detection.utils import label_map_util
CWD_PATH = os.getcwd()
# Path to frozen detection graph. This is the actual model that is used for the object detection.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
PATH_TO_CKPT = os.path.join(CWD_PATH, 'object_detection', MODEL_NAME,
'frozen_inference_graph.pb')
# List of the strings that are used to add the correct label for each box.
PATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data',
'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
port = 10000
buf = 1024
# Loading label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def detect_objects(image_np, sess, detection_graph):
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
rect_points, class_names, class_colors = draw_boxes_and_labels(
boxes=np.squeeze(boxes),
classes=np.squeeze(classes).astype(np.int32),
scores=np.squeeze(scores),
category_index=category_index,
min_score_thresh=.5)
return dict(
rect_points=rect_points,
class_names=class_names,
class_colors=class_colors)
def worker(input_q, output_q):
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
fps = FPS().start()
while True:
fps.update()
frame = input_q.get()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
output_q.put(detect_objects(frame_rgb, sess, detection_graph))
fps.stop()
sess.close()
def sendFile(fName, destAddr):
s = socket(AF_INET, SOCK_DGRAM)
f = open(fName, "rb")
data = f.read(buf)
while data:
if (s.sendto(data, destAddr)):
data = f.read(buf)
f.close()
s.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-src',
dest='video_source',
type=str,
default='0.0.0.0:8080',
help='IP addr and port of the IPCam server.')
parser.add_argument(
'-dst',
dest='video_dest',
type=str,
default='0.0.0.0',
help='IP addr and port of the target computer')
parser.add_argument(
'-wd',
'--width',
dest='width',
type=int,
default=480,
help='Width of the frames in the video stream.')
parser.add_argument(
'-ht',
'--height',
dest='height',
type=int,
default=360,
help='Height of the frames in the video stream.')
args = parser.parse_args()
destAddr = (args.video_dest, port)
input_q = Queue(5)  # FPS improves with a larger queue, but latency increases
output_q = Queue()
for i in range(1):
t = Thread(target=worker, args=(input_q, output_q))
t.daemon = True
t.start()
video_capture = WebcamVideoStream(
src='http://' + args.video_source + '/video',
width=args.width,
height=args.height).start()
fps = FPS().start()
lastTime = (time.time() - 30)
while True:
frame = video_capture.read()
input_q.put(frame)
t = time.time()
if output_q.empty():
pass # fill up queue
else:
font = cv2.FONT_HERSHEY_SIMPLEX
data = output_q.get()
rec_points = data['rect_points']
class_names = data['class_names']
class_colors = data['class_colors']
for point, name, color in zip(rec_points, class_names,
class_colors):
cv2.rectangle(frame, (int(point['xmin'] * args.width),
int(point['ymin'] * args.height)),
(int(point['xmax'] * args.width),
int(point['ymax'] * args.height)), color, 3)
cv2.rectangle(
frame, (int(point['xmin'] * args.width),
int(point['ymin'] * args.height)),
(int(point['xmin'] * args.width) + len(name[0]) * 6,
int(point['ymin'] * args.height) - 10), color, -1,
cv2.LINE_AA)
cv2.putText(frame, name[0], (int(point['xmin'] * args.width),
int(point['ymin'] * args.height)),
font, 0.3, (0, 0, 0), 1)
cv2.imwrite("img.jpg", frame)
sendFile("img.jpg", destAddr)
fps.update()
print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
video_capture.stop()
cv2.destroyAllWindows()
|
python_GIL_01.py
|
# GIL: Global Interpreter Lock (CPython)
# A Python thread corresponds to one C-level thread
# The GIL means that only one thread at a time can execute bytecode on a CPU
# The GIL switches threads based on the number of bytecodes executed and on time slices; it cannot map multiple threads onto multiple CPUs, and it is released voluntarily on I/O
# For I/O-bound work, multithreading and multiprocessing perform about the same
# Related topics: shared variables and Queue
# PyPy and libraries that try to remove the GIL
# The GIL is released based on executed bytecode count and time slices, and also when I/O operations occur
# Differences of the GIL between Python 2 and Python 3
# Differences between Python and CPython
# +============= inspect the bytecode with dis ===================
# import dis
#
#
# def add(a):
# a = a + 1
# return a
#
#
# print(dis.dis(add))
# +============= inspect the bytecode with dis ===================
total = 0
def add():
global total
for i in range(1000000):
total += 1
def des():
global total
for i in range(1000000):
total -= 1
import threading
thread1 = threading.Thread(target=add)
thread2 = threading.Thread(target=des)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print(total)
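# Follow-up sketch (not in the original file): the nondeterministic result above is caused by
# the unprotected read-modify-write on `total`; guarding it with a threading.Lock makes the
# outcome deterministic (always 0), at the cost of serializing the updates.
locked_total = 0
lock = threading.Lock()

def add_locked():
    global locked_total
    for _ in range(1000000):
        with lock:
            locked_total += 1

def des_locked():
    global locked_total
    for _ in range(1000000):
        with lock:
            locked_total -= 1

t1 = threading.Thread(target=add_locked)
t2 = threading.Thread(target=des_locked)
t1.start()
t2.start()
t1.join()
t2.join()
print(locked_total)  # always prints 0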
|
_local_.py
|
import time
import threading
threading_local = threading.local()
"""
threading.local()这个方法的特点用来保存一个全局变量,但是这个全局变量只有在当前线程才能访问,
localVal.val = name这条语句可以储存一个变量到当前线程,如果在另外一个线程里面再次对localVal.val进行赋值,
那么会在另外一个线程单独创建内存空间来存储,也就是说在不同的线程里面赋值 不会覆盖之前的值,因为每个
线程里面都有一个单独的空间来保存这个数据,而且这个数据是隔离的,其他线程无法访问
这个东西可以用在那些地方呢,比如下载,现在都是多线程下载了,就像酷狗那样,可以同时下载很多首歌曲,那么
就可以利用这个方法来保存每个下载线程的数据,比如下载进度,下载速度之类的
所以 如果你在开发多线程应用的时候 需要每个线程保存一个单独的数据供当前线程操作,可以考虑使用这个方法,简单有效
其实这样的功能还有很多种方法可以实现,比如我们在主线程实例化一个dict对象,然后用线程的名字作为key,因为线程之间可以共享数据,
所以也可以实现相同功能,并且灵活性更多,不过代码就没那么优雅简洁了
"""
def test_3():
def print_data():
data = threading_local.data
print(f'{data} in {threading.current_thread().name}')
def target(name):
threading_local.data = name
print_data()
thread_a = threading.Thread(target=target, args=('AAA',), name='ThreadA')
thread_b = threading.Thread(target=target, args=('BBB',), name='ThreadB')
thread_a.start()
thread_b.start()
thread_a.join()
thread_b.join()
value_global = 0
value_local = threading.local()
def test1():
def run(arg):
global value_global
value_global = arg
time.sleep(2)
print(value_global)
for i in range(10):
thread = threading.Thread(target=run, args=(i,))
thread.start()
def test2():
def run(arg):
value_local.value = arg
time.sleep(2)
print(value_local.value)
for i in range(10):
thread = threading.Thread(target=run, args=(i,))
thread.start()
if __name__ == '__main__':
test1()
test2()
|
parallel.py
|
import _thread as thread
import logging
import operator
import sys
from queue import Empty
from queue import Queue
from threading import Lock
from threading import Semaphore
from threading import Thread
from docker.errors import APIError
from docker.errors import ImageNotFound
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.const import PARALLEL_LIMIT
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
log = logging.getLogger(__name__)
STOP = object()
class GlobalLimit(object):
"""Simple class to hold a global semaphore limiter for a project. This class
should be treated as a singleton that is instantiated when the project is.
"""
global_limiter = Semaphore(PARALLEL_LIMIT)
@classmethod
def set_global_limit(cls, value):
if value is None:
value = PARALLEL_LIMIT
cls.global_limiter = Semaphore(value)
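# Usage note (a sketch, not part of the original module): a project would typically adjust the
# global cap once at startup, e.g.
#
#     GlobalLimit.set_global_limit(10)   # allow at most 10 concurrent operations project-wide
#
# Passing None restores the default PARALLEL_LIMIT imported from compose.const.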
def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
""" Watch events from a parallel execution, update status and fill errors and results.
Returns exception to re-raise.
"""
error_to_reraise = None
for obj, result, exception in events:
if exception is None:
if fail_check is not None and fail_check(obj):
writer.write(msg, get_name(obj), 'failed', red)
else:
writer.write(msg, get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, ImageNotFound):
# This is to bubble up ImageNotFound exceptions to the client so we
# can prompt the user if they want to rebuild.
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
error_to_reraise = exception
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
errors[get_name(obj)] = exception.msg
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, UpstreamError):
writer.write(msg, get_name(obj), 'error', red)
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
return error_to_reraise
def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
"""Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
get_deps called on object must return a collection with its dependencies.
get_name called on object must return its name.
fail_check is an additional failure check for cases that should display as a failure
in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)
"""
objects = list(objects)
stream = sys.stderr
if ParallelStreamWriter.instance:
writer = ParallelStreamWriter.instance
else:
writer = ParallelStreamWriter(stream)
for obj in objects:
writer.add_object(msg, get_name(obj))
for obj in objects:
writer.write_initial(msg, get_name(obj))
events = parallel_execute_iter(objects, func, get_deps, limit)
errors = {}
results = []
error_to_reraise = parallel_execute_watch(
events, writer, errors, results, msg, get_name, fail_check
)
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
if error_to_reraise:
raise error_to_reraise
return results, errors
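# Illustrative call (a hedged sketch; the container objects and their `stop` method are
# assumptions used only to show the shape of the arguments):
#
#     results, errors = parallel_execute(
#         containers,
#         func=operator.methodcaller('stop', timeout=10),
#         get_name=operator.attrgetter('name'),
#         msg='Stopping',
#     )
#
# Each object is processed in parallel; per-object failures are collected in `errors`, and the
# first re-raisable exception (if any) is raised after all events have been consumed.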
def _no_deps(x):
return []
class State(object):
"""
Holds the state of a partially-complete parallel operation.
state.started: objects being processed
state.finished: objects which have been processed
state.failed: objects which either failed or whose dependencies failed
"""
def __init__(self, objects):
self.objects = objects
self.started = set()
self.finished = set()
self.failed = set()
def is_done(self):
return len(self.finished) + len(self.failed) >= len(self.objects)
def pending(self):
return set(self.objects) - self.started - self.finished - self.failed
class NoLimit(object):
def __enter__(self):
pass
def __exit__(self, *ex):
pass
def parallel_execute_iter(objects, func, get_deps, limit):
"""
Runs func on objects in parallel while ensuring that func is
run on an object only after it has been run on all its dependencies.
Returns an iterator of tuples which look like:
# if func returned normally when run on object
(object, result, None)
# if func raised an exception when run on object
(object, None, exception)
# if func raised an exception when run on one of object's dependencies
(object, None, UpstreamError())
"""
if get_deps is None:
get_deps = _no_deps
if limit is None:
limiter = NoLimit()
else:
limiter = Semaphore(limit)
results = Queue()
state = State(objects)
while True:
feed_queue(objects, func, get_deps, results, state, limiter)
try:
event = results.get(timeout=0.1)
except Empty:
continue
# See https://github.com/docker/compose/issues/189
except thread.error:
raise ShutdownException()
if event is STOP:
break
obj, _, exception = event
if exception is None:
log.debug('Finished processing: {}'.format(obj))
state.finished.add(obj)
else:
log.debug('Failed: {}'.format(obj))
state.failed.add(obj)
yield event
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter, GlobalLimit.global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
def feed_queue(objects, func, get_deps, results, state, limiter):
"""
Starts producer threads for any objects which are ready to be processed
(i.e. they have no dependencies which haven't been successfully processed).
Shortcuts any objects whose dependencies have failed and places an
(object, None, UpstreamError()) tuple on the results queue.
"""
pending = state.pending()
log.debug('Pending: {}'.format(pending))
for obj in pending:
deps = get_deps(obj)
try:
if any(dep[0] in state.failed for dep in deps):
log.debug('{} has upstream errors - not processing'.format(obj))
results.put((obj, None, UpstreamError()))
state.failed.add(obj)
elif all(
dep not in objects or (
dep in state.finished and (not ready_check or ready_check(dep))
) for dep, ready_check in deps
):
log.debug('Starting producer thread for {}'.format(obj))
t = Thread(target=producer, args=(obj, func, results, limiter))
t.daemon = True
t.start()
state.started.add(obj)
except (HealthCheckFailed, NoHealthCheckConfigured) as e:
log.debug(
'Healthcheck for service(s) upstream of {} failed - '
'not processing'.format(obj)
)
results.put((obj, None, e))
if state.is_done():
results.put(STOP)
class UpstreamError(Exception):
pass
class ParallelStreamWriter(object):
"""Write out messages for operations happening in parallel.
Each operation has its own line, and ANSI escape codes are used
to jump to the correct line and write over it.
"""
noansi = False
lock = Lock()
instance = None
@classmethod
def set_noansi(cls, value=True):
cls.noansi = value
def __init__(self, stream):
self.stream = stream
self.lines = []
self.width = 0
ParallelStreamWriter.instance = self
def add_object(self, msg, obj_index):
if msg is None:
return
self.lines.append(msg + obj_index)
self.width = max(self.width, len(msg + ' ' + obj_index))
def write_initial(self, msg, obj_index):
if msg is None:
return
return self._write_noansi(msg, obj_index, '')
def _write_ansi(self, msg, obj_index, status):
self.lock.acquire()
position = self.lines.index(msg + obj_index)
diff = len(self.lines) - position
# move up
self.stream.write("%c[%dA" % (27, diff))
# erase
self.stream.write("%c[2K\r" % 27)
self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index,
status, width=self.width))
# move back down
self.stream.write("%c[%dB" % (27, diff))
self.stream.flush()
self.lock.release()
def _write_noansi(self, msg, obj_index, status):
self.stream.write(
"{:<{width}} ... {}\r\n".format(
msg + ' ' + obj_index, status, width=self.width
)
)
self.stream.flush()
def write(self, msg, obj_index, status, color_func):
if msg is None:
return
if self.noansi:
self._write_noansi(msg, obj_index, status)
else:
self._write_ansi(msg, obj_index, color_func(status))
def get_stream_writer():
instance = ParallelStreamWriter.instance
if instance is None:
raise RuntimeError('ParallelStreamWriter has not yet been instantiated')
return instance
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
operator.methodcaller(operation, **options),
operator.attrgetter('name'),
message,
)
def parallel_remove(containers, options):
stopped_containers = [c for c in containers if not c.is_running]
parallel_operation(stopped_containers, 'remove', options, 'Removing')
def parallel_pause(containers, options):
parallel_operation(containers, 'pause', options, 'Pausing')
def parallel_unpause(containers, options):
parallel_operation(containers, 'unpause', options, 'Unpausing')
def parallel_kill(containers, options):
parallel_operation(containers, 'kill', options, 'Killing')
|
applications_test.py
|
import pytest
import random
import six
import numpy as np
import keras_applications
from keras.applications import densenet
from keras.applications import inception_resnet_v2
from keras.applications import inception_v3
from keras.applications import mobilenet
try:
from keras.applications import mobilenet_v2
except ImportError:
from keras.applications import mobilenetv2 as mobilenet_v2
from keras.applications import nasnet
from keras.applications import resnet50
from keras.applications import vgg16
from keras.applications import vgg19
from keras.applications import xception
from keras.preprocessing import image
from keras import backend
from keras import layers
from keras import models
from keras import utils
from multiprocessing import Process, Queue
def keras_modules_injection(base_fun):
def wrapper(*args, **kwargs):
kwargs['backend'] = backend
kwargs['layers'] = layers
kwargs['models'] = models
kwargs['utils'] = utils
return base_fun(*args, **kwargs)
return wrapper
for (name, module) in [('resnet', keras_applications.resnet),
('resnet_v2', keras_applications.resnet_v2),
('resnext', keras_applications.resnext),
('efficientnet', keras_applications.efficientnet)]:
module.decode_predictions = keras_modules_injection(module.decode_predictions)
module.preprocess_input = keras_modules_injection(module.preprocess_input)
for app in dir(module):
if app[0].isupper() and callable(getattr(module, app)):
setattr(module, app, keras_modules_injection(getattr(module, app)))
setattr(keras_applications, name, module)
RESNET_LIST = [keras_applications.resnet.ResNet50,
keras_applications.resnet.ResNet101,
keras_applications.resnet.ResNet152]
RESNETV2_LIST = [keras_applications.resnet_v2.ResNet50V2,
keras_applications.resnet_v2.ResNet101V2,
keras_applications.resnet_v2.ResNet152V2]
RESNEXT_LIST = [keras_applications.resnext.ResNeXt50,
keras_applications.resnext.ResNeXt101]
MOBILENET_LIST = [(mobilenet.MobileNet, mobilenet, 1024),
(mobilenet_v2.MobileNetV2, mobilenet_v2, 1280)]
DENSENET_LIST = [(densenet.DenseNet121, 1024),
(densenet.DenseNet169, 1664),
(densenet.DenseNet201, 1920)]
NASNET_LIST = [(nasnet.NASNetMobile, 1056),
(nasnet.NASNetLarge, 4032)]
EFFICIENTNET_LIST = [(keras_applications.efficientnet.EfficientNetB0, 1280),
(keras_applications.efficientnet.EfficientNetB1, 1280),
(keras_applications.efficientnet.EfficientNetB2, 1408),
(keras_applications.efficientnet.EfficientNetB3, 1536),
(keras_applications.efficientnet.EfficientNetB4, 1792),
(keras_applications.efficientnet.EfficientNetB5, 2048)]
def keras_test(func):
"""Function wrapper to clean up after TensorFlow tests.
# Arguments
func: test function to clean up after.
# Returns
A function wrapping the input function.
"""
@six.wraps(func)
def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
if backend.backend() == 'tensorflow' or backend.backend() == 'cntk':
backend.clear_session()
return output
return wrapper
def _get_elephant(target_size):
# For models that don't include a Flatten step,
# the default is to accept variable-size inputs
# even when loading ImageNet weights (since it is possible).
# In this case, default to 299x299.
if target_size[0] is None:
target_size = (299, 299)
img = image.load_img('tests/data/elephant.jpg',
target_size=tuple(target_size))
x = image.img_to_array(img)
return np.expand_dims(x, axis=0)
def _get_output_shape(model_fn, preprocess_input=None):
if backend.backend() == 'cntk':
# Create model in a subprocess so that
# the memory consumed by InceptionResNetV2 will be
# released back to the system after this test
# (to deal with OOM error on CNTK backend).
# TODO: remove the use of multiprocessing from these tests
# once a memory clearing mechanism
# is implemented in the CNTK backend.
def target(queue):
model = model_fn()
if preprocess_input is None:
queue.put(model.output_shape)
else:
x = _get_elephant(model.input_shape[1:3])
x = preprocess_input(x)
queue.put((model.output_shape, model.predict(x)))
queue = Queue()
p = Process(target=target, args=(queue,))
p.start()
p.join()
# The error in a subprocess won't propagate
# to the main process, so we check if the model
# is successfully created by checking if the output shape
# has been put into the queue
assert not queue.empty(), 'Model creation failed.'
return queue.get_nowait()
else:
model = model_fn()
if preprocess_input is None:
return model.output_shape
else:
x = _get_elephant(model.input_shape[1:3])
x = preprocess_input(x)
return (model.output_shape, model.predict(x))
@keras_test
def _test_application_basic(app, last_dim=1000, module=None):
if module is None:
output_shape = _get_output_shape(lambda: app(weights=None))
assert output_shape == (None, None, None, last_dim)
else:
output_shape, preds = _get_output_shape(
lambda: app(weights='imagenet'), module.preprocess_input)
assert output_shape == (None, last_dim)
names = [p[1] for p in module.decode_predictions(preds)[0]]
# Test correct label is in top 3 (weak correctness test).
assert 'African_elephant' in names[:3]
@keras_test
def _test_application_notop(app, last_dim):
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False))
assert output_shape == (None, None, None, last_dim)
@keras_test
def _test_application_variable_input_channels(app, last_dim):
if backend.image_data_format() == 'channels_first':
input_shape = (1, None, None)
else:
input_shape = (None, None, 1)
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False, input_shape=input_shape))
assert output_shape == (None, None, None, last_dim)
if backend.image_data_format() == 'channels_first':
input_shape = (4, None, None)
else:
input_shape = (None, None, 4)
output_shape = _get_output_shape(
lambda: app(weights=None, include_top=False, input_shape=input_shape))
assert output_shape == (None, None, None, last_dim)
@keras_test
def _test_app_pooling(app, last_dim):
output_shape = _get_output_shape(
lambda: app(weights=None,
include_top=False,
pooling=random.choice(['avg', 'max'])))
assert output_shape == (None, last_dim)
def test_resnet():
app = random.choice(RESNET_LIST)
module = keras_applications.resnet
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_resnetv2():
app = random.choice(RESNETV2_LIST)
module = keras_applications.resnet_v2
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_resnext():
app = random.choice(RESNEXT_LIST)
module = keras_applications.resnext
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_vgg():
app = random.choice([vgg16.VGG16, vgg19.VGG19])
module = vgg16
last_dim = 512
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_xception():
app = xception.Xception
module = xception
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_inceptionv3():
app = inception_v3.InceptionV3
module = inception_v3
last_dim = 2048
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_inceptionresnetv2():
app = inception_resnet_v2.InceptionResNetV2
module = inception_resnet_v2
last_dim = 1536
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_mobilenet():
app, module, last_dim = random.choice(MOBILENET_LIST)
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_densenet():
app, last_dim = random.choice(DENSENET_LIST)
module = densenet
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_nasnet():
app, last_dim = NASNET_LIST[0] # NASNetLarge is too heavy to test on Travis
module = nasnet
_test_application_basic(app, module=module)
# _test_application_notop(app, last_dim)
# _test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
def test_efficientnet():
app, last_dim = random.choice(EFFICIENTNET_LIST)
module = keras_applications.efficientnet
_test_application_basic(app, module=module)
_test_application_notop(app, last_dim)
_test_application_variable_input_channels(app, last_dim)
_test_app_pooling(app, last_dim)
if __name__ == '__main__':
pytest.main([__file__])
|
SentenceTransformer.py
|
import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from zipfile import ZipFile
import requests
import numpy as np
from numpy import ndarray
import transformers
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import tqdm, trange
import math
import queue
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from .models import Transformer, Pooling
from . import __version__
logger = logging.getLogger(__name__)
class SentenceTransformer(nn.Sequential):
"""
Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
"""
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
if model_name_or_path is not None and model_name_or_path != "":
logger.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
model_path = model_name_or_path
if not os.path.isdir(model_path) and not model_path.startswith('http://') and not model_path.startswith('https://'):
logger.info("Did not find folder {}".format(model_path))
if '\\' in model_path or model_path.count('/') > 1:
raise AttributeError("Path {} not found".format(model_path))
model_path = __DOWNLOAD_SERVER__ + model_path + '.zip'
logger.info("Try to download model from server: {}".format(model_path))
if model_path.startswith('http://') or model_path.startswith('https://'):
model_url = model_path
folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250].rstrip('.zip')
cache_folder = os.getenv('SENTENCE_TRANSFORMERS_HOME')
if cache_folder is None:
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
cache_folder = os.path.join(torch_cache_home, 'sentence_transformers')
model_path = os.path.join(cache_folder, folder_name)
if not os.path.exists(model_path) or not os.listdir(model_path):
if model_url[-1] == "/":
model_url = model_url[:-1]
logger.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
model_path_tmp = model_path.rstrip("/").rstrip("\\")+"_part"
try:
zip_save_path = os.path.join(model_path_tmp, 'model.zip')
http_get(model_url, zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(model_path_tmp)
os.remove(zip_save_path)
os.rename(model_path_tmp, model_path)
except requests.exceptions.HTTPError as e:
shutil.rmtree(model_path_tmp)
if e.response.status_code == 404:
logger.warning('SentenceTransformer-Model {} not found. Try to create it from scratch'.format(model_url))
logger.warning('Try to create Transformer Model {} with mean pooling'.format(model_name_or_path))
model_path = None
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension())
modules = [transformer_model, pooling_model]
else:
raise e
except Exception as e:
shutil.rmtree(model_path)
raise e
#### Load from disk
if model_path is not None:
logger.info("Load SentenceTransformer from folder: {}".format(model_path))
if os.path.exists(os.path.join(model_path, 'config.json')):
with open(os.path.join(model_path, 'config.json')) as fIn:
config = json.load(fIn)
if config['__version__'] > __version__:
logger.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))
with open(os.path.join(model_path, 'modules.json')) as fIn:
contained_modules = json.load(fIn)
modules = OrderedDict()
for module_config in contained_modules:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str], List[int]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
is_pretokenized: bool = False,
device: str = None,
num_workers: int = 0) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
:param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
:param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
:param is_pretokenized: DEPRECATED - No longer used, will be removed in the future
:param device: Which torch.device to use for the computation
:param num_workers: DEPRECATED - No longer used, will be removed in the future
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logger.getEffectiveLevel()==logging.INFO or logger.getEffectiveLevel()==logging.DEBUG)
if convert_to_tensor:
convert_to_numpy = False
input_was_string = False
if isinstance(sentences, str): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([self._text_length(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
sentences_batch = sentences_sorted[start_index:start_index+batch_size]
features = self.tokenize(sentences_batch)
features = batch_to_device(features, device)
with torch.no_grad():
out_features = self.forward(features)
embeddings = out_features[output_value]
if output_value == 'token_embeddings':
#Set token embeddings to 0 for padding tokens
input_mask = out_features['attention_mask']
input_mask_expanded = input_mask.unsqueeze(-1).expand(embeddings.size()).float()
embeddings = embeddings * input_mask_expanded
embeddings = embeddings.detach()
# fixes for #522 and #487 to avoid oom problems on gpu with large datasets
if convert_to_numpy:
embeddings = embeddings.cpu()
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
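    # Usage sketch for encode() (hedged, not part of the original file; the model name below is
    # illustrative and must be downloadable or already present on disk):
    #
    #     model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
    #     embeddings = model.encode(["Hello world", "Sentence embeddings are useful"],
    #                               convert_to_numpy=True)
    #     print(embeddings.shape)   # (2, embedding_dimension)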
def start_multi_process_pool(self, target_devices: List[str] = None):
"""
Starts a multi-process pool to run the encoding with several independent processes.
This method is recommended if you want to encode on multiple GPUs. It is advised
to start only one process per GPU. This method works together with encode_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
:return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logger.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], batch_size: int = 32, chunk_size: int = None):
"""
This method allows running encode() on multiple GPUs. The sentences are chunked into smaller packages
and sent to the individual processes, which encode them on the different GPUs. This method is only suitable
for encoding large sets of sentences.
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param batch_size: Encode sentences with batch size
:param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined automatically.
:return: Numpy matrix with all embeddings
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logger.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
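    # Usage sketch for multi-GPU encoding (hedged; the model name and the `sentences` list are
    # assumptions, and at least one CUDA device is assumed to be visible):
    #
    #     model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
    #     pool = model.start_multi_process_pool()              # one process per visible GPU
    #     embeddings = model.encode_multi_process(sentences, pool, batch_size=32)
    #     SentenceTransformer.stop_multi_process_pool(pool)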
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, batch_size, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, show_progress_bar=False, convert_to_numpy=True, batch_size=batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated.
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, text: str):
"""
Tokenizes the text
"""
return self._first_module().tokenize(text)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
for mod in reversed(self._modules.values()):
sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
if callable(sent_embedding_dim_method):
return sent_embedding_dim_method()
return None
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
"""
if path is None:
return
os.makedirs(path, exist_ok=True)
logger.info("Save model to {}".format(path))
contained_modules = []
for idx, name in enumerate(self._modules):
module = self._modules[name]
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(contained_modules, fOut, indent=2)
with open(os.path.join(path, 'config.json'), 'w') as fOut:
json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0].texts)
texts = [[] for _ in range(num_texts)]
labels = []
for example in batch:
for idx, text in enumerate(example.texts):
texts[idx].append(text)
labels.append(example.label)
labels = torch.tensor(labels).to(self._target_device)
sentence_features = []
for idx in range(num_texts):
tokenized = self.tokenize(texts[idx])
batch_to_device(tokenized, self._target_device)
sentence_features.append(tokenized)
return sentence_features, labels
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
Helper function to get the length of the input text. The text can be either
a list of ints (a single text as input) or a tuple of lists of ints
(representing several text inputs to the model).
"""
if isinstance(text, dict):
return len(next(iter(text.values())))
elif len(text) == 0 or isinstance(text[0], int):
return len(text)
else:
return sum([len(t) for t in text])
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator = None,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True
):
"""
Train the model with the given training objectives.
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to ensure equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disk.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch equals the size of the smallest DataLoader from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
:param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate over this many training steps. Afterwards, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, the model is evaluated with the evaluator after every evaluation_steps training steps
:param output_path: Storage path for the model and evaluation files
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
:param max_grad_norm: Maximum gradient norm, used for gradient clipping.
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
:param show_progress_bar: If True, output a tqdm progress bar
"""
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
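# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of how the fit() API defined above might be called with a
# single training objective, as its docstring describes. The model, loss,
# dataset, and evaluator names are assumptions for illustration only; only the
# fit() signature is taken from the code above.
def _example_fit_usage(model, train_examples, train_loss, dev_evaluator):
    from torch.utils.data import DataLoader
    train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
    model.fit(
        train_objectives=[(train_dataloader, train_loss)],  # one (DataLoader, LossFunction) pair
        epochs=1,
        warmup_steps=100,
        evaluator=dev_evaluator,
        evaluation_steps=1000,
        output_path='output/example-model',
    )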
|
test_random.py
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns)
from numpy import random
from numpy.compat import asbytes
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
np.testing.assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_( -5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1 ,p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1 ,p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1 ,p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1 ,p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1 ,p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1 ,p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1 , p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
""" Make sure the cached every-other-Gaussian is reset.
"""
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
""" When the state is saved with a cached Gaussian, make sure the cached
Gaussian is restored.
"""
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
""" Make sure we can accept old state tuples that do not have the cached
Gaussian value.
"""
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
""" Ensure that the negative binomial results take floating point
arguments without truncation.
"""
self.prng.negative_binomial(0.5, 0.5)
class TestRandomDist(TestCase):
""" Make sure the random distrobution return the correct value for a
given seed
"""
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[ 0.61879477158567997, 0.59162362775974664],
[ 0.88868358904449662, 0.89165480011560816],
[ 0.4575674820298663, 0.7781880808593471 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[ 1.34016345771863121, 1.73759122771936081],
[ 1.498988344300628, -0.2286433324536169 ],
[ 2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[ 31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
actual = np.random.random_integers(-99, 99, size=(3, 2))
desired = np.array([[ 31, 3],
[-52, 41],
[-48, -66]])
np.testing.assert_array_equal(actual, desired)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[ 0.61879477158567997, 0.59162362775974664],
[ 0.88868358904449662, 0.89165480011560816],
[ 0.4575674820298663, 0.7781880808593471 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
np.testing.assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
np.testing.assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
np.testing.assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
np.testing.assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False,
p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
np.testing.assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays, and multidimensional versions of both:
for conv in [lambda x: x,
np.asarray,
lambda x: [(i, i) for i in x],
lambda x: np.asarray([(i, i) for i in x])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
np.testing.assert_array_equal(actual, desired)
def test_shuffle_flexible(self):
# gh-4270
arr = [(0, 1), (2, 3)]
dt = np.dtype([('a', np.int32, 1), ('b', np.int32, 1)])
nparr = np.array(arr, dtype=dt)
a, b = nparr[0].copy(), nparr[1].copy()
for i in range(50):
np.random.shuffle(nparr)
assert_(a in nparr)
assert_(b in nparr)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5,4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
ma = np.ma.count_masked(a)
mb = np.ma.count_masked(b)
for i in range(50):
np.random.shuffle(a)
self.assertEqual(ma, np.ma.count_masked(a))
np.random.shuffle(b)
self.assertEqual(mb, np.ma.count_masked(b))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array([[ 1.45341850513746058e-02, 5.31297615662868145e-04],
[ 1.85366619058432324e-06, 4.19214516800110563e-03],
[ 1.58405155108498093e-04, 1.26252891949397652e-04]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
np.testing.assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[ 63.87858175501090585, 68.68407748911370447],
[ 65.77116116901505904, 47.09686762438974483],
[ 72.3828403199695174, 74.18408615260374006]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[ 0.54539444573611562, 0.45460555426388438],
[ 0.62345816822039413, 0.37654183177960598]],
[[ 0.55206000085785778, 0.44793999914214233],
[ 0.58964023305154301, 0.41035976694845688]],
[[ 0.59266909280647828, 0.40733090719352177],
[ 0.56974431743975207, 0.43025568256024799]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[ 1.08342649775011624, 1.00607889924557314],
[ 2.46628830085216721, 2.49668106809923884],
[ 0.68717433461363442, 1.69175666993575979]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[ 1.21975394418575878, 1.75135759791559775],
[ 1.44803115017146489, 1.22108959480396262],
[ 1.02176975757740629, 1.34431827623300415]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 24.60509188649287182, 28.54993563207210627],
[ 26.13476110204064184, 12.56988482927716078],
[ 31.71863275789960568, 33.30143302795922011]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[ 8, 7],
[17, 17],
[ 5, 12]])
np.testing.assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc = .123456789, scale = 2.0, size = (3, 2))
desired = np.array([[ 0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[ 1.10651090478803416, -0.69535848626236174]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[ 9, 9]])
np.testing.assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
np.testing.assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
np.testing.assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 0.66599721112760157, 0.52829452552221945],
[ 3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 1.09232835305011444, 0.8648196662399954 ],
[ 4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 16.50698631688883822, 36.54846706092654784],
[ 22.67886599981281748, 0.71617561058995771],
[ 65.72798501792723869, 86.84341601437161273]])
np.testing.assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[ 2, 2],
[ 6, 17],
[ 3, 6]])
np.testing.assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
np.testing.assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean= (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[ 0.29768848031692957, 10.]]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean= [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
rng = np.random.multivariate_normal
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
np.testing.assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df = 5, nonc = 5, size = (3, 2))
desired = np.array([[ 23.91905354498517511, 13.35324692733826346],
[ 31.22452661329736401, 16.60047399466177254],
[ 5.03461598262724586, 17.94973089023519464]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum = 5, dfden = 2, nonc = 1,
size = (3, 2))
desired = np.array([[ 1.40598099674926669, 0.34207973179285761],
[ 3.57715069265772545, 7.92632662577829805],
[ 0.43741599463544162, 1.1774208752428319 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc = .123456789, scale = 2.0, size = (3, 2))
desired = np.array([[ 2.80378370443726244, 3.59863924443872163],
[ 3.121433477601256, -0.33382987590723379],
[ 4.18552478636557357, 4.46410668111310471]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a =.123456789, size = (3, 2))
desired = np.array([[ 2.46852460439034849e+03, 1.41286880810518346e+03],
[ 5.28287797029485181e+07, 6.57720981047328785e+07],
[ 1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam = .123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
np.testing.assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a =.123456789, size = (3, 2))
desired = np.array([[ 0.02048932883240791, 0.01424192241128213],
[ 0.38446073748535298, 0.39499689943484395],
[ 0.00177699707563439, 0.13115505880863756]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale = 10, size = (3, 2))
desired = np.array([[ 13.8882496494248393, 13.383318339044731 ],
[ 20.95413364294492098, 21.08285015800712614],
[ 11.06066537006854311, 17.35468505778271009]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size = (3, 2))
desired = np.array([[ 0.77127660196445336, -6.55601161955910605],
[ 0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size = (3, 2))
desired = np.array([[ 0.96441739162374596, 0.89556604882105506],
[ 2.1953785836319808, 2.22243285392490542],
[ 0.6116915921431676, 1.50592546727413201]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape = 3, size = (3, 2))
desired = np.array([[ 5.50841531318455058, 6.62953470301903103],
[ 5.93988484943779227, 2.31044849402133989],
[ 7.54838614231317084, 8.012756093271868 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size = (3, 2))
desired = np.array([[ 1.34016345771863121, 1.73759122771936081],
[ 1.498988344300628, -0.2286433324536169 ],
[ 2.031033998682787, 2.17032494605655257]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df = 10, size = (3, 2))
desired = np.array([[ 0.97140611862659965, -0.08830486548450577],
[ 1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left = 5.12, mode = 10.23, right = 20.34,
size = (3, 2))
desired = np.array([[ 12.68117178949215784, 12.4129206149193152 ],
[ 16.20131377335158263, 16.25692138747600524],
[ 11.20400690911820263, 14.4978144835829923 ]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low = 1.23, high=10.54, size = (3, 2))
desired = np.array([[ 6.99097932346268003, 6.73801597444323974],
[ 9.50364421400426274, 9.53130618907631089],
[ 5.48995325769805476, 8.47493103280052118]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu = 1.23, kappa = 1.54, size = (3, 2))
desired = np.array([[ 2.28567572673902042, 2.89163838442285037],
[ 0.38198375564286025, 2.57638023113890746],
[ 1.19153771588353052, 1.83509849681825354]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean = 1.23, scale = 1.54, size = (3, 2))
desired = np.array([[ 3.82935265715889983, 5.13125249184285526],
[ 0.35045403618358717, 1.50832396872003538],
[ 0.24124319895843183, 0.22031101461955038]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a = 1.23, size = (3, 2))
desired = np.array([[ 0.97097342648766727, 0.91422896443565516],
[ 1.89517770034962929, 1.91414357960479564],
[ 0.67057783752390987, 1.39494046635066793]])
np.testing.assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a = 1.23, size = (3, 2))
desired = np.array([[66, 29],
[ 1, 1],
[ 3, 13]])
np.testing.assert_array_equal(actual, desired)
class TestThread:
""" make sure each state produces the same sequence even in threads """
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
np.testing.assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000,6))
if __name__ == "__main__":
run_module_suite()
|
nonblock.py
|
"""
The nonblock module provides a handler class (NonBlockHandler) which allows
a tasklet to momentarily run in a separate thread.
If a tasklet yields Unblock(), its next iteration will be performed in a
thread.
The handler uses a thread pool with a default of 2 worker threads.
"""
from Queue import Queue, Empty, Full
from threading import Thread
import time
class Unblock(object):
"""yield Unblock(task) to process next iteration in a seperate thread.
"""
def __init__(self):
pass
class NonBlockHandler(object):
"""Allows a tasklet to yield Unblock(), which will cause the next
iteration to run in a separate thread.
"""
active = False
handled_types = [Unblock]
def __init__(self, worker_count=2):
self.inbox = Queue()
self.outbox = Queue()
self.running = True
self.running_tasks = 0
self.workers = None
self.worker_count = worker_count
def status(self):
return self.running_tasks
def start_workers(self):
self.workers = set([Thread(target=self.worker_thread) for i in xrange(self.worker_count)])
for worker in self.workers:
worker.setDaemon(True)
worker.start()
def worker_thread(self):
while self.running:
task = self.inbox.get()
try:
r = task.next()
except Exception, e:
r = e
self.outbox.put((r,task))
def handle(self, unblock, task):
self.active = True
if self.workers is None: self.start_workers()
self.running_tasks += 1
self.inbox.put(task)
def pre_schedule(self):
while self.running_tasks > 0:
try:
r,task = self.outbox.get_nowait()
except Empty:
self.active = True
return
self.running_tasks -= 1
self.schedule.install(task, initial_value=r)
self.active = False
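# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical tasklet that yields Unblock() so that its next iteration is
# executed in one of the NonBlockHandler worker threads. How the handler is
# registered with the surrounding scheduler is not shown in this module and is
# therefore omitted here.
def _example_tasklet():
    yield Unblock()      # the next step below runs inside a worker thread
    time.sleep(0.1)      # stands in for a blocking call that should not stall the scheduler
    yield None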
|
script.py
|
import os
import traceback
import threading
import shlex
from . import controller
class ScriptError(Exception):
pass
class ScriptContext:
def __init__(self, master):
self._master = master
def log(self, message, level="info"):
"""
Logs an event.
By default, only events with level "error" get displayed. This can be controlled with the "-v" switch.
How log messages are handled depends on the front-end. mitmdump will print them to stdout,
mitmproxy sends output to the eventlog for display ("e" keyboard shortcut).
"""
self._master.add_event(message, level)
def kill_flow(self, f):
"""
Kills a flow immediately. No further data will be sent to the client or the server.
"""
f.kill(self._master)
def duplicate_flow(self, f):
"""
Returns a duplicate of the specified flow. The flow is also
injected into the current state, and is ready for editing, replay,
etc.
"""
self._master.pause_scripts = True
f = self._master.duplicate_flow(f)
self._master.pause_scripts = False
return f
def replay_request(self, f):
"""
Replay the request on the current flow. The response will be added
to the flow object.
"""
return self._master.replay_request(f, block=True, run_scripthooks=False)
@property
def app_registry(self):
return self._master.apps
class Script:
"""
The instantiator should do something along this vein:
s = Script(argv, master)
s.load()
"""
def __init__(self, command, master):
self.command = command
self.argv = self.parse_command(command)
self.ctx = ScriptContext(master)
self.ns = None
self.load()
@classmethod
def parse_command(klass, command):
if not command or not command.strip():
raise ScriptError("Empty script command.")
if os.name == "nt": # Windows: escape all backslashes in the path.
backslashes = shlex.split(command, posix=False)[0].count("\\")
command = command.replace("\\", "\\\\", backslashes)
args = shlex.split(command)
args[0] = os.path.expanduser(args[0])
if not os.path.exists(args[0]):
raise ScriptError(
("Script file not found: %s.\r\n"
"If your script path contains spaces, "
"make sure to wrap it in additional quotes, e.g. -s \"'./foo bar/baz.py' --args\".") %
args[0])
elif not os.path.isfile(args[0]):
raise ScriptError("Not a file: %s" % args[0])
return args
def load(self):
"""
Loads a module.
Raises ScriptError on failure, with argument equal to an error
message that may be a formatted traceback.
"""
ns = {}
try:
exec(compile(open(self.argv[0], "rb").read(), self.argv[0], 'exec'), ns, ns)
except Exception as v:
raise ScriptError(traceback.format_exc(v))
self.ns = ns
r = self.run("start", self.argv)
if not r[0] and r[1]:
raise ScriptError(r[1][1])
def unload(self):
return self.run("done")
def run(self, name, *args, **kwargs):
"""
Runs a plugin method.
Returns:
(True, retval) on success.
(False, None) on nonexistent method.
(False, (exc, traceback string)) if there was an exception.
"""
f = self.ns.get(name)
if f:
try:
return (True, f(self.ctx, *args, **kwargs))
except Exception as v:
return (False, (v, traceback.format_exc(v)))
else:
return (False, None)
class ReplyProxy(object):
def __init__(self, original_reply, script_thread):
self.original_reply = original_reply
self.script_thread = script_thread
self._ignore_call = True
self.lock = threading.Lock()
def __call__(self, *args, **kwargs):
with self.lock:
if self._ignore_call:
self.script_thread.start()
self._ignore_call = False
return
self.original_reply(*args, **kwargs)
def __getattr__(self, k):
return getattr(self.original_reply, k)
def _handle_concurrent_reply(fn, o, *args, **kwargs):
# Make first call to o.reply a no op and start the script thread.
# We must not start the script thread before, as this may lead to a nasty race condition
# where the script thread replies a different response before the normal reply, which then gets swallowed.
def run():
fn(*args, **kwargs)
# If the script did not call .reply(), we have to do it now.
reply_proxy()
script_thread = ScriptThread(target=run)
reply_proxy = ReplyProxy(o.reply, script_thread)
o.reply = reply_proxy
class ScriptThread(threading.Thread):
name = "ScriptThread"
def concurrent(fn):
if fn.__name__ in (
"request",
"response",
"error",
"clientconnect",
"serverconnect",
"clientdisconnect"):
def _concurrent(ctx, obj):
_handle_concurrent_reply(fn, obj, ctx, obj)
return _concurrent
raise NotImplementedError(
"Concurrent decorator not supported for this method.")
|
simplequeue.py
|
import json
import threading
import time
from anchore_engine.clients.services import http
from anchore_engine.subsys import logger
from anchore_engine.utils import get_threadbased_id
from anchore_engine.clients.services.internal import InternalServiceClient
import retrying
class LeaseAcquisitionFailedError(Exception):
pass
class SimpleQueueClient(InternalServiceClient):
__service__ = 'simplequeue'
def get_queues(self):
return self.call_api(http.anchy_get, '/queues')
def qlen(self, name):
resp = self.round_robin_call_api(http.anchy_get, 'queues/{queue}/qlen', path_params={'queue': name})
return int(resp)
def enqueue(self, name, inobj, qcount=0, forcefirst=False):
return self.round_robin_call_api(http.anchy_post, 'queues/{queue}', path_params={'queue': name}, query_params={'qcount': str(qcount), 'forcefirst': str(forcefirst)}, body=json.dumps(inobj))
def delete_message(self, name, receipt_handle):
return self.round_robin_call_api(http.anchy_delete, path='queues/{queue}', path_params={'queue': name}, query_params={'receipt_handle': receipt_handle})
def is_inqueue(self, name, inobj):
return self.round_robin_call_api(http.anchy_post, path='queues/{queue}/is_inqueue', path_params={'queue': name}, body=json.dumps(inobj))
def dequeue(self, name, visibility_timeout=0, max_wait_seconds=0):
return self.round_robin_call_api(http.anchy_get, 'queues/{queue}', path_params={'queue': name}, query_params={'wait_max_seconds': max_wait_seconds, 'visibility_timeout': visibility_timeout})
def update_message_visibility_timeout(self, name, receipt_handle, visibility_timeout):
return self.round_robin_call_api(http.anchy_put, 'queues/{queue}', path_params={'queue': name}, query_params={'receipt_handle': receipt_handle, 'visibility_timeout': visibility_timeout})
def create_lease(self, lease_id):
return self.round_robin_call_api(http.anchy_post, 'leases', query_params={'lease_id': lease_id})
def list_leases(self):
return self.round_robin_call_api(http.anchy_get, 'leases')
def describe_lease(self, lease_id):
return self.round_robin_call_api(http.anchy_get, 'leases/{lease_id}', path_params={'lease_id': lease_id})
def acquire_lease(self, lease_id, client_id, ttl):
return self.round_robin_call_api(http.anchy_get, 'leases/{lease_id}/acquire', path_params={'lease_id': lease_id}, query_params={'client_id': client_id, 'ttl': ttl})
def release_lease(self, lease_id, client_id, epoch):
return self.round_robin_call_api(http.anchy_get, 'leases/{lease_id}/release', path_params={'lease_id': lease_id}, query_params={'client_id': client_id, 'epoch': epoch})
def refresh_lease(self, lease_id, client_id, epoch, ttl):
return self.round_robin_call_api(http.anchy_put, 'leases/{lease_id}/ttl', path_params={'lease_id': lease_id}, query_params={'client_id': client_id, 'ttl': ttl, 'epoch': epoch})
def run_target_with_queue_ttl(user_auth, queue, visibility_timeout, target, max_wait_seconds=0, autorefresh=True, retries=1, backoff_time=0, *args, **kwargs):
"""
Run a target function with the message pulled from the queue. If autorefresh=True, then run target as a thread and periodically check
for completion, updating the message visibility timeout to keep it fresh until the thread completes.
The function passed as target should expect the message object as the first argument, with *args appended after in the arg list.
:param user_auth:
:param queue:
:param max_wait_seconds:
:param visibility_timeout:
:param target:
:param autorefresh:
:param retries:
:param backoff_time:
:param args:
:param kwargs:
:return:
"""
client = SimpleQueueClient(as_account=user_auth[0], user=user_auth[0], password=user_auth[1])
ex = None
qobj = None
@retrying.retry(stop_max_attempt_number=retries, wait_incrementing_start=0, wait_incrementing_increment=backoff_time*1000)
def get_msg():
logger.debug("Checking queue {} for message with vis timeout {}".format(queue, visibility_timeout))
return client.dequeue(queue, max_wait_seconds=max_wait_seconds, visibility_timeout=visibility_timeout)
qobj = get_msg()
logger.debug('Got msg: {}'.format(qobj))
if not qobj:
logger.debug("Got empty message from queue - nothing to do")
return(True)
receipt_handle = qobj.get('receipt_handle')
msg_id = qobj.get('id')
if not receipt_handle:
raise Exception('No receipt handle found in queue message: {}'.format(qobj))
try:
# Relies upon the queue configuration of 1 outstanding message (inflight) at a time for serialization across hosts
t = time.time()
if qobj:
args = tuple([qobj] + list(args))
task = threading.Thread(target=target, args=args, kwargs=kwargs)
task.start()
if autorefresh:
# Run the task thread and monitor it, refreshing the task lease as needed
while task.isAlive():
# If we're halfway to the timeout, refresh to have a safe buffer
if time.time() - t > (visibility_timeout / 2):
# refresh the lease
for i in range(3):
try:
resp = client.update_message_visibility_timeout(name=queue, receipt_handle=receipt_handle, visibility_timeout=visibility_timeout)
if resp:
t = time.time()
logger.debug('Msg with handle {} refreshed with new expiration: {}'.format(receipt_handle, resp))
break
except Exception as e:
logger.exception('Error updating visibility timeout {}'.format(receipt_handle))
else:
logger.warn('Visibility refresh failed to succeed after retries. Msg {} may be replayed due to timeout'.format(msg_id))
task.join(timeout=1)
else:
# Just wait for thread to complete
task.join()
except Exception as err:
logger.warn("failed to process task this cycle: " + str(err))
finally:
client.delete_message(queue, receipt_handle)
# Always delete the message. Other handlers will ensure things are queued ok.
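# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical caller of run_target_with_queue_ttl() above. Per the docstring,
# the target receives the dequeued message object as its first argument; the
# queue name, credentials, and handler body below are assumptions.
def _example_queue_ttl_usage():
    def handle_message(message, source=None):
        logger.debug('handling message {} from {}'.format(message.get('id'), source))
    run_target_with_queue_ttl(('admin', 'secret'), queue='example_queue',
                              visibility_timeout=60, target=handle_message,
                              max_wait_seconds=5, source='worker-1')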
def run_target_with_lease(user_auth, lease_id, target, ttl=60, client_id=None, autorefresh=True, *args, **kwargs):
"""
Run a handler within the context of a lease that is auto-refreshed as long as the handler runs.
Uses a thread for the handler and a monitor to watch state and update the lease ttl.
The leases are fairly slow to actuate, so expect to use this mechanism for longer running tasks where the lease duration should be > 10 sec
:param user_auth:
:param lease_id:
:param target:
:param args:
:param kwargs:
:return:
"""
handler_thread = threading.Thread(target=target, args=args, kwargs=kwargs)
client = SimpleQueueClient(as_account=user_auth[0], user=user_auth[0], password=user_auth[1])
# Ensure task lease exists for acquisition and create if not found
lease_resp = client.describe_lease(lease_id)
if not lease_resp:
lease_resp = client.create_lease(lease_id)
if not lease_resp:
raise Exception('Cannot locate or create a lease with id {}'.format(lease_id))
# Acquire the task lease and run the task
lease = None
try:
my_id = get_threadbased_id() if client_id is None else client_id
lease = client.acquire_lease(lease_id, client_id=my_id, ttl=ttl)
if not lease:
logger.debug('No lease returned from service, cannot proceed with task execution. Will retry on next cycle. Lease_id: {}'.format(lease_id))
raise LeaseAcquisitionFailedError('Could not acquire lease {} within timeout'.format(lease_id))
else:
logger.debug('Got lease: {}'.format(lease))
t = time.time()
logger.debug('Starting target={} with lease={} and client_id={}'.format(target.__name__, lease_id, lease['held_by']))
handler_thread.start()
if autorefresh:
# Run the task thread and monitor it, refreshing the task lease as needed
while handler_thread.isAlive():
# If we're halfway to the timeout, refresh to have a safe buffer
if time.time() - t > (ttl / 2):
# refresh the lease
for i in range(3):
try:
resp = client.refresh_lease(lease_id=lease['id'], client_id=lease['held_by'], epoch=lease['epoch'], ttl=ttl)
logger.debug('Lease {} refreshed with response: {}'.format(lease_id, resp))
if resp:
lease = resp
t = time.time()
break
except Exception as e:
logger.exception('Error updating lease {}'.format(lease['id']))
else:
logger.warn('Lease refresh failed to succeed after retries. Lease {} may be lost due to timeout'.format(lease_id))
handler_thread.join(timeout=1)
else:
handler_thread.join()
logger.debug('Target thread returned')
except Exception as e:
logger.warn('Attempting to get lease {} failed: {}'.format(lease_id, e))
raise e
finally:
try:
if lease:
resp = client.release_lease(lease_id=lease['id'], client_id=lease['held_by'], epoch=lease['epoch'])
logger.debug('Lease {} released with response: {}'.format(lease_id, resp))
else:
logger.debug('No lease found to release.')
except Exception as e:
logger.exception('Error releasing lease. Lease will expire on its own. Err: {}'.format(str(e)))
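# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical caller of run_target_with_lease() above: the task runs only
# while the named lease is held, and the lease is auto-refreshed until the task
# thread finishes. Credentials, lease id, and task body are assumptions.
def _example_lease_usage():
    def my_task(task_name):
        logger.debug('doing leased work for {}'.format(task_name))
        time.sleep(5)
    run_target_with_lease(('admin', 'secret'), 'example_lease', my_task,
                          ttl=60, task_name='nightly-sync')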
|
store_gc_service.py
|
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import threading
import time
from pants.pantsd.service.pants_service import PantsService
class StoreGCService(PantsService):
"""Store Garbage Collection Service.
This service both ensures that in-use files continue to be present in the engine's Store, and
performs occasional garbage collection to bound the size of the engine's Store.
"""
_LEASE_EXTENSION_INTERVAL_SECONDS = 30 * 60
_GARBAGE_COLLECTION_INTERVAL_SECONDS = 4 * 60 * 60
def __init__(self, scheduler):
super(StoreGCService, self).__init__()
self._scheduler = scheduler
self._logger = logging.getLogger(__name__)
@staticmethod
def _launch_thread(f):
t = threading.Thread(target=f)
t.daemon = True
t.start()
return t
def _extend_lease(self):
while 1:
# Use the fork lock to ensure this thread isn't cloned via fork while holding the graph lock.
with self.fork_lock:
self._logger.debug('Extending leases')
self._scheduler.lease_files_in_graph()
self._logger.debug('Done extending leases')
time.sleep(self._LEASE_EXTENSION_INTERVAL_SECONDS)
def _garbage_collect(self):
while 1:
time.sleep(self._GARBAGE_COLLECTION_INTERVAL_SECONDS)
# Grab the fork lock in case lmdb internally isn't fork-without-exec-safe.
with self.fork_lock:
self._logger.debug('Garbage collecting store')
self._scheduler.garbage_collect_store()
self._logger.debug('Done garbage collecting store')
def run(self):
"""Main service entrypoint. Called via Thread.start() via PantsDaemon.run()."""
jobs = (self._extend_lease, self._garbage_collect)
threads = [self._launch_thread(job) for job in jobs]
while not self.is_killed:
for thread in threads:
# If any job threads die, we want to exit the `PantsService` thread to cause
# a daemon teardown.
if not thread.isAlive():
self._logger.warn('thread {} died - aborting!'.format(thread))
return
thread.join(.1)
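# --- Illustrative usage sketch (not part of the original module) ---
# Per the docstring on run(), PantsDaemon is expected to drive this service via
# Thread.start(). A hypothetical manual wiring might look like the following;
# `scheduler` is assumed to expose lease_files_in_graph() and
# garbage_collect_store().
#
#   service = StoreGCService(scheduler)
#   t = threading.Thread(target=service.run, name='store-gc')
#   t.daemon = True
#   t.start()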
|
test_poplib.py
|
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import time
import errno
from unittest import TestCase
from test import support as test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
PORT = 0
# the dummy data returned by server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: postmaster@python.org\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
class DummyPOP3Handler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' % (arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
def cmd_quit(self, arg):
self.push('+OK closing.')
self.close_when_done()
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_too_long_lines(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd,
'echo +%s' % ((poplib._MAXLINE + 10) * 'a'))
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
def test_apop(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
def test_quit(self):
resp = self.client.quit()
self.assertTrue(resp)
self.assertIsNone(self.client.sock)
self.assertIsNone(self.client.file)
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
import ssl
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert.pem")
class DummyPOP3_SSLHandler(DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
ssl_socket = ssl.wrap_socket(self.socket, certfile=CERTFILE,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(ssl_socket)
# Must try handshake before calling push()
self._ssl_accepting = True
self._do_ssl_handshake()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
DummyPOP3Handler.handle_read(self)
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def server(self, evt, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except socket.timeout:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def testTimeoutNone(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(pop.sock.gettimeout() is None)
pop.sock.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=30)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def test_main():
tests = [TestPOP3Class, TestTimeouts]
if SUPPORTS_SSL:
tests.append(TestPOP3_SSLClass)
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
mavmonitor.py
|
from pymavlink import mavutil
from threading import Thread
from PyQt5.QtGui import QTextCursor
from maximum_roverdrive.tablemodel import TableModel
from maximum_roverdrive.astral_dusk import is_dark
_MAX_STATUSTEXT_MESSAGES = 256 # number of STATUSTEXT messages to display
_ACK_RESULTS = ['ACCEPTED', 'TEMP_REJECTED', 'DENIED', 'UNKNOWN (bad arguments?)', 'FAILED', 'IN_PROGRESS', 'CANCELLED']
class Location:
lat = None
lng = None
alt = None
class MavMonitor:
_location = Location()
def __init__(self, window=None, cmd_list=None):
if window is None or cmd_list is None:
self._received_messages = {}
else:
self._window = window
self._command_list = cmd_list
self._window.text_status.status_text_changed.connect(self.__update_text_status_messages)
self._connection = mavutil.mavlink_connection(window.combo_port.currentText())
self._received_messages = {}
self._model = TableModel()
self._is_headlight_on = False
self._keep_alive = False
self._is_alive = False
self.__init_table()
def __get_msg(self, msg):
msg_split = msg.split('.')
try:
return self._received_messages[msg_split[0]][msg_split[1]]
except KeyError:
return 'NO DATA'
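    # Example (illustrative message/field; any cached MAVLink message works the
    # same way): __get_msg('GLOBAL_POSITION_INT.relative_alt') returns the
    # 'relative_alt' field of the last GLOBAL_POSITION_INT message received,
    # or 'NO DATA' if that message type has not been seen yet.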
def __init_table(self):
data = []
for msg in self._window.cfg.messages:
data.append([msg, 'NO DATA'])
self._model = TableModel(data, self._window.cfg.messages)
self._window.table_messages.setModel(self._model)
self._window.table_messages.resizeColumnsToContents()
def __update_table(self):
for row in range(self._model.rowCount()):
index = self._model.index(row, 0)
msg = self._model.data(index)
index = self._model.index(row, 1)
value = self.__get_msg(msg)
self._model.setData(index, value)
def __update_text_status_messages(self, severity, msg): # yet another PyQt5 thread workaround
prepend = ''
append = '<br>'
if severity < 6:
prepend = '<span style="color:'
append = '</span><br>'
if severity == 5:
prepend = f'{prepend} #53a0ed;">'
elif severity == 4:
prepend = f'{prepend} yellow;">'
elif severity == 3 or severity == 0:
prepend = f'{prepend} red;">'
elif 1 <= severity <= 2:
prepend = f'{prepend} orange;">'
cursor = QTextCursor(self._window.text_status.document())
cursor.setPosition(0)
self._window.text_status.setTextCursor(cursor)
self._window.text_status.insertHtml(f'{prepend}{msg}{append}\n')
if self._window.text_status.toPlainText().count('\n') >= _MAX_STATUSTEXT_MESSAGES:
cursor.movePosition(QTextCursor.End)
cursor.select(QTextCursor.LineUnderCursor)
cursor.removeSelectedText()
cursor.deletePreviousChar()
def __update_thread(self):
""" this became a bit of spaghetti code as I tested and added features
if this project grows any legs, this method needs to be split into multiple sub-functions """
self._is_alive = True
while self._keep_alive:
key = None
msg_received = None
try:
msg_received = self.connection.recv_match(blocking=True, timeout=0.2).to_dict()
key = msg_received['mavpackettype']
del msg_received['mavpackettype']
self._received_messages.update({key: msg_received})
except AttributeError:
pass
self.__update_table()
if key == 'GLOBAL_POSITION_INT': # store location to avoid the delay in the pymavlink location() method
self._location.lat = float(msg_received['lat']) / 10000000.0
self._location.lng = float(msg_received['lon']) / 10000000.0
self._location.alt = float(msg_received['alt']) / 1000.0
if key == 'STATUSTEXT': # since we're capturing all traffic, keep a status history
self._window.text_status.status_text_changed.emit(int(msg_received['severity']), msg_received['text'])
if key == 'COMMAND_ACK':
try:
cmd = list(filter(lambda command: command[1] == msg_received['command'], self._command_list))[0][0]
cmd = cmd.replace('MAV_CMD_', '')
except IndexError:
cmd = f'COMMAND #{msg_received["command"]}'
result = msg_received['result']
self._window.statusBar().showMessage(f'{cmd}: {_ACK_RESULTS[result]}')
if self._window.checkbox_auto_headlights.isChecked(): # now check way too often if it's dark outside
if is_dark(self._location.lat, self._location.lng, self._location.alt) != self._is_headlight_on:
try:
relay = float(self._window.combo_headlight_relay.lineEdit().text())
on = 0 if self._window.checkbox_relay_active_low.isChecked() else 1
if is_dark(self._location.lat, self._location.lng, self._location.alt):
self._connection.set_relay(relay, on)
self._window.text_status.status_text_changed.emit(5, 'Headlights: ON')
self._is_headlight_on = True
else:
self._connection.set_relay(relay, on ^ 1)
self._window.text_status.status_text_changed.emit(5, 'Headlights: OFF')
self._is_headlight_on = False
except ValueError:
pass # likely to happen if relay value is not a number
self._is_alive = False
def start_updates(self):
self._keep_alive = True
Thread(target=self.__update_thread, daemon=True).start()
def add_msg(self, msg, attr, multiplier=1.0, low=0.0, high=0.0):
msg_fullname = msg + '.' + attr
self._model.updateDataParameters(msg_fullname, multiplier, low, high)
exists = False
for row in range(self._model.rowCount()):
index = self._model.index(row, 0)
msg = self._model.data(index)
if msg == msg_fullname:
exists = True
if not exists:
self._model.appendRow([msg_fullname, 'NO DATA'])
self._window.table_messages.resizeColumnsToContents()
# TODO: replace the 'NO DATA' row created when the user removes all messages
def remove_selected(self):
if self._model.rowCount() > 1:
row = self._window.table_messages.selectedIndexes()[0].row()
self._model.removeRow(row)
else: # don't remove last row
index = self._model.index(0, 0)
self._model.setData(index, 'NO DATA')
self._window.table_messages.resizeColumnsToContents()
def disconnect(self):
self._keep_alive = False
while self._is_alive: # wait for thread to stop
pass
self._connection.close()
@property
def connection(self):
return self._connection # expose the pymavlink connection
@property
def messages(self):
return self._received_messages # expose the message dictionary
@property
def location(self):
return self._location
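# Minimal usage sketch (hedged; assumes a Qt main window and command list shaped
# like the ones this project builds elsewhere - the names below are illustrative):
#
#   monitor = MavMonitor(window=main_window, cmd_list=mav_commands)
#   monitor.add_msg('VFR_HUD', 'groundspeed')   # watch one message field
#   monitor.start_updates()                     # spawns the background reader thread
#   ...
#   monitor.disconnect()                        # stops the thread and closes the link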
|
RQSS_Extractor.py
|
import csv
import os
import sys
from argparse import ArgumentParser
from datetime import datetime
from multiprocessing.context import Process
from pathlib import Path
from typing import List, Optional, Union
from SPARQLWrapper import JSON, SPARQLWrapper
from EntitySchemaExtractor import EntitySchemaExtractor
from Queries import RQSS_QUERIES
def genargs(prog: Optional[str] = None) -> ArgumentParser:
parser = ArgumentParser(prog)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--input", help="Input RDF file of the dataset")
group.add_argument(
"--endpoint", help="The local/public endpoint of the dataset")
parser.add_argument(
"-f", "--format", help="Input file RDF format (nt, ttl)", default='nt')
parser.add_argument(
"-o", "--output-dir", help="Output destination directory to store extarcted components from the RDF input file", default=os.getcwd()+os.sep+'rqss_extractor_output')
parser.add_argument("-eExt", "--extract-external",
help="Extract all external sources uris (Wikibase referencing model) and save them on output dir. Collects data for computing Dimensions: Availability, Licensing, Security", action='store_true')
parser.add_argument("-sn", "--statement-nodes",
help="Extract all statement nodes uris (Wikibase referencing model) and save them on output dir. Collects data for computing Metric: Syntactic validity of reference triples", action='store_true')
parser.add_argument("-l", "--literals",
help="Extract all literal values in reference triples and save them on output dir. Collects data for computing Metric: Syntactic validity of references’ literals", action='store_true')
parser.add_argument("-fr", "--fact-ref-triples",
help="Extract all facts and their reference triples and save them on output dir. Collects data for computing Metric: Semantic validity of reference triples", action='store_true')
parser.add_argument("-rp", "--ref-properties",
help="Extract all reference properties and save them on output dir. Collects data for computing Metric: Consistency of references’ properties", action='store_true')
parser.add_argument("-rpvt", "--ref-prop-value-type",
help="Extract all reference properties and their object value types and save them on output dir. Collects data for computing Metric: Range consistency of reference triples", action='store_true')
parser.add_argument("-ri", "--ref-incomings",
help="Extract all reference nodes and the numebr of their incoming edges (prov:wasDerivedFrom) and save them on output dir. Collects data for computing Metric: Ratio of reference sharing", action='store_true')
parser.add_argument("-sr", "--statement-refs",
help="Extract all sattement nodes and the numebr of their references and save them on output dir. Collects data for computing Metric: Multiple references for facts", action='store_true')
parser.add_argument("-irf", "--item-refed-facts",
help="Extract all items and their referenced facts and save them on output dir. Collects data for computing Metric: Human-added references ratio", action='store_true')
parser.add_argument("-wes", "--wikidata-eschema-data",
help="Extract most up-to-date Wikidata EntitySchemas data from Wikidata directory and save them on output dir. Collects data for computing COMPLETENESS metrics", action='store_true')
parser.add_argument("-cfr", "--classes-facts-refs",
help="Extract all classes of referenced items, and their referenced facts and the reference properties and save them on output dir. Collects data for computing Schema completeness of references metrics", action='store_true')
return parser
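# Example invocation (illustrative endpoint URL and output path, not from the
# original file):
#   python RQSS_Extractor.py --endpoint http://localhost:9999/sparql \
#       -eExt -sn -l -o ./rqss_extractor_output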
def perform_query(endpoint: str, query: str) -> List[List[str]]:
ret_val = []
sparql = SPARQLWrapper(endpoint)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
try:
print('Performing Query ...')
results = sparql.query().convert()
except Exception as e:
print('ERROR in performing query: {0}'.format(e))
return ret_val
for result in results["results"]["bindings"]:
row = []
for value in result:
row.append(result[str(value)]["value"])
ret_val.append(row)
return ret_val
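# Hedged usage sketch for perform_query (the endpoint URL and query are
# illustrative assumptions, not part of the original module):
#   rows = perform_query('http://localhost:9999/sparql',
#                        'SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 10')
#   # each element of rows is a list with one string per bound variable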
def extract_external_uris(opts: ArgumentParser) -> int:
print('Started extracting External Sources’ URIs')
start_time = datetime.now()
external_uris = perform_query(
opts.endpoint, RQSS_QUERIES["get_all_external_sources_filter_wikimedia_distinct"])
output_file = os.path.join(opts.output_dir + os.sep + 'external_uris.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for uri in external_uris:
csv_writer.writerow(uri)
end_time = datetime.now()
print('External URIs have been written in the file: {0}'.format(
output_file))
print('DONE. Extracting External URIs, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_statement_nodes_uris(opts: ArgumentParser) -> int:
print('Started extracting Statement Nodes URIs')
start_time = datetime.now()
statement_uris = perform_query(
opts.endpoint, RQSS_QUERIES["get_all_statement_nodes_wikimedia"])
output_file = os.path.join(
opts.output_dir + os.sep + 'statement_nodes_uris.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for uri in statement_uris:
csv_writer.writerow(uri)
end_time = datetime.now()
print('Statement Nodes URIs have been written in the file: {0}'.format(
output_file))
print('DONE. Extracting Statement Nodes URIs, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_refrence_literals(opts: ArgumentParser) -> int:
print('Started extracting literal values in reference triples')
start_time = datetime.now()
ref_literals = perform_query(
opts.endpoint, RQSS_QUERIES["get_reference_literals_wikimedia"])
output_file = os.path.join(
opts.output_dir + os.sep + 'reference_literals.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for lit in ref_literals:
csv_writer.writerow(lit)
end_time = datetime.now()
print('References’ literal values have been written in the file: {0}'.format(
output_file))
print('DONE. Extracting literal values in reference triples, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_fact_ref_triples(opts: ArgumentParser) -> int:
print('Started extracting fact subjects, predicate, and reference triples')
start_time = datetime.now()
fact_ref_triples = perform_query(
opts.endpoint, RQSS_QUERIES["get_fact_ref_triples_wikimedia"])
output_file = os.path.join(
opts.output_dir + os.sep + 'fact_ref_triples.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in fact_ref_triples:
csv_writer.writerow(row)
end_time = datetime.now()
    print('Facts and their reference triples have been written in the file: {0}'.format(
output_file))
print('DONE. Extracting facts and their reference triples, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_reference_properties(opts: ArgumentParser) -> int:
print('Started extracting properties that are used in references')
start_time = datetime.now()
ref_props = perform_query(
opts.endpoint, RQSS_QUERIES["get_ref_properties_wikimedia"])
output_file = os.path.join(
opts.output_dir + os.sep + 'ref_properties.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in ref_props:
csv_writer.writerow(row)
end_time = datetime.now()
print('Reference properties have been written in the file: {0}'.format(
output_file))
print('DONE. Extracting reference properties, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_reference_properties_value_types(opts: ArgumentParser) -> int:
print('Started extracting reference properties and their object values types')
start_time = datetime.now()
ref_props = perform_query(
opts.endpoint, RQSS_QUERIES["get_ref_properties_object_value_types_wikimedia"])
output_file = os.path.join(
opts.output_dir + os.sep + 'ref_properties_object_value.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in ref_props:
csv_writer.writerow(row)
end_time = datetime.now()
print('Reference properties and object value types have been written in the file: {0}'.format(
output_file))
print('DONE. Extracting reference properties and object value types, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_reference_node_incomings(opts: ArgumentParser) -> int:
    print('Started extracting reference nodes and the number of their incoming edges (prov:wasDerivedFrom)')
start_time = datetime.now()
ref_props = perform_query(
opts.endpoint, RQSS_QUERIES["get_ref_nodes_incomings_wikimedia"])
output_file = os.path.join(
opts.output_dir + os.sep + 'ref_nodes_incomings.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in ref_props:
csv_writer.writerow(row)
end_time = datetime.now()
    print('Reference nodes and the number of their incoming edges (prov:wasDerivedFrom) have been written in the file: {0}'.format(
output_file))
    print('DONE. Extracting reference nodes and the number of their incoming edges (prov:wasDerivedFrom), Duration: {0}'.format(
end_time - start_time))
return 0
def extract_statement_node_references(opts: ArgumentParser) -> int:
    print('Started extracting statement nodes and the number of their references')
start_time = datetime.now()
ref_props = perform_query(
opts.endpoint, RQSS_QUERIES["get_sattement_nodes_ref_num_wikimedia"])
output_file = os.path.join(
opts.output_dir + os.sep + 'statement_node_ref_num.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in ref_props:
csv_writer.writerow(row)
end_time = datetime.now()
    print('Statement nodes and the number of their references have been written in the file: {0}'.format(
output_file))
    print('DONE. Extracting statement nodes and the number of their references, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_item_referenced_facts(opts: ArgumentParser) -> int:
print('Started extracting items and their referenced facts')
start_time = datetime.now()
item_refed_facts = perform_query(
opts.endpoint, RQSS_QUERIES["get_item_refed_facts_wikimedia"])
output_file = os.path.join(
opts.output_dir + os.sep + 'item_refed_facts.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in item_refed_facts:
csv_writer.writerow(row)
end_time = datetime.now()
print('Items and their referenced facts have been written in the file: {0}'.format(
output_file))
print('DONE. Extracting items and their referenced facts, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_wikidata_entityschemas_data(opts: ArgumentParser) -> int:
output_file_classes = os.path.join(
opts.output_dir + os.sep + 'eschemas_summarization_related_classes.data')
output_file_refed_fact_refs = os.path.join(
opts.output_dir + os.sep + 'eschemas_summarization_related_refed_fact_refs.data')
extractor = EntitySchemaExtractor()
eschema_data = extractor.get_entity_schemas_references_summary_from_wikidata()
with open(output_file_classes, 'w', newline='') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['eid', 'related class', 'related property'])
for eid in eschema_data:
for rel_class in eid.related_classes:
csv_writer.writerow([eid.e_id, rel_class, ''])
for eid in eschema_data:
for rel_prop in eid.related_properties:
csv_writer.writerow([eid.e_id, '', rel_prop])
with open(output_file_refed_fact_refs, 'w', newline='') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['eid', 'refed fact', 'ref predicate'])
for eid in eschema_data:
for refed_facts_ref in eid.refed_facts_refs:
for ref_predicate in refed_facts_ref.ref_predicates:
csv_writer.writerow(
[eid.e_id, refed_facts_ref.refed_fact, ref_predicate])
def extract_classes_facts_refs(opts: ArgumentParser) -> int:
print('Started extracting classes of referenced items, and their referenced facts and the reference properties')
start_time = datetime.now()
item_refed_facts = perform_query(
opts.endpoint, RQSS_QUERIES["get_classes_and_facts_and_refed_props"])
output_file = os.path.join(
opts.output_dir + os.sep + 'classes_facts_refs.data')
with open(output_file, 'w') as file_handler:
csv_writer = csv.writer(
file_handler, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in item_refed_facts:
csv_writer.writerow(row)
end_time = datetime.now()
print('Classes of referenced items, and their referenced facts and the reference properties have been written in the file: {0}'.format(
output_file))
print('DONE. Extracting classes of referenced items, and their referenced facts and the reference properties, Duration: {0}'.format(
end_time - start_time))
return 0
def extract_from_file(opts: ArgumentParser) -> int:
print('Local file extraction is not supported yet. Please use local/public endpoint.')
return 1
def extract_from_endpoint(opts: ArgumentParser) -> int:
# list of parallel processes
extractor_procs = []
    if(opts.extract_external):
        p = Process(target=extract_external_uris, args=(opts,))
        extractor_procs.append(p)
    if(opts.statement_nodes):
        p = Process(target=extract_statement_nodes_uris, args=(opts,))
        extractor_procs.append(p)
    if(opts.literals):
        p = Process(target=extract_refrence_literals, args=(opts,))
        extractor_procs.append(p)
    if(opts.fact_ref_triples):
        p = Process(target=extract_fact_ref_triples, args=(opts,))
        extractor_procs.append(p)
    if(opts.ref_properties):
        p = Process(target=extract_reference_properties, args=(opts,))
        extractor_procs.append(p)
    if(opts.ref_prop_value_type):
        p = Process(target=extract_reference_properties_value_types, args=(opts,))
        extractor_procs.append(p)
    if(opts.ref_incomings):
        p = Process(target=extract_reference_node_incomings, args=(opts,))
        extractor_procs.append(p)
    if(opts.statement_refs):
        p = Process(target=extract_statement_node_references, args=(opts,))
        extractor_procs.append(p)
    if(opts.item_refed_facts):
        p = Process(target=extract_item_referenced_facts, args=(opts,))
        extractor_procs.append(p)
    if(opts.wikidata_eschema_data):
        p = Process(target=extract_wikidata_entityschemas_data, args=(opts,))
        extractor_procs.append(p)
    if(opts.classes_facts_refs):
        p = Process(target=extract_classes_facts_refs, args=(opts,))
        extractor_procs.append(p)
for proc in extractor_procs:
proc.start()
for proc in extractor_procs:
proc.join()
def RQSS_Extractor(argv: Optional[Union[str, List[str]]] = None, prog: Optional[str] = None) -> int:
if isinstance(argv, str):
argv = argv.split()
opts = genargs(prog).parse_args(argv if argv is not None else sys.argv[1:])
print('Creating output directory: {0}'.format(opts.output_dir))
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
if(opts.input != None):
return extract_from_file(opts)
if(opts.endpoint != None):
return extract_from_endpoint(opts)
return 0
if __name__ == '__main__':
RQSS_Extractor(sys.argv[1:])
|
keytar.py
|
#!/usr/bin/env python
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keytar flask app.
This program is responsible for exposing an interface to trigger cluster level
tests. For instance, docker webhooks can be configured to point to this
application in order to trigger tests upon pushing new docker images.
"""
import argparse
import collections
import datetime
import json
import logging
import os
import Queue
import shutil
import subprocess
import tempfile
import threading
import yaml
import flask
app = flask.Flask(__name__)
results = collections.OrderedDict()
_TEMPLATE = (
'python {directory}/test_runner.py -c "{config}" -t {timestamp} '
'-d {tempdir} -s {server}')
class KeytarError(Exception):
pass
def run_test_config(config):
"""Runs a single test iteration from a configuration."""
tempdir = tempfile.mkdtemp()
logging.info('Fetching github repository')
# Get the github repo and clone it.
github_config = config['github']
github_clone_args, github_repo_dir = _get_download_github_repo_args(
tempdir, github_config)
os.makedirs(github_repo_dir)
subprocess.call(github_clone_args)
current_dir = os.getcwd()
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M')
results[timestamp] = {
'timestamp': timestamp,
'status': 'Start',
'tests': {},
'docker_image': config['docker_image']
}
# Generate a test script with the steps described in the configuration,
# as well as the command to execute the test_runner.
with tempfile.NamedTemporaryFile(dir=tempdir, delete=False) as f:
tempscript = f.name
f.write('#!/bin/bash\n')
if 'before_test' in config:
# Change to the github repo directory, any steps to be run before the
# tests should be executed from there.
os.chdir(github_repo_dir)
for before_step in config['before_test']:
f.write('%s\n' % before_step)
server = 'http://localhost:%d' % app.config['port']
f.write(_TEMPLATE.format(
directory=current_dir, config=yaml.dump(config), timestamp=timestamp,
tempdir=tempdir, server=server))
os.chmod(tempscript, 0775)
try:
    subprocess.check_call([tempscript])
except subprocess.CalledProcessError as e:
logging.warn('Error running test_runner: %s', str(e))
finally:
os.chdir(current_dir)
shutil.rmtree(tempdir)
@app.route('/')
def index():
return app.send_static_file('index.html')
@app.route('/test_results')
def test_results():
return json.dumps([results[x] for x in sorted(results)])
@app.route('/test_log')
def test_log():
# Fetch the output from a test.
log = '%s.log' % os.path.basename(flask.request.values['log_name'])
return (flask.send_from_directory('/tmp/testlogs', log), 200,
{'Content-Type': 'text/css'})
@app.route('/update_results', methods=['POST'])
def update_results():
# Update the results dict, called from the test_runner.
update_args = flask.request.get_json()
timestamp = update_args['timestamp']
results[timestamp].update(update_args)
return 'OK'
def _validate_request(keytar_password, request_values):
"""Checks a request against the password provided to the service at startup.
Raises an exception on errors, otherwise returns None.
Args:
keytar_password: password provided to the service at startup.
request_values: dict of POST request values provided to Flask.
Raises:
KeytarError: raised if the password is invalid.
"""
if keytar_password:
if 'password' not in request_values:
raise KeytarError('Expected password not provided in test_request!')
elif request_values['password'] != keytar_password:
raise KeytarError('Incorrect password passed to test_request!')
@app.route('/test_request', methods=['POST'])
def test_request():
"""Respond to a post request to execute tests.
This expects a json payload containing the docker webhook information.
If this app is configured to use a password, the password should be passed in
as part of the POST request.
Returns:
HTML response.
"""
try:
_validate_request(app.config['password'], flask.request.values)
except KeytarError as e:
flask.abort(400, str(e))
webhook_data = flask.request.get_json()
repo_name = webhook_data['repository']['repo_name']
test_configs = [c for c in app.config['keytar_config']['config']
if c['docker_image'] == repo_name]
if not test_configs:
return 'No config found for repo_name: %s' % repo_name
for test_config in test_configs:
test_worker.add_test(test_config)
return 'OK'
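# The POST body is expected to look like a Docker Hub style webhook; an abridged,
# illustrative example (only repository.repo_name is actually read above):
#   {"repository": {"repo_name": "vitess/base"}, "push_data": {...}}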
def handle_cluster_setup(cluster_setup):
"""Setups up a cluster.
Currently only GKE is supported. This step handles setting up credentials and
ensuring a valid project name is used.
Args:
cluster_setup: YAML cluster configuration.
Raises:
KeytarError: raised on invalid setup configurations.
"""
if cluster_setup['type'] != 'gke':
return
if 'keyfile' not in cluster_setup:
raise KeytarError('No keyfile found in GKE cluster setup!')
# Add authentication steps to allow keytar to start clusters on GKE.
gcloud_args = ['gcloud', 'auth', 'activate-service-account',
'--key-file', cluster_setup['keyfile']]
logging.info('authenticating using keyfile: %s', cluster_setup['keyfile'])
subprocess.call(gcloud_args)
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = cluster_setup['keyfile']
# Ensure that a project name is correctly set. Use the name if provided
# in the configuration, otherwise use the current project name, or else
# the first available project name.
if 'project_name' in cluster_setup:
logging.info('Setting gcloud project to %s', cluster_setup['project_name'])
subprocess.call(
['gcloud', 'config', 'set', 'project', cluster_setup['project_name']])
else:
config = subprocess.check_output(
['gcloud', 'config', 'list', '--format', 'json'])
project_name = json.loads(config)['core']['project']
if not project_name:
      projects = json.loads(subprocess.check_output(
          ['gcloud', 'projects', 'list', '--format', 'json']))
      first_project = projects[0]['projectId']
logging.info('gcloud project is unset, setting it to %s', first_project)
subprocess.check_output(
['gcloud', 'config', 'set', 'project', first_project])
def handle_install_steps(keytar_config):
"""Runs all config installation/setup steps.
Args:
keytar_config: YAML keytar configuration.
"""
if 'install' not in keytar_config:
return
install_config = keytar_config['install']
for cluster_setup in install_config.get('cluster_setup', []):
handle_cluster_setup(cluster_setup)
# Install any dependencies using apt-get.
if 'dependencies' in install_config:
subprocess.call(['apt-get', 'update'])
os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
for dep in install_config['dependencies']:
subprocess.call(
['apt-get', 'install', '-y', '--no-install-recommends', dep])
# Run any additional commands if provided.
for step in install_config.get('extra', []):
os.system(step)
# Update path environment variable.
for path in install_config.get('path', []):
os.environ['PATH'] = '%s:%s' % (path, os.environ['PATH'])
def _get_download_github_repo_args(tempdir, github_config):
"""Get arguments for github actions.
Args:
tempdir: Base directory to git clone into.
github_config: Configuration describing the repo, branches, etc.
Returns:
([string], string) for arguments to pass to git, and the directory to
clone into.
"""
repo_prefix = github_config.get('repo_prefix', 'github')
repo_dir = os.path.join(tempdir, repo_prefix)
git_args = ['git', 'clone', 'https://github.com/%s' % github_config['repo'],
repo_dir]
if 'branch' in github_config:
git_args += ['-b', github_config['branch']]
return git_args, repo_dir
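# Illustrative example (hypothetical config values, not from the original file):
#   _get_download_github_repo_args('/tmp/work', {'repo': 'vitessio/vitess',
#                                                'branch': 'main'})
# returns (['git', 'clone', 'https://github.com/vitessio/vitess',
#           '/tmp/work/github', '-b', 'main'], '/tmp/work/github').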
class TestWorker(object):
"""A simple test queue. HTTP requests append to this work queue."""
def __init__(self):
self.test_queue = Queue.Queue()
self.worker_thread = threading.Thread(target=self.worker_loop)
self.worker_thread.daemon = True
def worker_loop(self):
# Run forever, executing tests as they are added to the queue.
while True:
item = self.test_queue.get()
run_test_config(item)
self.test_queue.task_done()
def start(self):
self.worker_thread.start()
def add_test(self, config):
self.test_queue.put(config)
test_worker = TestWorker()
def main():
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser(description='Run keytar')
parser.add_argument('--config_file', help='Keytar config file', required=True)
parser.add_argument('--password', help='Password', default=None)
parser.add_argument('--port', help='Port', default=8080, type=int)
keytar_args = parser.parse_args()
with open(keytar_args.config_file, 'r') as yaml_file:
yaml_config = yaml_file.read()
if not yaml_config:
raise ValueError('No valid yaml config!')
keytar_config = yaml.load(yaml_config)
handle_install_steps(keytar_config)
if not os.path.isdir('/tmp/testlogs'):
os.mkdir('/tmp/testlogs')
test_worker.start()
app.config['port'] = keytar_args.port
app.config['password'] = keytar_args.password
app.config['keytar_config'] = keytar_config
app.run(host='0.0.0.0', port=keytar_args.port, debug=True)
if __name__ == '__main__':
main()
|
QQCar.py
|
###################################################
# Smart car v1.0
#
# @author chenph
# @date 2018/5/15
###################################################
# Wiring notes (red = power, black = ground, "any" = any free pin):
# Light sensor                        red black any
# Ultrasonic sensor - transmit        red black any * 2
# Ultrasonic sensor - receive
# IR obstacle sensor - left           red black any
# IR obstacle sensor - right          red black any
# Passive buzzer                      red black any
# Line-tracking sensor                red black any
# RGB headlight R-G-B                 black any * 3
# Ultrasonic pan servo - left/right   any
# Camera pan servo - left/right       red black any
# Camera tilt servo - up/down         red black any
# Left wheel in1-in2                  any * 2
# Right wheel in1-in2                 any * 2
# Power: red, 8
# Ground: black, 9
# Others: 17
import threading
import os
from PyCode.Modules.RGBLightModule import *
from PyCode.Modules.ServoModule import *
from PyCode.Modules.WheelModule import *
from PyCode.Modules.LCDModule import *
from PyCode.Sensors.BeeSensor import *
from PyCode.Sensors.InfraredSensor import *
from PyCode.Sensors.LightSensor import *
from PyCode.Sensors.TraceSensor import *
from PyCode.Sensors.UltrasonicSensor import *
class QQCar:
def __init__(self):
        # Initialize the smart car's control pins --------------
        self.PIN_LIGHT = 8  # 01: light sensor
        self.PIN_ULTRASON_TRIG = 11  # 02: ultrasonic - trigger
        self.PIN_ULTRASON_ECHO = 13  # 03: ultrasonic - echo
        self.PIN_INFRARED_L = 37  # 04: left obstacle sensor
        self.PIN_INFRARED_R = 7  # 05: right obstacle sensor
        self.PIN_BEE = 26  # 06: buzzer
        self.PIN_TRACE = 35  # 07: line tracking
        self.PIN_LIGHT_R = 29  # 08: headlight
        self.PIN_LIGHT_G = 31  # 09: headlight
        self.PIN_LIGHT_B = 33  # 10: headlight
        self.PIN_SERVO_U = 23  # 11: ultrasonic pan servo
        self.PIN_SERVO_CH = 19  # 12: camera pan servo
        self.PIN_SERVO_CV = 21  # 13: camera tilt servo
        self.WHEEL_L_IN1 = 32  # 14: left wheel
        self.WHEEL_L_IN2 = 36  # 15: left wheel
        self.WHEEL_R_IN1 = 40  # 16: right wheel
        self.WHEEL_R_IN2 = 38  # 17: right wheel
        self.LCD_SDA = 3  # 18: LCD
        self.LCD_SCL = 5  # 19: LCD
        # Car state
        self.status = 'normal'
        self.code = '0'
        # Button states from the web page
        self.autocross = 'off'
        self.cruise = 'off'
        self.light = 'off'
        # Initialize Raspberry Pi GPIO control pins ----------------
        # Headlight
self.rgbLightModule = RGBLightModule(self.PIN_LIGHT_R, self.PIN_LIGHT_G, self.PIN_LIGHT_B)
        # Ultrasonic pan servo
self.servoModule_U = ServoModule(self.PIN_SERVO_U)
        # Camera pan servo (horizontal)
self.servoModule_CH = ServoModule(self.PIN_SERVO_CH)
        # Camera tilt servo (vertical)
self.servoModule_CV = ServoModule(self.PIN_SERVO_CV)
        # Wheel control
self.wheelModule = WheelModule(self.WHEEL_L_IN1, self.WHEEL_L_IN2, self.WHEEL_R_IN1, self.WHEEL_R_IN2)
        # Buzzer
self.beeSensor = BeeSensor(self.PIN_BEE)
        # Left obstacle sensor
self.infraredSensor_L = InfraredSensor(self.PIN_INFRARED_L)
        # Right obstacle sensor
self.infraredSensor_R = InfraredSensor(self.PIN_INFRARED_R)
        # Light sensor
self.lightSensor = LightSensor(self.PIN_LIGHT)
        # Line-tracking sensor
self.traceSensor = TraceSensor(self.PIN_TRACE)
        # Ultrasonic sensor
self.ultrasonicSensor = UltrasonicSensor(self.PIN_ULTRASON_TRIG, self.PIN_ULTRASON_ECHO)
        # LCD; adjust bus and addr to match the actual I2C address
self.screen = Screen(bus=1, addr=0x3f, cols=16, rows=2)
self.screen.enable_backlight()
        # Start the sensor thread
sensorsThread = threading.Thread(target=self.start)
sensorsThread.start()
        # Start the LCD thread
lcdThread = threading.Thread(target=self.lcd)
lcdThread.start()
    # Sensor loop (runs in its own thread)
def start(self):
while True:
if self.autocross == 'on':
                # IR obstacle detection
if self.status != 'warning':
if self.infraredSensor_L.getStatus() == InfraredSensor.INFRARED_SENSOR_BLOCK:
self.status = 'warning'
self.code = 'L'
self.beeSensor.play()
if self.status != 'warning':
if self.infraredSensor_R.getStatus() == InfraredSensor.INFRARED_SENSOR_BLOCK:
self.status = 'warning'
self.code = 'R'
self.beeSensor.play()
                # Ultrasonic obstacle detection
if self.status != 'warning':
if self.ultrasonicSensor.getDistance() <= 0.3:
self.status = 'warning'
self.code = 'U'
self.beeSensor.play()
if self.cruise == 'on':
                # Line tracking
if self.traceSensor.getStatus() == TraceSensor.TRACE_SENSOR_ONWAY:
self.beeSensor.play(0.5)
if self.light == 'off':
                # Check ambient light
if self.lightSensor.getStatus() == LightSensor.LIGHT_SENSOR_DARK:
self.rgbLightModule.turnOn()
else:
self.rgbLightModule.turnOff()
time.sleep(2)
self.status = 'normal'
    # LCD display loop (runs in its own thread)
def lcd(self):
while True:
if self.status == 'normal':
self.screen.display_data(time.strftime("%Y-%m-%d %H:%M"), 'CPU:' + self.getCPUtemperature() + 'C')
else:
for n in range(3):
                    self.screen.warning('-----Warning-----', 'Look Out! '+self.code)
self.status = 'normal'
time.sleep(2)
    # Drive forward
def forward(self):
self.wheelModule.forward()
    # Reverse
def backOff(self):
self.wheelModule.backOff()
    # Turn left
def leftTurn(self):
self.wheelModule.leftTurn()
    # Turn right
def rightTurn(self):
self.wheelModule.rightTurn()
    # Stop
def stop(self):
self.wheelModule.stop()
    # Ultrasonic pan servo: turn left
def servoUltrasonicTurnLeft(self):
self.servoModule_U.turnLeft()
    # Ultrasonic pan servo: turn right
def servoUltrasonicTurnRight(self):
self.servoModule_U.turnRight()
    # Camera pan servo: turn left
def servoCameraHTurnLeft(self):
self.servoModule_CH.turnLeft()
    # Camera pan servo: turn right
def servoCameraHTurnRight(self):
self.servoModule_CH.turnRight()
    # Camera tilt servo: tilt up
def servoCameraVTurnUp(self):
self.servoModule_CV.turnLeft()
    # Camera tilt servo: tilt down
def servoCameraVTurnDown(self):
self.servoModule_CV.turnRight()
    # Turn on the headlight
def turnOnLight(self):
self.light = 'on'
self.rgbLightModule.turnOn()
    # Turn off the headlight
def turnOffLight(self):
self.light = 'off'
self.rgbLightModule.turnOff()
    # Enable obstacle avoidance
def turnOnAutoCross(self):
self.autocross = 'on'
    # Disable obstacle avoidance
def turnOffAutoCross(self):
self.autocross = 'off'
    # Enable line tracking
def turnOnCruise(self):
self.cruise = 'on'
    # Disable line tracking
def turnOffCruise(self):
self.cruise = 'off'
    # Get the CPU temperature
def getCPUtemperature(self):
res = os.popen('vcgencmd measure_temp').readline()
return (res.replace("temp=", "").replace("'C\n", ""))
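# Minimal usage sketch (hedged; assumes the Raspberry Pi wiring listed in the
# header and the project-specific modules imported above):
#
#   car = QQCar()              # starts the sensor and LCD threads
#   car.turnOnAutoCross()      # enable obstacle detection / buzzer warnings
#   car.forward()
#   ...
#   car.stop()
#   car.turnOffAutoCross()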
|
batchsim3.py
|
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from pybullet_utils import bullet_client
import panda_sim
import time
useGUI = False
timeStep = 1./60.
# Importing the libraries
import os
import time
import multiprocessing as mp
from multiprocessing import Process, Pipe
pandaEndEffectorIndex = 11 #8
pandaNumDofs = 7
_RESET = 1
_CLOSE = 2
_EXPLORE = 3
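# Pipe protocol between the parent process and each ExploreWorker (a summary of
# the code below): the parent sends [message, payload] pairs, where message is
# one of the constants above. _RESET builds the per-worker simulations and
# replies ["reset ok"], _EXPLORE steps every simulation and replies with the
# worker's reward value, and _CLOSE stops profiling, replies ["close ok"] and
# ends the worker loop.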
def ExploreWorker(rank, num_processes, childPipe, args):
print("hi:",rank, " out of ", num_processes)
import pybullet as op1
import pybullet_data as pd
logName=""
p1=0
n = 0
space = 2
simulations=[]
sims_per_worker = 10
offsetY = rank*space
while True:
n += 1
try:
# Only block for short times to have keyboard exceptions be raised.
if not childPipe.poll(0.0001):
continue
message, payload = childPipe.recv()
except (EOFError, KeyboardInterrupt):
break
if message == _RESET:
if (useGUI):
p1 = bullet_client.BulletClient(op1.GUI)
else:
p1 = bullet_client.BulletClient(op1.DIRECT)
p1.setTimeStep(timeStep)
p1.setPhysicsEngineParameter(numSolverIterations=8)
p1.setPhysicsEngineParameter(minimumSolverIslandSize=100)
p1.configureDebugVisualizer(p1.COV_ENABLE_Y_AXIS_UP,1)
p1.configureDebugVisualizer(p1.COV_ENABLE_RENDERING,0)
p1.setAdditionalSearchPath(pd.getDataPath())
p1.setGravity(0,-9.8,0)
logName = str("batchsim")+str(rank)
for j in range (3):
offsetX = 0#-sims_per_worker/2.0*space
for i in range (sims_per_worker):
offset=[offsetX,0, offsetY]
sim = panda_sim.PandaSim(p1, offset)
simulations.append(sim)
offsetX += space
offsetY += space
childPipe.send(["reset ok"])
p1.configureDebugVisualizer(p1.COV_ENABLE_RENDERING,1)
for i in range (100):
p1.stepSimulation()
logId = p1.startStateLogging(op1.STATE_LOGGING_PROFILE_TIMINGS,logName)
continue
if message == _EXPLORE:
sum_rewards=rank
if useGUI:
numSteps = int(20000)
else:
numSteps = int(5)
for i in range (numSteps):
for s in simulations:
s.step()
p1.stepSimulation()
#print("logId=",logId)
#print("numSteps=",numSteps)
childPipe.send([sum_rewards])
continue
if message == _CLOSE:
p1.stopStateLogging(logId)
childPipe.send(["close ok"])
break
childPipe.close()
if __name__ == "__main__":
mp.freeze_support()
if useGUI:
num_processes = 1
else:
num_processes = 12
processes = []
args=[0]*num_processes
childPipes = []
parentPipes = []
for pr in range(num_processes):
parentPipe, childPipe = Pipe()
parentPipes.append(parentPipe)
childPipes.append(childPipe)
for rank in range(num_processes):
p = mp.Process(target=ExploreWorker, args=(rank, num_processes, childPipes[rank], args))
p.start()
processes.append(p)
for parentPipe in parentPipes:
parentPipe.send([_RESET, "blaat"])
positive_rewards = [0]*num_processes
for k in range(num_processes):
#print("reset msg=",parentPipes[k].recv()[0])
msg = parentPipes[k].recv()[0]
for parentPipe in parentPipes:
parentPipe.send([_EXPLORE, "blaat"])
positive_rewards = [0]*num_processes
for k in range(num_processes):
positive_rewards[k] = parentPipes[k].recv()[0]
#print("positive_rewards=",positive_rewards[k])
for parentPipe in parentPipes:
parentPipe.send([_EXPLORE, "blaat"])
positive_rewards = [0]*num_processes
for k in range(num_processes):
positive_rewards[k] = parentPipes[k].recv()[0]
#print("positive_rewards=",positive_rewards[k])
msg = positive_rewards[k]
for parentPipe in parentPipes:
parentPipe.send([_EXPLORE, "blaat"])
positive_rewards = [0]*num_processes
for k in range(num_processes):
positive_rewards[k] = parentPipes[k].recv()[0]
#print("positive_rewards=",positive_rewards[k])
for parentPipe in parentPipes:
parentPipe.send([_CLOSE, "pay2"])
for p in processes:
p.join()
  # Now merge the separate per-worker JSON profile files into a single trace file
fnameout = 'batchsim.json'
count = 0
outfile = open(fnameout, "w+")
outfile.writelines(["{\"traceEvents\":[\n"])
numFiles = num_processes
for num in range(numFiles):
print("num=",num)
fname = 'batchsim%d_0.json' % (num)
with open(fname) as infile:
for line in infile:
if "pid" in line:
line = line.replace('\"pid\":1', '\"pid\":'+str(num))
if num < (numFiles-1) and not "{}}," in line:
line = line.replace('{}}', '{}},')
print("line[",count,"]=",line)
outfile.write(line)
count += 1
print ("count=",count)
outfile.writelines(["],\n"])
outfile.writelines(["\"displayTimeUnit\": \"ns\"}\n"])
outfile.close()
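  # The merged output uses the Chrome trace-event JSON layout ("traceEvents" /
  # "displayTimeUnit"), so batchsim.json can be inspected by loading it into
  # chrome://tracing or a compatible trace viewer.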
|
Tron_Environment.py
|
from enum import Enum
import time
import copy
import sys
import subprocess
import random
from threading import Thread
def monitorFile(connection, queue):
while True:
try:
line = connection.readline()
except:
break
if not line:
queue.append(None)
break
line = line.rstrip("\n")
queue.append(line)
class Networker:
def __init__(self):
self.processes = []
self.stdoutQueues = []
self.stderrQueues = []
def startPlayer(self, command):
        self.processes.append(subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True))
self.stdoutQueues.append([])
self.stderrQueues.append([])
stdoutMonitor = Thread(target=monitorFile, args=(self.processes[-1].stdout, self.stdoutQueues[-1]))
stdoutMonitor.daemon = True
stdoutMonitor.start()
stderrMonitor = Thread(target=monitorFile, args=(self.processes[-1].stderr, self.stderrQueues[-1]))
stderrMonitor.daemon = True
stderrMonitor.start()
def initialNetworking(self):
for a in range(len(self.processes)):
self.processes[a].stdin.write(str(a+1) + "\n")
self.processes[a].stdin.flush()
def serializeMap(self, map, isSecond):
returnString = ""
for row in map:
for tile in row:
returnString += str(tile if isSecond == False or tile == 0 or tile == 5 else tile-1 if tile == 2 or tile == 4 else tile+1) + " "
return returnString
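    # Perspective swap performed by serializeMap for the second player
    # (illustrative example, not original code): player/taken tiles are
    # exchanged so each bot always sees itself as tiles 1/3, e.g. the row
    # [0, 1, 2, 5, 3, 4] is sent to player 2 as "0 2 1 5 4 3 ".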
def frameNetworking(self, map, isSecond):
self.processes[isSecond].stdin.write(self.serializeMap(map, isSecond) + "\n")
self.processes[isSecond].stdin.flush()
# Return move
startingTime = time.time()
while len(self.stdoutQueues[isSecond]) == 0:
time.sleep(0.01)
if time.time() - startingTime > 2.5: return None
return int(self.stdoutQueues[isSecond].pop())
def killAll(self):
for a in range(len(self.processes)):
self.processes[a].stdin.write("KILL\n")
self.processes[a].stdin.flush()
self.processes[a].kill()
class Direction(Enum):
north = 0
east = 1
south = 2
west = 3
class Tile(Enum):
empty = 0
player1 = 1
player2 = 2
takenByPlayer1 = 3
takenByPlayer2 = 4
wall = 5
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
networker = Networker()
if len(sys.argv) >= 2:
for a in range(-2, 0):
print(sys.argv[a])
networker.startPlayer(sys.argv[a])
else:
# Network initialization
for a in range(2):
networker.startPlayer(input("Enter the start command for the player " + str(a) + ":"))
networker.initialNetworking()
# Map setup
width = 16
height = 16
gameMap = [[Tile.empty.value for a in range(width)] for b in range(height)]
# Decide if map is mirrored or rotationally symmetric. 0 Indicates rotational symmetry, 1 vertical mirroring, and 2 horizontal mirroring.
isMirror = random.randint(0, 2)
# Place pieces on map
positions = []
positions.append(Point(random.randint(0, width-1), random.randint(0, height-1)))
positions.append(Point(positions[0].x if isMirror == 1 else width-1-positions[0].x, positions[0].y if isMirror == 2 else height-1-positions[0].y))
prob_wall = 0.12
for a in range(0, int((height+1) / 2) if isMirror != 2 else height):
for b in range(0, width if isMirror != 2 else int((width+1) / 2)):
if random.random() < prob_wall:
gameMap[a][b] = 5
gameMap[a if isMirror == 2 else height-1-a][b if isMirror == 1 else width-1-b] = 5
gameMap[positions[0].y][positions[0].x] = Tile.player1.value
gameMap[positions[1].y][positions[1].x] = Tile.player2.value
for a in range(0, height):
s = ""
for b in range(0, width):
s += str(str(gameMap[a][b]))
print(s)
# Game loop
frames = []
isDone = False
isTied = False
winner = -1
frames.append(copy.deepcopy(gameMap))
while isDone == False:
for a in range(2):
try:
move = networker.frameNetworking(copy.deepcopy(frames[-1]), a)
gameMap[positions[a].y][positions[a].x] = Tile.takenByPlayer1.value if a == 0 else Tile.takenByPlayer2.value
if move == None or move < 0 or move > 3:
if move == None: print("Player " + str(a+1) + " timed out!")
else: print("Player " + str(a+1) + " sent us a move that is not between 0 and 3!")
winner = 1 + (0 if a == 1 else 1)
if isDone == True: isTied = True
isDone = True
continue
if move == Direction.north.value: positions[a].y += 1
elif move == Direction.south.value: positions[a].y -= 1
elif move == Direction.east.value: positions[a].x += 1
elif move == Direction.west.value: positions[a].x -= 1
# check if legitimate move
if positions[a].x >= width or positions[a].y >= height or positions[a].x < 0 or positions[a].y < 0 or gameMap[positions[a].y][positions[a].x] != Tile.empty.value:
if positions[a].x >= width or positions[a].y >= height or positions[a].x < 0 or positions[a].y < 0: print("Player " + str(a+1) + " fell off the map!")
elif gameMap[positions[a].y][positions[a].x] == Tile.player1.value or gameMap[positions[a].y][positions[a].x] == Tile.player2.value: print("Player " + str(a+1) + " collided with another player!")
else: print("Player " + str(a+1) + " collide with a tile that has already been taken!")
winner = 1 + (0 if a == 1 else 1)
if isDone == True: isTied = True
isDone = True
continue
gameMap[positions[a].y][positions[a].x] = Tile.player1.value if a == 0 else Tile.player2.value
except Exception as e:
print("There was an error while running the game!")
print(str(e))
winner = 1 + (0 if a == 1 else 1)
if isDone == True: isTied = True
isDone = True
continue
frames.append(copy.deepcopy(gameMap))
# Cleanup
if isTied == True: print("The game ended in a tie!")
else: print("Player " + str(winner) + " won!")
try:
networker.killAll()
except:
pass
contents = "%d %d %d\n" % (width, height, len(frames))
for frame in frames: contents += " ".join(str(tile) for row in frame for tile in row) + "\n"
filename = str(int(time.time()*10)) + ".trn"
open(filename, "w").write(contents)
print("Output file is stored at " + filename)
|
apk_install.py
|
from os import environ
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from sys import argv
apkpath = "~/Downloads/app-castrolZoomAlpha-release.apk"
aaptpath = "~/Library/Android/sdk/build-tools/28.0.1"
adbpath = "~/Library/Android/sdk/platform-tools"
pkgname = ''
"""
Get list of android devices connected
"""
def get_devices():
try:
res = Popen(['adb', 'devices'], stdout=PIPE, stderr=STDOUT)
except:
        print ('Either adb could not be found in the system PATH or the adbpath variable is not set to where adb is located')
exit(0)
out = res.communicate()[0]
ds = out.split('\n')
ds = filter(lambda x: x != '' and 'devices' not in x,ds)
    ds = map(lambda x: x.split('\t')[0], ds)
return res.returncode == 0, ds
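# Illustrative example of what get_devices() parses (hypothetical serial number):
#   List of devices attached
#   0123456789ABCDEF\tdevice
# yields (True, ['0123456789ABCDEF']).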
"""
Perform adb uninstall and report the result as success or failure
"""
def uninstall_apk(devid, devmodel, pkgname):
cmd = 'adb -s {} uninstall {}'.format(devid,pkgname)
res = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
out = res.communicate()[0]
r = (res.returncode == 0 and 'success' in out.lower())
if r:
print ('uninstall success on device {}'.format(devmodel))
else:
print ('uninstall failed on device {}'.format(devmodel))
"""
Perform adb install and report the result as success or failure
"""
def install_apk(devid, devmodel, apkpath):
cmd = 'adb -s {} install {}'.format(devid,apkpath)
res = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
print ("Installing on {}".format(devmodel))
out = res.communicate()[0]
r = (res.returncode == 0 and 'success' in out.lower())
if r:
print ('Installation success on device {}'.format(devmodel))
else:
        print ('Installation failed on device {}'.format(devmodel))
return r
"""
Find out the package name of the apk when package name is not provided but only the apk path
"""
def get_package_name(apkpath):
cmd = 'aapt dump badging {} | grep package:\ name'.format(apkpath)
res = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
out = res.communicate()[0]
l = out.split()
for m in l:
if m.startswith('name'):
return True, m.split('\'')[1]
    print ('Either the file is not an apk, or aapt is not in the system PATH, or the aaptpath variable is not '
           'set to where aapt is located')
return False, ''
"""
Check if a package is already installed on a connected device
"""
def is_installed(devid, pkgname):
cmd = 'adb -s {} shell pm list packages | grep {}'.format(devid,pkgname)
res = Popen(cmd,stdout=PIPE,stderr=STDOUT, shell=True)
out = res.communicate()[0]
return pkgname in out
"""
Get device model name from device id of the connected device
"""
def get_model(devid):
cmd = 'adb -s {} shell getprop ro.product.model'.format(devid)
res = Popen(cmd,stdout=PIPE,stderr=STDOUT, shell=True)
out = res.communicate()[0]
return out.strip('\r').strip('\n').strip()
if __name__ == '__main__':
argc = len(argv)
ans = 'n'
if argc != 3:
        print ('Missing apk path and package name arguments; pass empty arguments \'\' for them instead\n'
               'Supported modes are\n'
               'python apk_install.py \'\' \'\'\n'
               'python apk_install.py <apk path> \'\'\n'
               'python apk_install.py \'\' <package name>\n'
               'python apk_install.py <apk path> <package name>\n')
exit(0)
if argv[1]:
apkpath = str(argv[1])
if argv[2]:
pkgname = str(argv[2])
"""
If the system PATH is not set in bash environment then use the aaptpath and adbpath to specify the
path for adb and aapt executables
"""
path = environ['PATH']
environ['PATH'] = path+':'+aaptpath+':'+adbpath
devmodel = {}
installed = []
r, ds = get_devices()
if not len(ds):
print ('no connected android devices found')
exit(0)
"""
Get the package name from apk file if not provided in command line
"""
if pkgname.strip() == '' and apkpath.strip() != '':
found, pkgname = get_package_name(apkpath)
if not found:
print ('no package found')
exit(0)
for d in ds:
devmodel[d] = get_model(d)
"""
Find out the devices on which the app is already installed
"""
for d in ds:
what = is_installed(d,pkgname)
print ("Installed on {} : {}".format(devmodel[d],what))
if what:
installed.append(d)
"""
Choose to uninstall the app on the devices if already installed
"""
for who in installed:
ans = raw_input('Do you wish to uninstall the app on {}? y/n \r\n'.format(devmodel[who]))
if ans == 'y':
uninstall_apk(who,devmodel[who],pkgname)
else:
ds.remove(who)
"""
Install the apk on only those devices on which app is not installed
"""
if not len(ds):
print ('No devices to install the app on\n')
exit(0)
    print ('Installing the apk on devices that don\'t have it installed (or just had it uninstalled)')
threads = [Thread(target=install_apk, args=(d,devmodel[d],apkpath)) for d in ds]
for t in threads:
t.start()
|
lisp-rtr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-rtr.py
#
# This file performs LISP Reencapsulating Tunnel Router (RTR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import time
import select
import threading
import pcappy
import os
import copy
import commands
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
II1iII1i = [ None , None , None ]
oO0oIIII = None
Oo0oO0oo0oO00 = None
i111I = None
II1Ii1iI1i = None
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )
OOo = None
Ii1IIii11 = None
Oooo0000 = None
if 22 - 22: Ii1I . IiII
I11 = [ ]
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
iiIIIIi1i1 = None
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
def I1Ii ( parameter ) :
global I11
if 66 - 66: Ii1I
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" ,
I11 ) )
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
def IIiiIiI1 ( parameter ) :
global I11
if 41 - 41: OoOoOO00
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "RTR" , I11 ,
True ) )
if 13 - 13: Oo0Ooo . i11iIiiIii - iIii1I11I1II1 - OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
if 84 - 84: i11iIiiIii . o0oOOo0O0Ooo
if 100 - 100: Ii1I - Ii1I - I1Ii111
if 20 - 20: OoooooooOO
if 13 - 13: i1IIi - Ii1I % oO0o / iIii1I11I1II1 % iII111i
if 97 - 97: i11iIiiIii
def II1i1Ii11Ii11 ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "RTR" ) )
if 35 - 35: o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
def i1iiI11I ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
if 29 - 29: OoooooooOO
if 23 - 23: o0oOOo0O0Ooo . II111iiii
if 98 - 98: iIii1I11I1II1 % OoOoOO00 * I1ii11iIi11i * OoOoOO00
if 45 - 45: I1Ii111 . OoOoOO00
if 83 - 83: oO0o . iIii1I11I1II1 . I1ii11iIi11i
if 31 - 31: Ii1I . Ii1I - o0oOOo0O0Ooo / OoO0O00 + ooOoO0o * I1IiiI
if 63 - 63: I1Ii111 % i1IIi / OoooooooOO - OoooooooOO
def iIii11I ( kv_pair ) :
OOO0OOO00oo = { "rloc-probe" : False }
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
for iii11 in kv_pair . keys ( ) :
O0oo0OO0oOOOo = kv_pair [ iii11 ]
if 35 - 35: IiII % I1IiiI
if ( iii11 == "instance-id" ) :
o0OOoo0OO0OOO = O0oo0OO0oOOOo . split ( "-" )
OOO0OOO00oo [ "instance-id" ] = [ 0 , 0 ]
if ( len ( o0OOoo0OO0OOO ) == 1 ) :
OOO0OOO00oo [ "instance-id" ] [ 0 ] = int ( o0OOoo0OO0OOO [ 0 ] )
OOO0OOO00oo [ "instance-id" ] [ 1 ] = int ( o0OOoo0OO0OOO [ 0 ] )
else :
OOO0OOO00oo [ "instance-id" ] [ 0 ] = int ( o0OOoo0OO0OOO [ 0 ] )
OOO0OOO00oo [ "instance-id" ] [ 1 ] = int ( o0OOoo0OO0OOO [ 1 ] )
if 19 - 19: oO0o % i1IIi % o0oOOo0O0Ooo
if 93 - 93: iIii1I11I1II1 % oO0o * i1IIi
if ( iii11 == "eid-prefix" ) :
Ii11Ii1I = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Ii11Ii1I . store_prefix ( O0oo0OO0oOOOo )
OOO0OOO00oo [ "eid-prefix" ] = Ii11Ii1I
if 72 - 72: iII111i / i1IIi * Oo0Ooo - I1Ii111
if ( iii11 == "rloc-prefix" ) :
Oo0O0O0ooO0O = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Oo0O0O0ooO0O . store_prefix ( O0oo0OO0oOOOo )
OOO0OOO00oo [ "rloc-prefix" ] = Oo0O0O0ooO0O
if 15 - 15: I1ii11iIi11i + OoOoOO00 - OoooooooOO / OOooOOo
if ( iii11 == "rloc-probe" ) :
OOO0OOO00oo [ "rloc-probe" ] = ( O0oo0OO0oOOOo == "yes" )
if 58 - 58: i11iIiiIii % I11i
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
if 44 - 44: oO0o
for o0o0oOoOO0 in lisp . lisp_glean_mappings :
if ( o0o0oOoOO0 . has_key ( "eid-prefix" ) ^ OOO0OOO00oo . has_key ( "eid-prefix" ) ) : continue
if ( o0o0oOoOO0 . has_key ( "eid-prefix" ) and OOO0OOO00oo . has_key ( "eid-prefix" ) ) :
iIi1iIiii111 = o0o0oOoOO0 [ "eid-prefix" ]
iIIIi1 = OOO0OOO00oo [ "eid-prefix" ]
if ( iIi1iIiii111 . is_exact_match ( iIIIi1 ) == False ) : continue
if 20 - 20: i1IIi + I1ii11iIi11i - ooOoO0o
if 30 - 30: II111iiii - OOooOOo - i11iIiiIii % OoOoOO00 - II111iiii * Ii1I
if ( o0o0oOoOO0 . has_key ( "rloc-prefix" ) ^ OOO0OOO00oo . has_key ( "rloc-prefix" ) ) : continue
if ( o0o0oOoOO0 . has_key ( "rloc-prefix" ) and OOO0OOO00oo . has_key ( "rloc-prefix" ) ) :
iIi1iIiii111 = o0o0oOoOO0 [ "rloc-prefix" ]
iIIIi1 = OOO0OOO00oo [ "rloc-prefix" ]
if ( iIi1iIiii111 . is_exact_match ( iIIIi1 ) == False ) : continue
if 61 - 61: oO0o - I11i % OOooOOo
if 84 - 84: oO0o * OoO0O00 / I11i - O0
if ( o0o0oOoOO0 . has_key ( "instance-id" ) ^ OOO0OOO00oo . has_key ( "instance-id" ) ) : continue
if ( o0o0oOoOO0 . has_key ( "instance-id" ) and OOO0OOO00oo . has_key ( "instance-id" ) ) :
iIi1iIiii111 = o0o0oOoOO0 [ "instance-id" ]
iIIIi1 = OOO0OOO00oo [ "instance-id" ]
if ( iIi1iIiii111 != iIIIi1 ) : continue
if 30 - 30: iIii1I11I1II1 / ooOoO0o - I1Ii111 - II111iiii % iII111i
if 49 - 49: I1IiiI % ooOoO0o . ooOoO0o . I11i * ooOoO0o
if 97 - 97: Ii1I + o0oOOo0O0Ooo . OOooOOo + I1ii11iIi11i % iII111i
if 95 - 95: i1IIi
if 3 - 3: I1Ii111 - O0 / I1Ii111 % OoO0O00 / I1Ii111 . I1IiiI
return
if 50 - 50: IiII
if 14 - 14: I11i % OoO0O00 * I11i
if 16 - 16: OoOoOO00 . ooOoO0o + i11iIiiIii
if 38 - 38: IiII * OOooOOo . o0oOOo0O0Ooo
if 98 - 98: OoooooooOO + iII111i . OoOoOO00
lisp . lisp_glean_mappings . append ( OOO0OOO00oo )
def oOOo0 ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "RTR" ) )
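# Map-cache walker callback used for NAT traversal. parms is
# [sockets, translated-rloc, translated-port, hostname]; for the given entry it
# appears to rewrite any RLOC or RLE node whose rloc_name matches the hostname
# with the new translated address/port, carry the encap crypto keys over to the
# new "addr:port" key, refresh the RLOC-probe list (sending a probe when
# probing is enabled), and write the updated entry to the IPC map-cache.
# (lisp_crypto_keys_by_rloc_encap.has_key() is again the Python 2 idiom.)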
def IIiIi1iI ( mc , parms ) :
i1IiiiI1iI , Oo0O0O0ooO0O , i1iIi , ooOOoooooo = parms
if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
III1Iiii1I11 = "{}:{}" . format ( Oo0O0O0ooO0O . print_address_no_iid ( ) , i1iIi )
Ii11Ii1I = lisp . green ( mc . print_eid_tuple ( ) , False )
IIII = "Changed '{}' translated address:port to {} for EID {}, {} {}" . format ( ooOOoooooo , lisp . red ( III1Iiii1I11 , False ) , Ii11Ii1I , "{}" , "{}" )
if 32 - 32: OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
for IIi1I11I1II in mc . rloc_set :
if ( IIi1I11I1II . rle ) :
for OooOoooOo in IIi1I11I1II . rle . rle_nodes :
if ( OooOoooOo . rloc_name != ooOOoooooo ) : continue
OooOoooOo . store_translated_rloc ( Oo0O0O0ooO0O , i1iIi )
ii11IIII11I = OooOoooOo . address . print_address_no_iid ( ) + ":" + str ( OooOoooOo . translated_port )
if 81 - 81: OoOoOO00 / O0 . IiII . I1IiiI
lisp . lprint ( IIII . format ( "RLE" , ii11IIII11I ) )
if 72 - 72: i1IIi / OoO0O00 + OoooooooOO - Oo0Ooo
if 29 - 29: I1ii11iIi11i + oO0o % O0
if 10 - 10: I11i / I1Ii111 - I1IiiI * iIii1I11I1II1 - I1IiiI
if ( IIi1I11I1II . rloc_name != ooOOoooooo ) : continue
if 97 - 97: I1ii11iIi11i + I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
ii11IIII11I = IIi1I11I1II . rloc . print_address_no_iid ( ) + ":" + str ( IIi1I11I1II . translated_port )
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if ( lisp . lisp_crypto_keys_by_rloc_encap . has_key ( ii11IIII11I ) ) :
O0ooO0Oo00o = lisp . lisp_crypto_keys_by_rloc_encap [ ii11IIII11I ]
lisp . lisp_crypto_keys_by_rloc_encap [ III1Iiii1I11 ] = O0ooO0Oo00o
if 77 - 77: iIii1I11I1II1 * OoO0O00
if 95 - 95: I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
IIi1I11I1II . delete_from_rloc_probe_list ( mc . eid , mc . group )
IIi1I11I1II . store_translated_rloc ( Oo0O0O0ooO0O , i1iIi )
IIi1I11I1II . add_to_rloc_probe_list ( mc . eid , mc . group )
lisp . lprint ( IIII . format ( "RLOC" , ii11IIII11I ) )
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if ( lisp . lisp_rloc_probing ) :
oOo0oO = None if ( mc . group . is_null ( ) ) else mc . eid
OOOO0oo0 = mc . eid if ( mc . group . is_null ( ) ) else mc . group
lisp . lisp_send_map_request ( i1IiiiI1iI , 0 , oOo0oO , OOOO0oo0 , IIi1I11I1II )
if 35 - 35: Ii1I - I1IiiI % o0oOOo0O0Ooo . OoooooooOO % Ii1I
if 47 - 47: iII111i - Ii1I . II111iiii + OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
if 87 - 87: Oo0Ooo . IiII
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
if 55 - 55: OOooOOo . I1IiiI
lisp . lisp_write_ipc_map_cache ( True , mc )
return ( True , parms )
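# Wrapper callback: for unicast entries it calls the updater above directly;
# for (S,G) entries it walks the per-source cache with the same parameters.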
def Ii1I1Ii ( mc , parms ) :
if 69 - 69: I1IiiI / o0oOOo0O0Ooo . IiII * I1Ii111 % Ii1I - o0oOOo0O0Ooo
if 13 - 13: Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if ( mc . group . is_null ( ) ) : return ( IIiIi1iI ( mc , parms ) )
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if ( mc . source_cache == None ) : return ( True , parms )
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
mc . source_cache . walk_cache ( IIiIi1iI , parms )
return ( True , parms )
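# Walks the entire map-cache with the callbacks above. Called when an
# Info-Request reveals a new translated address/port for an xTR behind a NAT
# (i.e. after lisp.lisp_store_nat_info() reports a change for this hostname).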
def o0o0O0O00oOOo ( sockets , hostname , rloc , port ) :
lisp . lisp_map_cache . walk_cache ( Ii1I1Ii ,
[ sockets , rloc , port , hostname ] )
return
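# Main RTR data-plane routine, as read from the code: hand RLOC-probes that
# arrive without a LISP data header to the control plane; reassemble fragments;
# decap LISP packets and strip outer headers; treat instance-id 0xffffff as
# control traffic (Info-Requests update NAT state and translated RLOCs,
# embedded RLOC-probes are parsed); drop if an external data plane is active;
# glean source EID/RLOC state when allowed; look up the destination EID in the
# map-cache, sending rate-limited Map-Requests on a miss or stale entry; then
# natively forward, drop, encapsulate to the selected RLOC, or replicate over
# an RLE list, honouring lisp-trace packets along the way.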
def I1111i ( lisp_packet , thread_name ) :
global II1iII1i , iIIii , o00O0O
global OOo , Ii1IIii11
global oO0oIIII
global iiIIIIi1i1
if 20 - 20: i1IIi - ooOoO0o
i1iI = lisp_packet
Oo0O0 = i1iI . is_lisp_packet ( i1iI . packet )
if 82 - 82: II111iiii % I11i / OoO0O00 + OoOoOO00 / o0oOOo0O0Ooo / I1Ii111
if 70 - 70: oO0o
if 59 - 59: o0oOOo0O0Ooo % oO0o
if 6 - 6: iIii1I11I1II1 % i11iIiiIii % I1ii11iIi11i
if ( Oo0O0 == False ) :
o0Oo0oO0oOO00 = i1iI . packet
oo00OO0000oO , I1II1 , i1iIi , oooO = lisp . lisp_is_rloc_probe ( o0Oo0oO0oOO00 , - 1 )
if ( o0Oo0oO0oOO00 != oo00OO0000oO ) :
if ( I1II1 == None ) : return
lisp . lisp_parse_packet ( II1iII1i , oo00OO0000oO , I1II1 , i1iIi , oooO )
return
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
if 52 - 52: OOooOOo
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
i1iI . packet = lisp . lisp_reassemble ( i1iI . packet )
if ( i1iI . packet == None ) : return
if 71 - 71: I1Ii111 . II111iiii
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if ( lisp . lisp_flow_logging ) : i1iI = copy . deepcopy ( i1iI )
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if ( Oo0O0 ) :
if ( i1iI . decode ( True , None , lisp . lisp_decap_stats ) == None ) : return
i1iI . print_packet ( "Receive-({})" . format ( thread_name ) , True )
i1iI . strip_outer_headers ( )
else :
if ( i1iI . decode ( False , None , None ) == None ) : return
i1iI . print_packet ( "Receive-({})" . format ( thread_name ) , False )
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
if 92 - 92: OoOoOO00 + I1Ii111 * Ii1I % I1IiiI
if 42 - 42: Oo0Ooo
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
if ( Oo0O0 and i1iI . lisp_header . get_instance_id ( ) == 0xffffff ) :
oOoOOo0O = lisp . lisp_control_header ( )
oOoOOo0O . decode ( i1iI . packet )
if ( oOoOOo0O . is_info_request ( ) ) :
OOOooo = lisp . lisp_info ( )
OOOooo . decode ( i1iI . packet )
OOOooo . print_info ( )
if 94 - 94: OoooooooOO + Oo0Ooo / OoOoOO00 * OOooOOo
if 69 - 69: ooOoO0o % oO0o
if 50 - 50: OoooooooOO % I11i
if 49 - 49: oO0o - i11iIiiIii . I1Ii111 * Ii1I % iII111i + i1IIi
if 71 - 71: o0oOOo0O0Ooo
IIIIiIiIi1 = OOOooo . hostname if ( OOOooo . hostname != None ) else ""
I11iiiiI1i = i1iI . outer_source
iI1i11 = i1iI . udp_sport
if ( lisp . lisp_store_nat_info ( IIIIiIiIi1 , I11iiiiI1i , iI1i11 ) ) :
o0o0O0O00oOOo ( II1iII1i , IIIIiIiIi1 , I11iiiiI1i , iI1i11 )
if 66 - 66: O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / Ii1I + I1ii11iIi11i
else :
I1II1 = i1iI . outer_source . print_address_no_iid ( )
oooO = i1iI . outer_ttl
i1iI = i1iI . packet
if ( lisp . lisp_is_rloc_probe_request ( i1iI [ 28 ] ) == False and
lisp . lisp_is_rloc_probe_reply ( i1iI [ 28 ] ) == False ) : oooO = - 1
i1iI = i1iI [ 28 : : ]
lisp . lisp_parse_packet ( II1iII1i , i1iI , I1II1 , 0 , oooO )
if 86 - 86: o0oOOo0O0Ooo
return
if 5 - 5: IiII * OoOoOO00
if 5 - 5: I1Ii111
if 90 - 90: I1Ii111 . ooOoO0o / Ii1I - I11i
if 40 - 40: OoooooooOO
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
if 9 - 9: ooOoO0o * OoooooooOO - iIii1I11I1II1 + OoOoOO00 / OoO0O00 . OoO0O00
if ( Oo0O0 ) :
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( i1iI . packet ) )
if 49 - 49: II111iiii
if 25 - 25: OoooooooOO - I1IiiI . I1IiiI * oO0o
if 81 - 81: iII111i + IiII
if 98 - 98: I1IiiI
if 95 - 95: ooOoO0o / ooOoO0o
if ( i1iI . inner_dest . is_mac ( ) ) :
i1iI . packet = lisp . lisp_mac_input ( i1iI . packet )
if ( i1iI . packet == None ) : return
i1iI . encap_port = lisp . LISP_VXLAN_DATA_PORT
elif ( i1iI . inner_version == 4 ) :
i1iI . packet = lisp . lisp_ipv4_input ( i1iI . packet )
if ( i1iI . packet == None ) : return
i1iI . inner_ttl = i1iI . outer_ttl
elif ( i1iI . inner_version == 6 ) :
i1iI . packet = lisp . lisp_ipv6_input ( i1iI )
if ( i1iI . packet == None ) : return
i1iI . inner_ttl = i1iI . outer_ttl
else :
lisp . dprint ( "Cannot parse inner packet header" )
return
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
if 41 - 41: i1IIi - I11i - Ii1I
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if ( i1iI . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( i1iI , ed = "decap" ) == False ) : return
i1iI . outer_source . afi = lisp . LISP_AFI_NONE
i1iI . outer_dest . afi = lisp . LISP_AFI_NONE
if 44 - 44: II111iiii
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1
if 42 - 42: I1Ii111 . I1IiiI . i1IIi + OoOoOO00 + OOooOOo + I1IiiI
if 31 - 31: iII111i . OOooOOo - ooOoO0o . OoooooooOO / OoooooooOO
if 56 - 56: OoO0O00 / oO0o / i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
Iii1iiIi1II , OO0O00oOo = lisp . lisp_allow_gleaning ( i1iI . inner_source ,
i1iI . outer_source )
if ( Iii1iiIi1II ) :
lisp . lisp_glean_map_cache ( i1iI . inner_source , i1iI . outer_source ,
i1iI . udp_sport )
if 14 - 14: I1IiiI
IIiIiI1I , OO0O00oOo = lisp . lisp_allow_gleaning ( i1iI . inner_dest , None )
i1iI . gleaned_dest = IIiIiI1I
if 100 - 100: iIii1I11I1II1 + OoOoOO00 / Oo0Ooo . i11iIiiIii
if 14 - 14: o0oOOo0O0Ooo * OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
Oo00o0OO0O00o = lisp . lisp_map_cache_lookup ( i1iI . inner_source , i1iI . inner_dest )
if 82 - 82: I11i + OoooooooOO - i1IIi . i1IIi
if 6 - 6: o0oOOo0O0Ooo / I11i / II111iiii
if 27 - 27: OOooOOo * ooOoO0o . I1Ii111 % IiII * IiII . i1IIi
if 72 - 72: OOooOOo % I1ii11iIi11i + OoO0O00 / oO0o + IiII
if 10 - 10: I1Ii111 / ooOoO0o + i11iIiiIii / Ii1I
if ( Oo00o0OO0O00o and ( Oo00o0OO0O00o . action == lisp . LISP_NATIVE_FORWARD_ACTION or
Oo00o0OO0O00o . eid . address == 0 ) ) :
OOOoOoO = lisp . lisp_db_for_lookups . lookup_cache ( i1iI . inner_source , False )
if ( OOOoOoO and OOOoOoO . secondary_iid ) :
iIIIII1ii1I = i1iI . inner_dest
iIIIII1ii1I . instance_id = OOOoOoO . secondary_iid
if 13 - 13: i11iIiiIii + i1IIi * iIii1I11I1II1 % OoooooooOO - II111iiii * OOooOOo
Oo00o0OO0O00o = lisp . lisp_map_cache_lookup ( i1iI . inner_source , iIIIII1ii1I )
if ( Oo00o0OO0O00o ) :
i1iI . gleaned_dest = Oo00o0OO0O00o . gleaned
else :
IIiIiI1I , OO0O00oOo = lisp . lisp_allow_gleaning ( iIIIII1ii1I , None )
i1iI . gleaned_dest = IIiIiI1I
if 26 - 26: OoooooooOO * I1IiiI + OOooOOo
if 24 - 24: i11iIiiIii % iIii1I11I1II1 + OOooOOo / i11iIiiIii
if 70 - 70: OoO0O00 * O0 . I11i + I1IiiI . IiII
if 14 - 14: iIii1I11I1II1 % iIii1I11I1II1 * i11iIiiIii - OoO0O00 - I11i
if 63 - 63: OoO0O00
if 69 - 69: iIii1I11I1II1 . I1ii11iIi11i % ooOoO0o + iIii1I11I1II1 / O0 / I1ii11iIi11i
if 61 - 61: OOooOOo % OOooOOo * o0oOOo0O0Ooo / o0oOOo0O0Ooo
if 75 - 75: IiII . ooOoO0o
if 50 - 50: OoOoOO00
if 60 - 60: ooOoO0o * iIii1I11I1II1 * I1ii11iIi11i * Oo0Ooo
if ( Oo00o0OO0O00o == None and IIiIiI1I ) :
lisp . lprint ( "Suppress Map-Request for gleaned EID {}" . format ( lisp . green ( i1iI . inner_dest . print_address ( ) , False ) ) )
if 69 - 69: Ii1I * O0 . i11iIiiIii / Ii1I . o0oOOo0O0Ooo
return
if 63 - 63: I11i + o0oOOo0O0Ooo . II111iiii - I1IiiI
if 52 - 52: o0oOOo0O0Ooo % Oo0Ooo
if ( Oo00o0OO0O00o == None or Oo00o0OO0O00o . action == lisp . LISP_SEND_MAP_REQUEST_ACTION ) :
if ( lisp . lisp_rate_limit_map_request ( i1iI . inner_source ,
i1iI . inner_dest ) ) : return
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
i1iI . inner_source , i1iI . inner_dest , None )
if 64 - 64: O0 % I11i % O0 * OoO0O00 . oO0o + I1IiiI
if ( i1iI . is_trace ( ) ) :
I11iiiiI1i = oO0oIIII
O00 = "map-cache miss"
lisp . lisp_trace_append ( i1iI , reason = O00 , lisp_socket = I11iiiiI1i )
if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
return
if 28 - 28: I11i
if 58 - 58: OoOoOO00
if 37 - 37: Oo0Ooo - iIii1I11I1II1 / I1ii11iIi11i
if 73 - 73: i11iIiiIii - IiII
if 25 - 25: OoooooooOO + IiII * I1ii11iIi11i
if 92 - 92: I1IiiI + I11i + O0 / o0oOOo0O0Ooo + I1Ii111
if ( Oo00o0OO0O00o and Oo00o0OO0O00o . is_active ( ) and Oo00o0OO0O00o . has_ttl_elapsed ( ) and
Oo00o0OO0O00o . gleaned == False ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( Oo00o0OO0O00o . print_eid_tuple ( ) , False ) ) )
if 18 - 18: ooOoO0o * OoOoOO00 . iII111i / I1ii11iIi11i / i11iIiiIii
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
i1iI . inner_source , i1iI . inner_dest , None )
if 21 - 21: oO0o / I1ii11iIi11i + Ii1I + OoooooooOO
if 91 - 91: i11iIiiIii / i1IIi + iII111i + ooOoO0o * i11iIiiIii
if 66 - 66: iIii1I11I1II1 % i1IIi - O0 + I11i * I1Ii111 . IiII
if 52 - 52: ooOoO0o + O0 . iII111i . I1ii11iIi11i . OoO0O00
if 97 - 97: I1IiiI / iII111i
if 71 - 71: II111iiii / i1IIi . I1ii11iIi11i % OoooooooOO . OoOoOO00
Oo00o0OO0O00o . stats . increment ( len ( i1iI . packet ) )
if 41 - 41: i1IIi * II111iiii / OoooooooOO . OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
IIi1ii1Ii , OoOoO , o0 , ii1i , oOOoo , IIi1I11I1II = Oo00o0OO0O00o . select_rloc ( i1iI , None )
if 14 - 14: o0oOOo0O0Ooo * oO0o
if 81 - 81: Ii1I * o0oOOo0O0Ooo + I1Ii111 + Oo0Ooo - OoooooooOO
if ( IIi1ii1Ii == None and oOOoo == None ) :
if ( ii1i == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
i1iI . send_packet ( OOo , i1iI . inner_dest )
if 32 - 32: Ii1I * O0
if ( i1iI . is_trace ( ) ) :
I11iiiiI1i = oO0oIIII
O00 = "not an EID"
lisp . lisp_trace_append ( i1iI , reason = O00 , lisp_socket = I11iiiiI1i )
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
return
if 92 - 92: ooOoO0o
O00 = "No reachable RLOCs found"
lisp . dprint ( O00 )
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if ( i1iI . is_trace ( ) ) :
I11iiiiI1i = oO0oIIII
lisp . lisp_trace_append ( i1iI , reason = O00 , lisp_socket = I11iiiiI1i )
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
return
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
if ( IIi1ii1Ii and IIi1ii1Ii . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
if 92 - 92: I11i . I1Ii111
if ( i1iI . is_trace ( ) ) :
I11iiiiI1i = oO0oIIII
O00 = "drop action"
lisp . lisp_trace_append ( i1iI , reason = O00 , lisp_socket = I11iiiiI1i )
if 85 - 85: I1ii11iIi11i . I1Ii111
return
if 78 - 78: ooOoO0o * I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1 / I1Ii111 . Ii1I
if 97 - 97: ooOoO0o / I1Ii111 % i1IIi % I1ii11iIi11i
if 18 - 18: iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
i1iI . outer_tos = i1iI . inner_tos
i1iI . outer_ttl = i1iI . inner_ttl
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if ( IIi1ii1Ii ) :
i1iI . encap_port = OoOoO
if ( OoOoO == 0 ) : i1iI . encap_port = lisp . LISP_DATA_PORT
i1iI . outer_dest . copy_address ( IIi1ii1Ii )
iI1iIIIi1i = i1iI . outer_dest . afi_to_version ( )
i1iI . outer_version = iI1iIIIi1i
if 89 - 89: iIii1I11I1II1
i11iiiiI1i = iiIIIIi1i1 if ( iI1iIIIi1i == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 37 - 37: OOooOOo / OoooooooOO - i11iIiiIii
i1iI . outer_source . copy_address ( i11iiiiI1i )
if 18 - 18: iII111i . I1IiiI
if ( i1iI . is_trace ( ) ) :
I11iiiiI1i = oO0oIIII
if ( lisp . lisp_trace_append ( i1iI , rloc_entry = IIi1I11I1II ,
lisp_socket = I11iiiiI1i ) == False ) : return
if 40 - 40: O0 - OoooooooOO - IiII
if 37 - 37: OoOoOO00 / II111iiii / O0
if 76 - 76: I1IiiI . ooOoO0o - I1ii11iIi11i - iII111i * OoO0O00
if 54 - 54: IiII + O0 + I11i * I1Ii111 - OOooOOo % oO0o
if 13 - 13: ooOoO0o / iII111i * OoO0O00 . OoO0O00 * ooOoO0o
if ( i1iI . encode ( o0 ) == None ) : return
if ( len ( i1iI . packet ) <= 1500 ) : i1iI . print_packet ( "Send" , True )
if 63 - 63: I1Ii111 / O0 * Oo0Ooo + II111iiii / IiII + Ii1I
if 63 - 63: OoO0O00 + I1ii11iIi11i . I1Ii111 % I1Ii111
if 57 - 57: II111iiii
if 54 - 54: Oo0Ooo + oO0o + i11iIiiIii
i1i1ii111 = Ii1IIii11 if iI1iIIIi1i == 6 else OOo
i1iI . send_packet ( i1i1ii111 , i1iI . outer_dest )
if 3 - 3: II111iiii / OOooOOo + IiII . ooOoO0o . OoO0O00
elif ( oOOoo ) :
if 83 - 83: oO0o + OoooooooOO
if 22 - 22: Ii1I % iII111i * OoooooooOO - o0oOOo0O0Ooo / iIii1I11I1II1
if 86 - 86: OoooooooOO . iII111i % OoOoOO00 / I11i * iII111i / o0oOOo0O0Ooo
if 64 - 64: i11iIiiIii
I1II = len ( i1iI . packet )
for I1iIiI11I1 in oOOoo . rle_forwarding_list :
i1iI . outer_dest . copy_address ( I1iIiI11I1 . address )
i1iI . encap_port = lisp . LISP_DATA_PORT if I1iIiI11I1 . translated_port == 0 else I1iIiI11I1 . translated_port
if 27 - 27: Ii1I . i11iIiiIii % I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
iI1iIIIi1i = i1iI . outer_dest . afi_to_version ( )
i1iI . outer_version = iI1iIIIi1i
i11iiiiI1i = lisp . lisp_myrlocs [ 0 ] if ( iI1iIIIi1i == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 38 - 38: OoOoOO00 / iII111i % Oo0Ooo
i1iI . outer_source . copy_address ( i11iiiiI1i )
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
if ( i1iI . is_trace ( ) ) :
I11iiiiI1i = oO0oIIII
O00 = "replicate"
if ( lisp . lisp_trace_append ( i1iI , reason = O00 , lisp_socket = I11iiiiI1i ) == False ) : return
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if ( i1iI . encode ( None ) == None ) : return
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
i1iI . print_packet ( "Replicate-to-L{}" . format ( I1iIiI11I1 . level ) , True )
i1iI . send_packet ( OOo , i1iI . outer_dest )
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
ii1Ii1IiIIi = len ( i1iI . packet ) - I1II
i1iI . packet = i1iI . packet [ ii1Ii1IiIIi : : ]
if 83 - 83: I11i / I1ii11iIi11i
if ( lisp . lisp_flow_logging ) : i1iI = copy . deepcopy ( i1iI )
if 34 - 34: I1IiiI * Oo0Ooo * I1Ii111 / OoO0O00 * I11i / iIii1I11I1II1
if 74 - 74: Oo0Ooo / i11iIiiIii - II111iiii * o0oOOo0O0Ooo
if 5 - 5: OOooOOo - OOooOOo . Oo0Ooo + OoOoOO00 - OOooOOo . oO0o
if 31 - 31: II111iiii - iIii1I11I1II1 - iIii1I11I1II1 % I11i
if 12 - 12: iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo / i1IIi
del ( i1iI )
return
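# Worker-thread loop: block on this thread's input queue, count the packet in
# the input stats, and run it through the data-plane routine above.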
def II1i1i1iII1 ( lisp_thread ) :
lisp . lisp_set_exception ( )
while ( True ) :
if 68 - 68: Oo0Ooo + i11iIiiIii
if 69 - 69: iIii1I11I1II1 * iIii1I11I1II1 * i11iIiiIii + I1IiiI / OOooOOo % Ii1I
if 58 - 58: OOooOOo * o0oOOo0O0Ooo + O0 % OOooOOo
if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
i1iI = lisp_thread . input_queue . get ( )
if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
if 98 - 98: i1IIi
if 65 - 65: OoOoOO00 / OoO0O00 % IiII
if 45 - 45: OoOoOO00
lisp_thread . input_stats . increment ( len ( i1iI ) )
if 66 - 66: OoO0O00
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
lisp_thread . lisp_packet . packet = i1iI
if 12 - 12: I1IiiI / ooOoO0o % o0oOOo0O0Ooo / i11iIiiIii % OoooooooOO
if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
if 11 - 11: iII111i * Ii1I - OoOoOO00
if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
I1111i ( lisp_thread . lisp_packet , lisp_thread . thread_name )
if 74 - 74: Oo0Ooo
return
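# Crude load-spreading check: use the current time modulo the number of pcap
# threads to decide whether this pcap thread should process the packet.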
def OO0OO00oo0 ( thread ) :
iIIIiIii = ( time . time ( ) % thread . number_of_pcap_threads )
return ( int ( iIIIiIii ) == thread . thread_number )
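# pcappy capture callback. Strips the link-layer header (4 bytes for "lo0",
# otherwise 14 on macOS and 16 elsewhere, i.e. Linux cooked capture), then
# either round-robins the packet onto a worker thread's input queue or, when
# no worker threads are configured, processes it inline.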
def OoO000O ( parms , not_used , packet ) :
if ( OO0OO00oo0 ( parms [ 1 ] ) == False ) : return
if 94 - 94: OoOoOO00 . O0 / Ii1I . I1ii11iIi11i - i1IIi
iIi1III1I = parms [ 0 ]
oo0oo0OOOOOoO = parms [ 1 ]
Oo0000O0OOooO = oo0oo0OOOOOoO . number_of_worker_threads
if 54 - 54: I11i / I1IiiI * oO0o + OoooooooOO - iII111i / OoooooooOO
oo0oo0OOOOOoO . input_stats . increment ( len ( packet ) )
if 19 - 19: IiII * ooOoO0o * o0oOOo0O0Ooo + O0 / O0
if 73 - 73: iIii1I11I1II1 / iIii1I11I1II1 - oO0o
if 91 - 91: oO0o + I1IiiI
if 59 - 59: I1IiiI + i11iIiiIii + i1IIi / I11i
if 44 - 44: I11i . OoOoOO00 * I1IiiI + OoooooooOO - iII111i - IiII
if 15 - 15: IiII / O0 . o0oOOo0O0Ooo . i11iIiiIii
o0OO0O0Oo = 4 if iIi1III1I == "lo0" else ( 14 if lisp . lisp_is_macos ( ) else 16 )
packet = packet [ o0OO0O0Oo : : ]
if 78 - 78: OoOoOO00 / Oo0Ooo - OOooOOo - iII111i * oO0o
if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if ( Oo0000O0OOooO ) :
ii1I11iIiIII1 = oo0oo0OOOOOoO . input_stats . packet_count % Oo0000O0OOooO
ii1I11iIiIII1 = ii1I11iIiIII1 + ( len ( I11 ) - Oo0000O0OOooO )
oOO0OOOOoooO = I11 [ ii1I11iIiIII1 ]
oOO0OOOOoooO . input_queue . put ( packet )
else :
oo0oo0OOOOOoO . lisp_packet . packet = packet
I1111i ( oo0oo0OOOOOoO . lisp_packet , oo0oo0OOOOOoO . thread_name )
if 22 - 22: I11i + iIii1I11I1II1
return
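# Packet-capture thread: opens pcap on "lo0" (macOS) or "any", checks
# ./lisp.config for "lisp-nat = yes", and installs a BPF filter that appears to
# match LISP/VXLAN data ports 4341/8472/4789 addressed to local RLOCs (plus
# their fragments), RLOC-probe Map-Requests/Map-Replies on port 4342 that were
# not sent by this host, and, when lisp-nat is enabled, all remaining IPv4
# traffic not addressed to this host. It then loops forever feeding packets to
# the callback above. Editorial note: commands.getoutput() is Python 2 only;
# subprocess.getoutput() is the Python 3 equivalent.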
def OooOOo0 ( lisp_thread ) :
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
if 51 - 51: OoOoOO00
iIi1III1I = "lo0" if lisp . lisp_is_macos ( ) else "any"
I11IIIiIi11 = pcappy . open_live ( iIi1III1I , 9000 , 0 , 100 )
if 39 - 39: Ii1I % O0 % OoOoOO00 . i1IIi
if 86 - 86: OoO0O00 * OoooooooOO
if 71 - 71: iIii1I11I1II1 - OOooOOo . I1IiiI % OoooooooOO + OOooOOo
if 26 - 26: Oo0Ooo + OOooOOo / OoO0O00 % OoOoOO00 % I1ii11iIi11i + II111iiii
if 31 - 31: I11i % OOooOOo * I11i
IiI = commands . getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
IiI = ( IiI != "" and IiI [ 0 ] == " " )
if 34 - 34: I11i % ooOoO0o . O0 . iIii1I11I1II1
oo = "(dst host "
i1II1I = ""
for III1Iiii1I11 in lisp . lisp_get_all_addresses ( ) :
oo += "{} or " . format ( III1Iiii1I11 )
i1II1I += "{} or " . format ( III1Iiii1I11 )
if 95 - 95: OoO0O00 - OOooOOo / II111iiii % I1ii11iIi11i . o0oOOo0O0Ooo
oo = oo [ 0 : - 4 ]
oo += ") and ((udp dst port 4341 or 8472 or 4789) or "
oo += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0))))"
if 24 - 24: i1IIi . i11iIiiIii
if 16 - 16: Oo0Ooo % I1ii11iIi11i + I11i - O0 . iII111i / I1Ii111
if 35 - 35: oO0o / I1Ii111 / II111iiii - iIii1I11I1II1 + II111iiii . I1Ii111
if 81 - 81: iII111i * OOooOOo - I1ii11iIi11i * Ii1I % OoOoOO00 * OoOoOO00
if 59 - 59: iIii1I11I1II1
if 7 - 7: OOooOOo * I1IiiI / o0oOOo0O0Ooo * i11iIiiIii
i1II1I = i1II1I [ 0 : - 4 ]
oo += ( " or (not (src host {}) and " + "((udp src port 4342 and ip[28] == 0x28) or " + "(udp dst port 4342 and ip[28] == 0x12)))" ) . format ( i1II1I )
if 84 - 84: OOooOOo . iII111i
if 8 - 8: Oo0Ooo + II111iiii * OOooOOo * OoOoOO00 * I11i / IiII
if 21 - 21: oO0o / OoooooooOO
if ( IiI ) :
oo += " or (dst net 0.0.0.0/0 and not (host {}))" . format ( i1II1I )
if 11 - 11: OOooOOo % Ii1I - i11iIiiIii - oO0o + ooOoO0o + IiII
if 87 - 87: I1Ii111 * i1IIi / I1ii11iIi11i
lisp . lprint ( "Capturing packets for: '{}'" . format ( oo ) )
I11IIIiIi11 . filter = oo
if 6 - 6: o0oOOo0O0Ooo + Oo0Ooo - OoooooooOO % OOooOOo * OoOoOO00
if 69 - 69: i1IIi
if 59 - 59: II111iiii - o0oOOo0O0Ooo
if 24 - 24: Oo0Ooo - i1IIi + I11i
I11IIIiIi11 . loop ( - 1 , OoO000O , [ iIi1III1I , lisp_thread ] )
return
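# Periodic housekeeping, rescheduled every 60 seconds: free crypto keys that
# are keyed by nonce, time out map-cache entries, and clear the RTR's NAT
# trace cache.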
def oO0oooooo ( ) :
lisp . lisp_set_exception ( )
if 65 - 65: IiII + Oo0Ooo
if 59 - 59: OoooooooOO + I11i . I1Ii111 - O0 % iIii1I11I1II1 / O0
if 88 - 88: Oo0Ooo . O0 % OoooooooOO / OOooOOo
if 89 - 89: II111iiii / oO0o
for O0ooO0Oo00o in lisp . lisp_crypto_keys_by_nonce . values ( ) :
for II in O0ooO0Oo00o : del ( II )
if 87 - 87: ooOoO0o + o0oOOo0O0Ooo
lisp . lisp_crypto_keys_by_nonce . clear ( )
lisp . lisp_crypto_keys_by_nonce = { }
if 28 - 28: OOooOOo * I1ii11iIi11i / oO0o
if 64 - 64: oO0o - I1IiiI / iII111i - OoO0O00
if 37 - 37: i11iIiiIii / iII111i
if 85 - 85: i11iIiiIii + I1Ii111 * OoOoOO00
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 1 - 1: i1IIi / Oo0Ooo . OoO0O00
if 57 - 57: I11i . Oo0Ooo + II111iiii
if 43 - 43: I1Ii111 % iII111i
if 69 - 69: iII111i % OoO0O00
if 86 - 86: oO0o / oO0o
lisp . lisp_rtr_nat_trace_cache . clear ( )
lisp . lisp_rtr_nat_trace_cache = { }
if 28 - 28: i11iIiiIii / o0oOOo0O0Ooo . iIii1I11I1II1 / II111iiii
if 72 - 72: OoooooooOO / I1IiiI + Ii1I / OoOoOO00 * Ii1I
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
Oooo0000 = threading . Timer ( 60 , oO0oooooo , [ ] )
Oooo0000 . start ( )
return
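# RTR startup: initialize the lisp library as an RTR, discover local RLOCs
# (with an eth0 special case on AWS), open the control listen socket, the
# "lisp-rtr" and "lispers.net-itr" IPC sockets, raw IPv4/IPv6 send sockets and
# the LISP-Trace listen socket, start the pcap and worker threads (sized by
# the LISP_PCAP_THREADS / LISP_WORKER_THREADS environment variables, which
# default to 1 and 0), load the map-cache checkpoint, and start the 60-second
# housekeeping timer. Returns True on success.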
def iIIi1iiI1i11 ( ) :
global Oo0oO0oo0oO00 , II1iII1i , II1Ii1iI1i
global OOo , Ii1IIii11 , I11
global i111I , oO0oIIII
global iiIIIIi1i1
if 56 - 56: OoooooooOO
lisp . lisp_i_am ( "rtr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "RTR starting up" )
if 30 - 30: i11iIiiIii + oO0o
if 38 - 38: IiII . Ii1I
if 24 - 24: o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + I1IiiI - oO0o
if 12 - 12: iII111i . IiII . OoOoOO00 / O0
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 58 - 58: o0oOOo0O0Ooo - II111iiii % oO0o + I1Ii111 . OoOoOO00 / IiII
if 8 - 8: I1ii11iIi11i . OoO0O00 * I11i + II111iiii % i11iIiiIii
if 8 - 8: ooOoO0o * O0
if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
iiIIIIi1i1 = lisp . lisp_myrlocs [ 0 ]
if ( lisp . lisp_on_aws ( ) ) :
iiIIIIi1i1 = lisp . lisp_get_interface_address ( "eth0" )
if 34 - 34: ooOoO0o
if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
oOo0OooOo = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
II1Ii1iI1i = lisp . lisp_open_listen_socket ( oOo0OooOo ,
str ( iiI1iIiI ) )
Oo0oO0oo0oO00 = lisp . lisp_open_listen_socket ( "" , "lisp-rtr" )
i111I = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
if 51 - 51: I11i . Oo0Ooo
II1iII1i [ 0 ] = II1Ii1iI1i
if 45 - 45: i1IIi - Oo0Ooo / O0 . I1ii11iIi11i
II1iII1i [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
II1iII1i [ 2 ] = Oo0oO0oo0oO00
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
if 6 - 6: IiII * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / i1IIi
if 53 - 53: I11i + iIii1I11I1II1
if 70 - 70: I1ii11iIi11i
if 67 - 67: OoooooooOO
if 29 - 29: O0 - i11iIiiIii - II111iiii + OOooOOo * IiII
OOo = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
OOo . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
II1iII1i . append ( OOo )
if 2 - 2: i1IIi - ooOoO0o + I1IiiI . o0oOOo0O0Ooo * o0oOOo0O0Ooo / OoOoOO00
if 93 - 93: i1IIi
if 53 - 53: OoooooooOO + Oo0Ooo + oO0o
if 24 - 24: iII111i - IiII - iII111i * I1ii11iIi11i . OoooooooOO / IiII
if 66 - 66: Oo0Ooo
oO0oIIII = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( lisp . LISP_TRACE_PORT ) )
if 97 - 97: i1IIi - OoooooooOO / I1Ii111 * I1IiiI
if ( lisp . lisp_is_raspbian ( ) == False ) :
Ii1IIii11 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
if 55 - 55: o0oOOo0O0Ooo . iII111i
if 87 - 87: o0oOOo0O0Ooo % iIii1I11I1II1
O00Iii1111III111 = os . getenv ( "LISP_PCAP_THREADS" )
O00Iii1111III111 = 1 if ( O00Iii1111III111 == None ) else int ( O00Iii1111III111 )
Ii1 = os . getenv ( "LISP_WORKER_THREADS" )
Ii1 = 0 if ( Ii1 == None ) else int ( Ii1 )
if 62 - 62: i1IIi - i1IIi
if 69 - 69: OoOoOO00 % oO0o - I11i
if 38 - 38: iIii1I11I1II1 + i11iIiiIii / i11iIiiIii % OoO0O00 / ooOoO0o % Ii1I
if 7 - 7: IiII * I1IiiI + i1IIi + i11iIiiIii + Oo0Ooo % I1IiiI
for OO00OO0o0 in range ( O00Iii1111III111 ) :
oOOOooOo0O = lisp . lisp_thread ( "pcap-{}" . format ( OO00OO0o0 ) )
oOOOooOo0O . thread_number = OO00OO0o0
oOOOooOo0O . number_of_pcap_threads = O00Iii1111III111
oOOOooOo0O . number_of_worker_threads = Ii1
I11 . append ( oOOOooOo0O )
threading . Thread ( target = OooOOo0 , args = [ oOOOooOo0O ] ) . start ( )
if 43 - 43: o0oOOo0O0Ooo . iII111i . I11i + iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 % OoOoOO00 + I1ii11iIi11i / i1IIi % II111iiii + OOooOOo
if 91 - 91: iIii1I11I1II1 % OoO0O00 . o0oOOo0O0Ooo + Ii1I + o0oOOo0O0Ooo
if 95 - 95: Ii1I + I1ii11iIi11i * OOooOOo
if 16 - 16: I11i / I1IiiI + OoO0O00 % iIii1I11I1II1 - i1IIi . oO0o
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
for OO00OO0o0 in range ( Ii1 ) :
oOOOooOo0O = lisp . lisp_thread ( "worker-{}" . format ( OO00OO0o0 ) )
I11 . append ( oOOOooOo0O )
threading . Thread ( target = II1i1i1iII1 , args = [ oOOOooOo0O ] ) . start ( )
if 59 - 59: O0 + i1IIi - o0oOOo0O0Ooo
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
lisp . lisp_load_checkpoint ( )
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
Oooo0000 = threading . Timer ( 60 , oO0oooooo , [ ] )
Oooo0000 . start ( )
return ( True )
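# RTR shutdown: close the control, IPC, trace and raw sockets opened at
# startup.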
def oo0ooooO ( ) :
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
lisp . lisp_close_socket ( II1iII1i [ 0 ] , "" )
lisp . lisp_close_socket ( II1iII1i [ 1 ] , "" )
lisp . lisp_close_socket ( Oo0oO0oo0oO00 , "lisp-rtr" )
lisp . lisp_close_socket ( II1Ii1iI1i , "" )
lisp . lisp_close_socket ( oO0oIIII , "" )
lisp . lisp_close_socket ( i111I , "lispers.net-itr" )
OOo . close ( )
return
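# "lisp map-resolver" command handler: delegate to lispconfig and schedule a
# one-shot timer that tests reachability of the configured Map-Resolvers.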
def o0O0O0 ( kv_pair ) :
global II1iII1i
global iiI1iIiI
if 55 - 55: O0 - I1Ii111
lispconfig . lisp_map_resolver_command ( kv_pair )
if 58 - 58: OoOoOO00 - iII111i - OoooooooOO
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ II1iII1i , iiI1iIiI ] )
lisp . lisp_test_mr_timer . start ( )
if 96 - 96: iIii1I11I1II1
return
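# "lisp xtr-parameters" command handler: apply the parameters via lispconfig;
# if RLOC-probing was just enabled, start the probe timer and push the ITR
# crypto port to the external data-plane socket; always forward the current
# debug/data-plane logging settings over IPC.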
def ooOO0o ( kv_pair ) :
global II1Ii1iI1i , OOo , iiI1iIiI
if 51 - 51: Oo0Ooo - I1ii11iIi11i * I11i
ii1111Ii1i = lisp . lisp_rloc_probing
if 48 - 48: O0 * Ii1I - O0 / Ii1I + OoOoOO00
if 52 - 52: OoO0O00 % Ii1I * II111iiii
if 4 - 4: I11i % O0 - OoooooooOO + ooOoO0o . oO0o % II111iiii
if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
lispconfig . lisp_xtr_command ( kv_pair )
if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if ( ii1111Ii1i == False and lisp . lisp_rloc_probing ) :
i1IiiiI1iI = [ II1Ii1iI1i , II1Ii1iI1i ,
None , OOo ]
lisp . lisp_start_rloc_probe_timer ( 1 , i1IiiiI1iI )
OOO0OOO00oo = { "type" : "itr-crypto-port" , "port" : iiI1iIiI }
lisp . lisp_write_to_dp_socket ( OOO0OOO00oo )
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
if 29 - 29: O0 . I1Ii111
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
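# Command-dispatch table: maps configuration clauses and show commands to the
# handler functions defined above, together with the keywords and value ranges
# each clause accepts.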
o0oO0Oo = {
"lisp xtr-parameters" : [ ooOO0o , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"dynamic-eid-device" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ o0O0O0 , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp rtr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp database-mapping" : [ i1iiI11I , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp glean-mapping" : [ iIii11I , {
"instance-id" : [ False ] ,
"eid-prefix" : [ True ] ,
"rloc-prefix" : [ True ] ,
"rloc-probe" : [ True , "yes" , "no" ] } ] ,
"show rtr-rloc-probing" : [ oOOo0 , { } ] ,
"show rtr-keys" : [ II1i1Ii11Ii11 , { } ] ,
"show rtr-map-cache" : [ I1Ii , { } ] ,
"show rtr-map-cache-dns" : [ IIiiIiI1 , { } ]
}
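# Receives a LISP-Trace packet on the trace socket, decodes it, and caches the
# translated source address/port so later trace packets can traverse the NAT.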
def o00 ( lisp_socket ) :
if 46 - 46: iIii1I11I1II1 * I1Ii111 - iIii1I11I1II1 . OoOoOO00 - I1Ii111
if 5 - 5: OoooooooOO . I1IiiI . OoOoOO00 % I1ii11iIi11i / iII111i
if 11 - 11: i1IIi % OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
i1II1i , I1II1 , i1iIi , i1iI = lisp . lisp_receive ( lisp_socket , False )
I1iIiiiI1 = lisp . lisp_trace ( )
if ( I1iIiiiI1 . decode ( i1iI ) == False ) : return
if 42 - 42: OOooOOo % oO0o / OoO0O00 - oO0o * i11iIiiIii
if 19 - 19: oO0o * I1IiiI % i11iIiiIii
if 24 - 24: o0oOOo0O0Ooo
if 10 - 10: o0oOOo0O0Ooo % Ii1I / OOooOOo
if 28 - 28: OOooOOo % ooOoO0o
I1iIiiiI1 . rtr_cache_nat_trace ( I1II1 , i1iIi )
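# Main loop: run startup (exit on failure), then select() over the punt IPC
# socket, the LISP-Trace socket, the control socket and the "lisp-rtr" command
# socket. RLOC-probe requests/replies on the control socket are ignored here
# because they are handled via pcap; the command socket carries "clear"
# commands, configuration/show commands dispatched through the table above,
# API calls and punted data packets. When a receive returns an empty source
# the loop ends and the RTR shuts down cleanly.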
if ( iIIi1iiI1i11 ( ) == False ) :
lisp . lprint ( "lisp_rtr_startup() failed" )
lisp . lisp_print_banner ( "RTR abnormal exit" )
exit ( 1 )
if 6 - 6: IiII
if 46 - 46: IiII + oO0o
Oo00o0O0O = [ II1Ii1iI1i , Oo0oO0oo0oO00 ,
i111I , oO0oIIII ]
o0ooO0OoOo = [ II1Ii1iI1i ] * 3
if 99 - 99: OoOoOO00
while ( True ) :
try : oO00OoOo , OoO , i111i = select . select ( Oo00o0O0O , [ ] , [ ] )
except : break
if 46 - 46: OoO0O00 * Oo0Ooo % oO0o + O0 * IiII
if 34 - 34: OoO0O00
if 27 - 27: Ii1I - O0 % I11i * I1Ii111 . IiII % iIii1I11I1II1
if 37 - 37: OoooooooOO + O0 - i1IIi % ooOoO0o
if ( lisp . lisp_ipc_data_plane and i111I in oO00OoOo ) :
lisp . lisp_process_punt ( i111I , II1iII1i ,
iiI1iIiI )
if 24 - 24: OoOoOO00
if 94 - 94: i1IIi * i1IIi % II111iiii + OOooOOo
if 28 - 28: I1IiiI
if 49 - 49: I11i . o0oOOo0O0Ooo % oO0o / Ii1I
if 95 - 95: O0 * OoOoOO00 * IiII . ooOoO0o / iIii1I11I1II1
if ( oO0oIIII in oO00OoOo ) :
o00 ( oO0oIIII )
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if 79 - 79: OoOoOO00 / ooOoO0o
if 77 - 77: Oo0Ooo
if ( II1Ii1iI1i in oO00OoOo ) :
i1II1i , I1II1 , i1iIi , i1iI = lisp . lisp_receive ( o0ooO0OoOo [ 0 ] ,
False )
if ( I1II1 == "" ) : break
if ( lisp . lisp_is_rloc_probe_request ( i1iI [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe request, using pcap" )
continue
if 46 - 46: I1Ii111
if ( lisp . lisp_is_rloc_probe_reply ( i1iI [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe reply, using pcap" )
continue
if 72 - 72: iII111i * OOooOOo
lisp . lisp_parse_packet ( o0ooO0OoOo , i1iI , I1II1 , i1iIi )
if 67 - 67: i1IIi
if 5 - 5: II111iiii . OoooooooOO
if 57 - 57: I1IiiI
if 35 - 35: OoooooooOO - I1Ii111 / OoO0O00
if 50 - 50: OoOoOO00
if 33 - 33: I11i
if ( Oo0oO0oo0oO00 in oO00OoOo ) :
i1II1i , I1II1 , i1iIi , i1iI = lisp . lisp_receive ( Oo0oO0oo0oO00 , True )
if 98 - 98: OoOoOO00 % II111iiii
if ( I1II1 == "" ) : break
if 95 - 95: iIii1I11I1II1 - I1Ii111 - OOooOOo + I1Ii111 % I1ii11iIi11i . I1IiiI
if ( i1II1i == "command" ) :
if ( i1iI == "clear" ) :
lisp . lisp_clear_map_cache ( )
continue
if 41 - 41: O0 + oO0o . i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
if ( i1iI . find ( "clear%" ) != - 1 ) :
lispconfig . lisp_clear_decap_stats ( i1iI )
continue
if 68 - 68: o0oOOo0O0Ooo
lispconfig . lisp_process_command ( Oo0oO0oo0oO00 , i1II1i ,
i1iI , "lisp-rtr" , [ o0oO0Oo ] )
elif ( i1II1i == "api" ) :
lisp . lisp_process_api ( "lisp-rtr" , Oo0oO0oo0oO00 , i1iI )
elif ( i1II1i == "data-packet" ) :
I1111i ( i1iI , "" )
else :
if ( lisp . lisp_is_rloc_probe_request ( i1iI [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe request, using pcap" )
continue
if 20 - 20: I1Ii111 - I1Ii111
if ( lisp . lisp_is_rloc_probe_reply ( i1iI [ 0 ] ) ) :
lisp . lprint ( "RTR ignoring RLOC-probe reply, using pcap" )
continue
if 37 - 37: IiII
lisp . lisp_parse_packet ( II1iII1i , i1iI , I1II1 , i1iIi )
if 37 - 37: Oo0Ooo / IiII * O0
if 73 - 73: iII111i * iII111i / ooOoO0o
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
if 41 - 41: I1ii11iIi11i + Ii1I % OoooooooOO . I1ii11iIi11i + iII111i . iII111i
if 31 - 31: i11iIiiIii + II111iiii . iII111i * OoOoOO00
oo0ooooO ( )
lisp . lisp_print_banner ( "RTR normal exit" )
exit ( 0 )
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
wallet_multiwallet.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Widecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a widecoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import WidecoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
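# Helper run from several threads at once: repeatedly load and unload the same
# wallet until one call races another and returns the -4 "Wallet already being
# loading" error, which is recorded in the module-level flag above.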
def test_load_unload(node, name):
global got_loading_error
for _ in range(10):
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(WidecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Too many levels of symbolic links', 'Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB refuses to open duplicate wallet files; SQLite does not have this limitation. While that behavior may be desirable in the future, it is not necessary today.
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
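# Point node 0 at a fresh, separate walletdir, then verify node 1 refuses to
# start when given that same walletdir (SQLite reports an exclusive-lock
# failure, BDB an environment initialization error).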
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another widecoind?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another widecoind?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB refuses to open duplicate wallet files; SQLite does not have this limitation. While that behavior may be desirable in the future, it is not necessary.
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w2"),
assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w1"),
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
cli.py
|
from multiprocessing import Process
from pathlib import Path
from time import sleep
import click
import libtmux
import structlog
import sys
from . import GeneralConfiguration
from . import ServerConfiguration
from . import ClientConfiguration
from .log import configure_logs
from .client import Client
from .repl import Repl
from .server import Server
from .song import Song
logger = structlog.get_logger(__name__)
configure_logs(log_level="INFO") # TODO get from config info if not debug
general_configuration = GeneralConfiguration()
server_configuration = ServerConfiguration()
client_configuration = ClientConfiguration()
# @cli.command() #TODO: finish this
# @click.option("--config", help="runs configuration by name", type=(str), multiple=True)
# @click.option("--message", help="message to send", default="hello", type=(str))
# def client_config(config, message):
# client_configuration = ClientConfiguration()
# client_processes = []
# for c in config:
# ip = client_configuration[c]["IP"]
# port = client_configuration[c]["PORT"]
#
# logger.info("Running client... ", ip=ip, port=port)
# click.echo("Running client...")
#
# client = Client(ip=ip, port=port)
# client_processes.append(Process(target=client.send, kwargs={"message": message}))
#
# for p in client_processes:
# p.start()
#
# for p in client_processes:
# p.join()
#
def parse_config_args(config): # TODO: need to make a different one for the client side.
CONFIG = server_configuration
ip = CONFIG[config]["IP"]
port = CONFIG[config]["PORT"]
token = CONFIG[config]["VOICE_TOKEN"]
channel = CONFIG[config]["CHANNEL_ID"]
return ip, port, token, channel
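# The server configuration object is indexed by section name and must expose the
# keys read above. A sketch of an equivalent INI-style section (the section name
# "myserver" and all values are placeholders, and INI is only an assumption about
# how ServerConfiguration is backed):
#   [myserver]
#   IP = 127.0.0.1
#   PORT = 5000
#   VOICE_TOKEN = <discord-bot-token>
#   CHANNEL_ID = <voice-channel-id>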
def server_instances_from_configuration_file_in_tmux(config):
cli_path = Path(__file__).parent
server = libtmux.Server()
session = server.new_session(session_name="Discorgeous Servers", window_name="Master")
for section in config:
print("building", section) # TODO: LOGGING
ip, port, token, channel = parse_config_args(section)
window = session.new_window(section)
pane = window.select_pane(target_pane=0)
pane.send_keys(
f"python3 {cli_path} server --single --ip {ip} --port {port} --token {token} --channel {channel}"
)
# attach the tmux session once all server windows have been created
session.attach_session()
def server_instances_from_configuration_file(config):
CONFIG = server_configuration
server_processes = []
for c in config:
logger.info("Building server configuration:", section=config)
ip, port, token, channel = parse_config_args(c)
server = Server(ip=ip, port=port, channel_id=channel, bot_token=token)
server_processes.append(Process(target=server.run))
logger.info(
"Creating server process... ", ip=ip, port=port, channel=channel, token=token[:5]
)
click.echo("Creating server process...")
for p in server_processes:
p.start()
logger.info(
"Starting server process... ", ip=ip, port=port, channel=channel, token=token[:5]
)
for p in server_processes:
p.join()
logger.info(
"Joining server process... ", ip=ip, port=port, channel=channel, token=token[:5]
)
@click.group()
def cli():
pass
@cli.command()
@click.option("--ip", default="127.0.0.1", help="IP address")
@click.option("--port", default="5000", help="Port")
@click.option(
"--single", help="runs one instance of the server. requires --channel --token", is_flag=True
)
@click.option("--config", help="runs configuration by name", type=(str), multiple=True)
@click.option("--tmux", help="runs --config in tmux", is_flag=True, default=False)
@click.option("--channel", help="runs configuration by name", type=(str), multiple=True)
@click.option("--token", help="runs configuration by name", type=(str), multiple=True)
def server(ip, port, channel, token, single, config, tmux):
"""Runs the discord bot server."""
if single:
validate_single = {"channel": channel, "token": token}
for key, arg in validate_single.items():
assert (
len(arg[0]) > 0
), f"{key} is empty. Please add the --{key} flag with the approprate information."
logger.info(
"Running server... ", ip=ip, port=port, channel=channel[0][:5], token=token[0][:5]
)
click.echo("Running server...")
server = Server(ip=ip, port=port, channel_id=channel[0], bot_token=token[0])
server.run()
elif len(config) > 0:
click.echo("running config")
if tmux:
click.echo("running in tmux")
server_instances_from_configuration_file_in_tmux(config)
else:
server_instances_from_configuration_file(config)
else:
click.echo("Please choose a config file or run in single mode.")
@cli.command()
@click.option("--ip", default="127.0.0.1", help="IP address")
@click.option("--port", default="5000", help="Port")
@click.option("--message", help="message to send", default="hello", type=(str))
def client(ip, port, message):
"""Send a single message to server."""
logger.info("Running client... ", ip=ip, port=port)
click.echo("Running client...")
client = Client(ip=ip, port=port)
ack = client.send(message=message)
if ack:
logger.info("Client succesfully send message... ", ip=ip, port=port, ack=ack)
else:
logger.info("Client did not send message... ", ip=ip, port=port, ack=ack)
@cli.command(name="client-ssh")
@click.option("--ip", default="127.0.0.1", help="IP address")
@click.option("--port", default="5000", help="Port")
@click.option("--message", help="message to send", default="hello", type=(str))
def client_ssh(ip, port, message):
"""ssh client not implemented."""
raise NotImplementedError("Work in progress.")
@cli.command()
@click.option("--ip", default="127.0.0.1", help="IP address")
@click.option("--port", default="5000", help="Port")
def repl(ip, port):
"""Repl messages to server."""
logger.info("Running repl... ", ip=ip, port=port)
click.echo("Running repl...")
repl = Repl(ip=ip, port=port)
try:
repl.start()
except KeyboardInterrupt as e:
click.echo(str(e))
finally:
sys.exit()
@cli.command()
@click.option("--ip", default="127.0.0.1", help="IP address")
@click.option("--port", default="5000", help="Port")
def tester(ip, port):
"""Sends test song to server."""
logger.info("Running tester... ", ip=ip, port=port)
click.echo("Running tester...")
song = Song(ip=ip, port=port)
try:
song.start()
except KeyboardInterrupt as e:
click.echo(str(e))
finally:
sys.exit()
# TODO overwrite config command
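# Example invocations (a sketch; how the `cli` group is exposed -- console script
# or module entry point -- is an assumption, and all addresses and credentials are
# placeholders; only the flag names come from the options defined above):
#   <cli-entry-point> server --single --ip 127.0.0.1 --port 5000 \
#       --channel <CHANNEL_ID> --token <BOT_TOKEN>
#   <cli-entry-point> client --ip 127.0.0.1 --port 5000 --message "hello"
#   <cli-entry-point> repl --ip 127.0.0.1 --port 5000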
|
publicDNSv1.py
|
"""
BSD 3-Clause License
Copyright (c) 2019, Antti Koskimäki, Aalto University, Finland
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import multiprocessing
import random
import socket
import socketserver
import struct
import sys
import threading
import time
import dns.message
import dns.edns
import dns.name
from socket import IPPROTO_TCP, TCP_NODELAY
from multiprocessing import Queue as mQueue
from multiprocessing import Process
"""
This program is a simple DNS relay for forwarding DNS queries to some DNS
server further in the DNS system. This relay can add ECS client subnet
information to the DNS messages it handles. The parameters for the program
can be set by modifying the global variables below.
"""
# temp default values for variables below
# TODO: adjust and/or put argparse input instead
SERVADDR = ("127.0.0.1", 53)
SERVUDPBFR = 4096
SERVTCPBFR = 4096
CDNSADDR = ("127.0.0.1", 54)
FWDUDPBFR = 4096
FWDTCPBFR = 4096
ECSMASK = 24
DNSTIMEOUT = 3
DNSTRIES = 3
addECS = False
clients = {}
newids = {}
# temporary message type codes used on the internal queues
# 0 - UDP query
# 1 - UDP answer - normal
# 2 - TCP query
# 3 - TCP answer - normal
# 4 - UDP query for CNAME
# 5 - UDP CNAME-type answer
# 6 - TCP query for CNAME
# 7 - TCP CNAME-type answer
# 8 - UDP answer with trunc flag (resend with TCP)
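# Queue item shapes (derived from the code below): the UDP server puts
# (request_bytes, client_address) on q1; the central process puts
# (dns_wire_bytes, client_address, original_query_id, step_code) on q3/q5;
# the UDP/TCP senders reply on q4/q6 with (reply_bytes, client_address,
# original_query_id); answers for the client go back on q2 as
# (answer_bytes, client_address). The step_code loosely follows the stages
# listed above.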
# Process 1 (UDP server) definition, threads, etc. below:
def process1_UDPsrv(q1, q2):
# tempp
print("P1 - Starting process 1 - UDP sender & server\n")
thread1 = P1T1_UDPServer(q1, SERVADDR)
thread1.start()
time.sleep(1)
thread2 = P1T2_UDPSender(q2, thread1.return_server_socket())
thread2.start()
thread2.join()
thread1.join()
print("P1 - Exiting process 1...\n")
class P1T1_UDPServer(threading.Thread):
def __init__(self, q1, addr):
threading.Thread.__init__(self)
self.q1 = q1
self.addr = addr
self.serversocket = 0
# tempp
print("P1T1 - UDP server thread starting\n")
def run(self):
server = MyUDPServer(self.q1, self.addr, UDPClientHandler)
self.serversocket = server.socket
# tempp
print("P1T1 - Running UDP server loop forever\n")
server.serve_forever(5)
def return_server_socket(self):
return self.serversocket
class P1T2_UDPSender(threading.Thread):
def __init__(self, q2, serversocket):
threading.Thread.__init__(self)
self.q2 = q2
self.serversocket = serversocket
# tempp
print("P1T2 - UDP Sender Thread starting\n")
def run(self):
# tempp
print("P1T2 - UDP Sender listening loop starting\n")
while True:
data = self.q2.get()
self.serversocket.sendto(data[0], data[1])
# tempp
# print("P1T2 - UDP Sender sent reply to client\n")
class UDPClientHandler(socketserver.BaseRequestHandler):
def handle(self):
# tempp
# print("P1 - UDP Server got data\n")
# print(self.request[0])
# print(" from ")
# print(self.client_address)
self.server.q1.put((self.request[0], self.client_address))
# tempp
# print("P1T1 - UDP server fwd sg to q1 - handling done\n")
class MyUDPServer(socketserver.UDPServer):
def __init__(self, q1, *args, **kwargs):
super(MyUDPServer, self).__init__(*args, **kwargs)
self.q1 = q1
# tempp
print("P1T1 - UDP Server starting\n")
# Process 3 (Central Processing) definition, threads, etc. below:
def process2_CP(q1, q2, q3, q4, q5, q6, ecs, mask):
# tempp
print("P2 - Starting process 2\n")
thread1 = P2T1_step1Handler(q1, q3, ecs, mask)
thread1.start()
thread2 = P2T2_tcpAnswHandler(q2, q3, q6)
thread2.start()
thread3 = P2T3_udpAnswHandler(q2, q3, q4, q5)
thread3.start()
thread1.join()
thread2.join()
thread3.join()
class P2T1_step1Handler(threading.Thread):
def __init__(self, q1, q3, ecs, mask):
threading.Thread.__init__(self)
self.q1 = q1
self.q3 = q3
self.ecs = ecs
self.mask = mask
# tempp
print("P2T1 - CP step1Handler thread starting\n")
def run(self):
# tempp
print("P2T1 - CP step1Handler thread listening loop starting\n")
while True:
data = self.q1.get()
dnsmsg = dns.message.from_wire(data[0])
if self.ecs:
tmp_optionlist = []
tmp_optionlist.append(dns.edns.ECSOption(data[1][0],
self.mask,
0))
dnsmsg.use_edns(0, 0, 1280, 1280, tmp_optionlist)
self.q3.put((dnsmsg.to_wire(), data[1], dnsmsg.id, 1))
class P2T2_tcpAnswHandler(threading.Thread):
def __init__(self, q2, q3, q6):
threading.Thread.__init__(self)
self.q2 = q2
self.q3 = q3
self.q6 = q6
# tempp
print("P2T2 - CP tcpAnswHandler thread starting\n")
def run(self):
# tempp
print("P2T2 - CP tcpAnswHandler thread listening loop starting\n")
while True:
data = self.q6.get()
dnsmsg = dns.message.from_wire(data[0])
isCname = False
for x in dnsmsg.answer:
tmp_arr = x.to_text().split()
if "CNAME" in tmp_arr:
cnaddr = (tmp_arr[tmp_arr.index("CNAME") + 1])[:-1]
isCname = True
break
if isCname:
dnsquery = dns.message.make_query(cnaddr,
dns.rdatatype.A)
self.q3.put((dnsquery.to_wire(), data[1], data[2], 3))
else:
dnsmsg.id = data[2]
self.q2.put((dnsmsg.to_wire(), data[1]))
class P2T3_udpAnswHandler(threading.Thread):
def __init__(self, q2, q3, q4, q5):
threading.Thread.__init__(self)
self.q2 = q2
self.q3 = q3
self.q4 = q4
self.q5 = q5
# tempp
print("P2T3 - CP udpAnswHandler thread starting\n")
def run(self):
# tempp
print("P2T3 - CP udpAnswerHandler thread listening loop starting\n")
while True:
data = self.q4.get()
dnsmsg = dns.message.from_wire(data[0])
# Truncated (TC bit set) UDP answer: clear the QR, TC and RA flags so the
# message becomes a query again, assign a fresh id, and resend it over TCP.
if (dnsmsg.flags & (1 << 9)):
dnsmsg.flags = dnsmsg.flags & 0b0111110101111111
dnsmsg.id = int(65535 * random.random()) + 1
self.q5.put((dnsmsg.to_wire(), data[1], data[2], 2))
else:
isCname = False
for x in dnsmsg.answer:
tmp_arr = x.to_text().split()
if "CNAME" in tmp_arr:
cnaddr = (tmp_arr[tmp_arr.index("CNAME") + 1])[:-1]
isCname = True
break
if isCname:
dnsquery = dns.message.make_query(cnaddr,
dns.rdatatype.A)
self.q3.put((dnsquery.to_wire(), data[1], data[2], 2))
else:
dnsmsg.id = data[2]
self.q2.put((dnsmsg.to_wire(), data[1]))
# Process 3 - Sender thread towards CDNS below:
def process3_UDPsend(q3, q4, addr, timeout, tries):
# tempp
print("P3 - Starting process 3\n")
print("P3 - Starting listening loop\n")
while True:
data = q3.get()
# tempp
# print("P3 - Creating sender thread\n")
P3TX_Sender(q4, data, addr, timeout, tries).start()
class P3TX_Sender(threading.Thread):
def __init__(self, q4, data, addr, timeout, tries):
threading.Thread.__init__(self)
self.q4 = q4
self.data = data
self.cdnsaddr = addr
self.timeout = timeout
self.tries = tries
# tempp
# print("P3TX - Sender Thread starting\n")
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(self.timeout)
try:
sock.sendto(self.data[0], self.cdnsaddr)
reply, addr = sock.recvfrom(2048)
sock.close()
self.q4.put((reply, self.data[1], self.data[2]))
except socket.timeout:
sock.close()
print("UDP sender socket timeout\n")
# tempp
# print("P3TX - Send and received data, forwarding to CP (q4)\n")
def process4_TCPsend(q5, q6, addr, timeout, tries):
# tempp
print("P4 - Starting process 4\n")
print("P4 - Starting listening loop\n")
while True:
data = q5.get()
# tempp
# print("P4 - Creating sender thread\n")
P4TX_Sender(q6, data, addr, timeout, tries).start()
class P4TX_Sender(threading.Thread):
def __init__(self, q6, data, addr, timeout, tries):
threading.Thread.__init__(self)
self.q6 = q6
self.data = data
self.addr = addr
self.timeout = timeout
self.tries = tries
# tempp
# print("P4TX - Sender Thread starting\n")
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
try:
sock.connect(self.addr)
sock.sendall(struct.pack('!H', len(self.data[0])) + self.data[0])
reply = sock.recv(2048)
self.q6.put((reply[2:], self.data[1], self.data[2]))
sock.close()
except socket.timeout:
sock.close()
print("TCP sender socket timeout\n")
# tempp
# print("P4TX - Send and received data, forwarding to CP (q6)\n")
def main():
# TODO: Argparse input
# p1 -> p2
q1 = mQueue()
# p2 -> p1
q2 = mQueue()
# p2 -> p3
q3 = mQueue()
# p3 -> p2
q4 = mQueue()
# p2 -> p4
q5 = mQueue()
# p4 -> p2
q6 = mQueue()
p1 = Process(target=process1_UDPsrv, args=(q1, q2,))
p2 = Process(target=process2_CP, args=(q1,
q2,
q3,
q4,
q5,
q6,
addECS,
ECSMASK))
p3 = Process(target=process3_UDPsend, args=(q3,
q4,
CDNSADDR,
DNSTIMEOUT,
DNSTRIES))
p4 = Process(target=process4_TCPsend, args=(q5,
q6,
CDNSADDR,
DNSTIMEOUT,
DNSTRIES))
p1.start()
p2.start()
p3.start()
p4.start()
try:
p1.join()
p2.join()
p3.join()
p4.join()
except KeyboardInterrupt:
p1.terminate()
p2.terminate()
p3.terminate()
p4.terminate()
# TODO: Remember to flush IP tables
print("--Exiting public DNS server program (Ctrl-C)--\n")
sys.exit()
print("Exiting public DNS server program...\n")
if __name__ == "__main__":
main()
|
server.py
|
import socket
import sys
import time
import threading
import sqlite3
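# This server assumes an existing SQLite database 'test.db' containing a
# three-column table 'tbl1' of (timestamp, username, message) rows. A sketch
# of a compatible schema (column names are assumptions, since the code below
# only ever accesses columns by position):
#   CREATE TABLE IF NOT EXISTS tbl1 (epoch INTEGER, user TEXT, message TEXT);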
def clientSendThread(connection, user, tCur):
messageTotal = tMessages # set total message to equal the current number of messages sent
while True:
if messageTotal != tMessages: # if a new message has been sent
lastMessage = tCur.execute('''SELECT * FROM tbl1 WHERE rowid = ?;''', [messageTotal + 1])
lastMessage = tCur.fetchall()[0] # fetchall() returns a list of row tuples; take the first row
lastMessage = (lastMessage[1] + ": " + lastMessage[2]+"\n")
try:
sendMessage = lastMessage.encode() # encode the last sent message
connection.send(sendMessage) # send the client the message
except (ConnectionResetError, ConnectionAbortedError) as e:
break
messageTotal += 1 # increment message count
def clientThread(connection, user):
global tMessages
tCur = con.cursor()
sendThread = threading.Thread(target=clientSendThread, args=(connection, user, tCur)) # Create a thread for sending messages to this user
sendThread.daemon = True # daemon thread: it will die when the main process does
sendThread.start() # start the sender thread
while True:
epoch = round(time.time())
try:
in_message = connection.recv(1024) # wait for a message to be received
except (ConnectionResetError, ConnectionAbortedError) as e: # if user closed client
message = (" User " + user + " has disconnected.")
print(message)
lCur = con.cursor()
lCur.execute('''INSERT INTO tbl1 VALUES (?, ?, ?);''', [epoch, "SERVER", message]) #SQL command to run
tMessages += 1
break # leave loop
in_message = in_message.decode('utf-8')
if in_message != "":
recvMessage = user + ": " + in_message
aCur = con.cursor()
aCur.execute('''INSERT INTO tbl1 VALUES (?, ?, ?);''', [epoch, user, in_message]) #SQL command to run
con.commit() # commit it to the database
tMessages += 1 # add message to the message array
print(recvMessage) # print message to console
else:
break
print("Thread for user ID", clientID, "terminating.") # announce thread termination
sock = socket.socket() # Create Socket
h_name = socket.gethostname()
print("Server will start on host: ", h_name)
mainPort = 1200 # Default Listen Port
sock.bind((h_name, mainPort))
print("Binded host and port successfully")
print("Server is waiting for incoming connections...")
try:
con = sqlite3.connect('test.db', check_same_thread = False)
print("Connected to database")
cur = con.cursor()
except FileNotFoundError:
print("Database not found")
clientID = 0 # ID of the next client to connect
clientRecvThreads = [] # Array of threads for clients.
tMessages = cur.execute('''SELECT Count(*) FROM tbl1;''')
tMessages = cur.fetchone()[0]
print(tMessages)
while True:
sock.listen(1) # Server will wait here for a connection
connection,address = sock.accept()
username = connection.recv(1024) # Recieve the username of the user
username = username.decode('utf-8')
print(username, address, " Has connected to the server...")
clientRecvThreads.append(threading.Thread(target = clientThread, args = (connection,username,))) # Create a thread for the user's session
clientRecvThreads[clientID].daemon = True # daemon to true, thread will die when the main process does
clientRecvThreads[clientID].start() # start thread
clientID += 1
|
run_robot_library.py
|
import os
from tempfile import mkdtemp
from robot import run
from multiprocessing import Process
from allure_commons_test.report import AllureReport
def run_robot_with_allure(*args, **kwargs):
root = os.path.abspath(os.path.join(__file__, "..", ".."))
targets = map(lambda target: os.path.join(root, target), args)
tmp_path = mkdtemp(dir=os.environ.get('TEST_TMP', '/tmp'))
def run_robot(path, **kw):
# ToDo: fix it (_core does not work correctly with multiprocessing)
# import six
# import allure_commons
# if six.PY2:
# reload(allure_commons._core)
# else:
# import importlib
# importlib.reload(allure_commons._core)
#
#
from allure_robotframework import allure_robotframework
listener = allure_robotframework(logger_path=tmp_path)
stdout_file = os.path.abspath(os.path.join(tmp_path, "..", "stdout.txt"))
output_path = os.path.abspath(os.path.join(tmp_path, ".."))
with open(stdout_file, 'w+') as stdout:
options = {"listener": listener, "outputdir": output_path, "stdout": stdout, "extension": "rst"}
options.update(kw)
run(path, **options)
robot_process = Process(target=run_robot, args=targets, kwargs=kwargs)
robot_process.start()
robot_process.join()
return AllureReport(tmp_path)
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test tiajiansd shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
Thread(target=test_long_call, args=(node,)).start()
# wait 1 second to ensure event loop waits for current connections to close
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
collect_server_info.py
|
#!/usr/bin/env python
import getopt
import os
import platform
import subprocess
import sys
import time
from datetime import datetime
from threading import Thread
sys.path = [".", "lib"] + sys.path
from testconstants import WIN_COUCHBASE_BIN_PATH_RAW
import TestInput
def usage(error=None):
print("""\
Syntax: collect_server_info.py [options]
Options
-l Lowercase L. For a local run on Windows only. No .ini file needed.
-i <file> Path to .ini file containing cluster information.
-p <key=val,...> Comma-separated key=value info.
Available keys:
path=<file_path> The destination path you want to put your zipped diag file
Example:
collect_server_info.py -i cluster.ini -p path=/tmp/nosql
""")
sys.exit(error)
def time_stamp():
now = datetime.now()
day = now.day
month = now.month
year = now.year
hour = now.timetuple().tm_hour
min = now.timetuple().tm_min
date_time = "%s%02d%02d-%02d%02d" % (year, month, day, hour, min)
return date_time
class couch_dbinfo_Runner(object):
def __init__(self, server, path, local=False):
self.server = server
self.path = path
self.local = local
def run(self):
file_name = "%s-%s-couch-dbinfo.txt" % (self.server.ip.replace('[', '').replace(']', '').replace(':', '.'),
time_stamp())
if not self.local:
from lib.remote.remote_util import RemoteMachineShellConnection
remote_client = RemoteMachineShellConnection(self.server)
print("Collecting dbinfo from %s\n" % self.server.ip)
output, error = remote_client.execute_couch_dbinfo(file_name)
print("\n".join(output))
print("\n".join(error))
user_path = "/home/"
if remote_client.info.distribution_type.lower() == 'mac':
user_path = "/Users/"
else:
if self.server.ssh_username == "root":
user_path = "/"
remote_path = "%s%s" % (user_path, self.server.ssh_username)
status = remote_client.file_exists(remote_path, file_name)
if not status:
raise Exception("%s doesn't exists on server" % file_name)
status = remote_client.get_file(remote_path, file_name,
"%s/%s" % (self.path, file_name))
if status:
print("Downloading dbinfo logs from %s" % self.server.ip)
else:
raise Exception("Fail to download db logs from %s"
% self.server.ip)
remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
remote_client.disconnect()
class cbcollectRunner(object):
def __init__(self, server, path, local=False):
self.server = server
self.path = path
self.local = local
def run(self):
file_name = "%s-%s-diag.zip" % (self.server.ip.replace('[', '').replace(']', '').replace(':', '.'),
time_stamp())
if not self.local:
from lib.remote.remote_util import RemoteMachineShellConnection
remote_client = RemoteMachineShellConnection(self.server)
print("Collecting logs from %s\n" % self.server.ip)
output, error = remote_client.execute_cbcollect_info(file_name)
print("\n".join(error))
user_path = "/home/"
if remote_client.info.distribution_type.lower() == 'mac':
user_path = "/Users/"
else:
if self.server.ssh_username == "root":
user_path = "/"
remote_path = "%s%s" % (user_path, self.server.ssh_username)
status = remote_client.file_exists(remote_path, file_name)
if not status:
raise Exception("%s doesn't exists on server" % file_name)
status = remote_client.get_file(remote_path, file_name,
"%s/%s" % (self.path, file_name))
if status:
print("Downloading zipped logs from %s" % self.server.ip)
else:
raise Exception("Fail to download zipped logs from %s"
% self.server.ip)
remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
remote_client.disconnect()
def main():
local = False
try:
(opts, args) = getopt.getopt(sys.argv[1:], 'hli:p', [])
for o, a in opts:
if o == "-h":
usage()
elif o == "-l":
if platform.system() == "Windows":
print("*** windows os ***")
local = True
else:
print("This option '-l' only works for local windows.")
sys.exit()
if not local:
input = TestInput.TestInputParser.get_test_input(sys.argv)
if not input.servers:
usage("ERROR: no servers specified. Please use the -i parameter.")
except IndexError:
usage()
except getopt.GetoptError as error:
usage("ERROR: " + str(error))
if not local:
file_path = input.param("path", ".")
remotes = (cbcollectRunner(server, file_path, local) for server in input.servers)
remote_threads = [Thread(target=remote.run) for remote in remotes] # pass the bound method; calling run() here would execute it immediately
for remote_thread in remote_threads:
remote_thread.daemon = True
remote_thread.start()
run_time = 0
while remote_thread.isAlive() and run_time < 1200:
time.sleep(15)
run_time += 15
print("Waiting for another 15 seconds (time-out after 20 min)")
if run_time == 1200:
print("cbcollect_info hung on this node. Jumping to next node")
print("collect info done")
for remote_thread in remote_threads:
remote_thread.join(120)
if remote_thread.isAlive():
raise Exception("cbcollect_info hung on remote node")
else:
file_name = "%s-%s-diag.zip" % ("local", time_stamp())
cbcollect_command = WIN_COUCHBASE_BIN_PATH_RAW + "cbcollect_info.exe"
result = subprocess.check_call([cbcollect_command, file_name])
if result == 0:
print("Log file name is \n %s" % file_name)
else:
print("Failed to collect log")
if __name__ == "__main__":
main()
|
threads_show_ver.py
|
#!/usr/bin/env python
"""
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
"""
from __future__ import print_function, unicode_literals
import threading
from datetime import datetime
from netmiko import ConnectHandler
from my_devices import device_list as devices
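# `device_list` is expected to be a list of Netmiko connection dictionaries,
# e.g. (a sketch; the device type, address, and credentials are placeholders):
#   device_list = [
#       {"device_type": "cisco_ios", "host": "10.0.0.1",
#        "username": "admin", "password": "secret"},
#   ]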
def show_version(a_device):
"""Execute show version command using Netmiko."""
print()
print("#" * 80)
remote_conn = ConnectHandler(**a_device)
output = remote_conn.send_command_expect("show version")
remote_conn.disconnect()
print(output)
print("#" * 80)
print()
def main():
"""
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
"""
start_time = datetime.now()
for a_device in devices:
my_thread = threading.Thread(target=show_version, args=(a_device,))
my_thread.start()
main_thread = threading.currentThread()
for some_thread in threading.enumerate():
if some_thread != main_thread:
print(some_thread)
some_thread.join()
print("\nElapsed time: " + str(datetime.now() - start_time))
if __name__ == "__main__":
main()
|
job_runner.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import threading
import time
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import FLContextKey, RunProcessKey, SystemComponents, WorkspaceConstants
from nvflare.apis.fl_context import FLContext
from nvflare.apis.job_def import Job, RunStatus
from nvflare.private.admin_defs import Message
from nvflare.private.defs import RequestHeader, TrainingTopic
from nvflare.private.fed.utils.fed_utils import deploy_app
class JobRunner(FLComponent):
def __init__(self, workspace_root: str) -> None:
super().__init__()
self.workspace_root = workspace_root
self.ask_to_stop = False
self.scheduler = None
self.running_jobs = {}
self.lock = threading.Lock()
def handle_event(self, event_type: str, fl_ctx: FLContext):
if event_type == EventType.SYSTEM_START:
engine = fl_ctx.get_engine()
self.scheduler = engine.get_component(SystemComponents.JOB_SCHEDULER)
def _deploy_job(self, job: Job, sites: list, fl_ctx: FLContext) -> str:
"""deploy the application to the list of participants
Args:
job: job to be deployed
sites: participating sites
fl_ctx: FLContext
Returns:
"""
engine = fl_ctx.get_engine()
run_number = job.job_id
workspace = os.path.join(self.workspace_root, WorkspaceConstants.WORKSPACE_PREFIX + run_number)
count = 1
while os.path.exists(workspace):
work_folder = run_number + "_" + str(count)
workspace = os.path.join(self.workspace_root, WorkspaceConstants.WORKSPACE_PREFIX + work_folder)
count += 1
for app_name, participants in job.get_deployment().items():
app_data = job.get_application(app_name, fl_ctx)
if not participants:
participants = ["server"]
participants.extend([client.name for client in engine.get_clients()])
client_sites = []
for p in participants:
if p == "server":
success = deploy_app(app_name=app_name, site_name="server", workspace=workspace, app_data=app_data)
self.log_info(
fl_ctx, f"Application {app_name} deployed to the server for run:{run_number}", fire_event=False
)
if not success:
raise RuntimeError("Failed to deploy the App to the server")
else:
if p in sites:
client_sites.append(p)
self._deploy_clients(app_data, app_name, run_number, client_sites, engine)
display_sites = ",".join(client_sites)
self.log_info(
fl_ctx,
f"Application {app_name} deployed to the clients: {display_sites} for run:{run_number}",
fire_event=False,
)
self.fire_event(EventType.JOB_DEPLOYED, fl_ctx)
return run_number
def _deploy_clients(self, app_data, app_name, run_number, client_sites, engine):
# deploy app to all the client sites
admin_server = engine.server.admin_server
message = Message(topic=TrainingTopic.DEPLOY, body=app_data)
message.set_header(RequestHeader.RUN_NUM, run_number)
message.set_header(RequestHeader.APP_NAME, app_name)
replies = self._send_to_clients(admin_server, client_sites, engine, message)
if not replies:
raise RuntimeError("Failed to deploy the App to the clients")
def _send_to_clients(self, admin_server, client_sites, engine, message):
clients, invalid_inputs = engine.validate_clients(client_sites)
requests = {}
for c in clients:
requests.update({c.token: message})
replies = admin_server.send_requests(requests, timeout_secs=admin_server.timeout)
return replies
def _start_run(self, run_number, job: Job, client_sites: dict, fl_ctx: FLContext):
"""Start the application
Args:
run_number: run_number
client_sites: participating sites
fl_ctx: FLContext
Returns:
"""
engine = fl_ctx.get_engine()
job_clients = engine.get_job_clients(client_sites)
err = engine.start_app_on_server(run_number, job_id=job.job_id, job_clients=job_clients)
if err:
raise RuntimeError("Could not start the server App.")
replies = engine.start_client_job(run_number, client_sites)
if not replies:
raise RuntimeError("Failed to start the App to the clients")
display_sites = ",".join(list(client_sites.keys()))
self.log_info(fl_ctx, f"Started run: {run_number} for clients: {display_sites}")
self.fire_event(EventType.JOB_STARTED, fl_ctx)
def _stop_run(self, run_number, fl_ctx: FLContext):
"""Stop the application
Args:
run_number: run_number to be stopped
fl_ctx: FLContext
Returns:
"""
engine = fl_ctx.get_engine()
run_process = engine.run_processes.get(run_number)
if run_process:
admin_server = engine.server.admin_server
client_sites = run_process.get(RunProcessKey.PARTICIPANTS)
message = Message(topic=TrainingTopic.ABORT, body="")
message.set_header(RequestHeader.RUN_NUM, str(run_number))
self.log_debug(fl_ctx, f"Send stop command to the site for run:{run_number}")
replies = self._send_to_clients(admin_server, client_sites, engine, message)
if not replies:
self.log_error(fl_ctx, f"Failed to send abort command to clients for run_{run_number}")
err = engine.abort_app_on_server(run_number)
if err:
self.log_error(fl_ctx, f"Failed to abort the server for run_.{run_number}")
def _job_complete_process(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
while not self.ask_to_stop:
for run_number in list(self.running_jobs.keys()):
if run_number not in engine.run_processes.keys():
with self.lock:
job = self.running_jobs.get(run_number)
if job:
job_manager.set_status(job.job_id, RunStatus.FINISHED_COMPLETED, fl_ctx)
del self.running_jobs[run_number]
fl_ctx.set_prop(FLContextKey.CURRENT_JOB_ID, job.job_id)
self.fire_event(EventType.JOB_COMPLETED, fl_ctx)
self.log_debug(fl_ctx, f"Finished running job:{job.job_id}")
time.sleep(1.0)
def run(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
threading.Thread(target=self._job_complete_process, args=[fl_ctx]).start()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
if job_manager:
while not self.ask_to_stop:
# approved_jobs = job_manager.get_jobs_by_status(RunStatus.APPROVED, fl_ctx)
approved_jobs = job_manager.get_jobs_by_status(RunStatus.SUBMITTED, fl_ctx)
if self.scheduler:
(ready_job, sites) = self.scheduler.schedule_job(job_candidates=approved_jobs, fl_ctx=fl_ctx)
if ready_job:
try:
self.log_info(fl_ctx, f"Got the job:{ready_job.job_id} from the scheduler to run")
fl_ctx.set_prop(FLContextKey.CURRENT_JOB_ID, ready_job.job_id)
run_number = self._deploy_job(ready_job, sites, fl_ctx)
job_manager.set_status(ready_job.job_id, RunStatus.DISPATCHED, fl_ctx)
self._start_run(run_number, ready_job, sites, fl_ctx)
with self.lock:
self.running_jobs[run_number] = ready_job
job_manager.set_status(ready_job.job_id, RunStatus.RUNNING, fl_ctx)
except Exception as e:
self.log_error(fl_ctx, f"Failed to run the Job ({ready_job.job_id}): {e}")
time.sleep(1.0)
else:
self.log_error(fl_ctx, "There's no Job Manager defined. Won't be able to run the jobs.")
def restore_running_job(self, run_number: str, job_id: str, job_clients, snapshot, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
engine.start_app_on_server(run_number, job_id=job_id, job_clients=job_clients, snapshot=snapshot)
try:
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
job = job_manager.get_job(jid=job_id, fl_ctx=fl_ctx)
with self.lock:
self.running_jobs[run_number] = job
except Exception:
self.log_error(fl_ctx, f"Failed to restore the job:{job_id} to the running job table.")
def stop_run(self, run_number: str, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
job_manager = engine.get_component(SystemComponents.JOB_MANAGER)
with self.lock:
self._stop_run(run_number, fl_ctx)
job = self.running_jobs.get(run_number)
if job:
self.log_info(fl_ctx, f"Stop the job run:{run_number}")
fl_ctx.set_prop(FLContextKey.CURRENT_JOB_ID, job.job_id)
job_manager.set_status(job.job_id, RunStatus.FINISHED_ABORTED, fl_ctx)
del self.running_jobs[run_number]
self.fire_event(EventType.JOB_ABORTED, fl_ctx)
def stop_all_runs(self, fl_ctx: FLContext):
engine = fl_ctx.get_engine()
for run_number in engine.run_processes.keys():
self.stop_run(run_number, fl_ctx)
self.log_info(fl_ctx, "Stop all the running jobs.")
self.ask_to_stop = True
|
runner.py
|
import configparser
import datetime
import glob
import h5py
import json
import os
import requests
import smtplib
import subprocess
import sys
import time
import threading
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
config = configparser.ConfigParser()
config.read(os.environ["SMTP_SETTINGS"])
config = config["DEFAULT"]
def reporter(host, id):
while True:
hdf5_files = glob.glob("*.hdf5")
if len(hdf5_files):
try:
if len(hdf5_files) > 1:
for file in hdf5_files[:-1]:
os.remove(file)
with h5py.File(hdf5_files[-1], "r") as f:
requests.post(
f"{host}/runs/update",
json={
"id": id,
"progress": {
"iteration": int(hdf5_files[-1].split(".")[0]),
"pop": f["pop"][:].tolist(),
"fit": f["fit"][:].tolist(),
},
},
)
os.remove(hdf5_files[-1])
continue
except:
pass
time.sleep(0.1)
def run(
cl,
re,
ma,
n_c,
n_t,
gen=100,
tolx=1e-8,
tolf=1e-8,
fix_te=True,
t_te_min=0.0,
t_c_min=0.01,
r_le_min=0.05,
A_cs_min=None,
A_bins_min=None,
Cm_max=None,
strategy="rand-to-best/1/exp/random",
f=None,
cr=None,
adaptivity=2,
n_proc=28,
run_name=None,
report=False,
**kwargs,
):
"""
Solve the specified optimization problem and handle reporting of results.
Parameters
----------
cl : float
Design lift coefficient
re : float
Reynolds number
ma : float
Mach number
n_c, n_t : int
Number of CST coefficients for the chord line and thickness distribution, respectively
gen : int, optional
Number of generations to use for the genetic algorithm. 100 by default
tolx : float, optional
Tolerance on the spread of the design vectors.
tolf : float, optional
Tolerance on the spread of objective functions.
fix_te : bool, optional
True if the trailing edge thickness should be fixed. True by default
t_te_min : float, optional
Minimum TE thickness as fraction of chord length. Default is 0.0.
t_c_min : float or None, optional
Minimum thickness over chord ratio. None if unconstrained. Default is 0.01.
r_le_min : float or None, optional
Minimum leading edge radius. None if unconstrained. Default is 0.05.
A_cs_min : float or None, optional
Minimum cross sectional area. None if unconstrained. Default is None.
A_bins_min : float or None, optional
Minimum relative area of the airfoil in each bin along the chord. None if unconstrained. Default is None.
Cm_max : float or None, optional
Maximum absolute moment coefficient. None if unconstrained. Default is None.
strategy : string, optional
Evolution strategy to use. Default is 'rand-to-best/1/exp/random'.
f : float or None, optional
Mutation rate
cr : float or None, optional
Crossover rate
adaptivity : 0, 1, or 2
Which kind of self-adaptivity to use (0: none, 1: simple, 2: complex)
n_proc : int, optional
Number of processors to use to evaluate functions in parallel using MPI. 28 by default
run_name : str, optional
Name of the run. If None, an ISO formatted UTC timestamp will be used.
report : bool, optional
True if the results should be reported via email.
"""
returncode = -1
try:
if run_name is None:
now = datetime.datetime.utcnow()
run_name = (
now.isoformat(timespec="seconds").replace("-", "").replace(":", "")
+ "Z"
)
path = os.path.join(os.path.abspath(os.environ["RESULTS_DIR"]), run_name)
os.makedirs(path)
repr_file = os.path.join(path, "repr.yml")
dat_file = os.path.join(path, "optimized.dat")
png_file = os.path.join(path, "optimized.png")
log_file = os.path.join(path, "log.txt")
cmd = [
"mpirun",
"-np",
str(n_proc),
"python3",
"-u",
"-m",
"af_opt.problem",
str(cl),
str(re),
str(ma),
str(n_c),
str(n_t),
str(gen),
str(tolx),
str(tolf),
str(fix_te),
str(t_te_min),
str(t_c_min),
str(r_le_min),
str(A_cs_min),
str(A_bins_min),
str(Cm_max),
strategy,
str(f),
str(cr),
str(adaptivity),
str(repr_file),
str(dat_file),
str(png_file),
]
print(f"Going to run the following command: \n{cmd}")
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
for line in process.stdout:
s = line.decode("utf-8")
sys.stdout.write(s)
with open(log_file, "a+") as log:
log.write(s)
with open(log_file, "r") as f:
all_text = f.read()
with open(log_file, "w") as f:
f.write(all_text.replace("\n\n", "\n"))
process.communicate()
returncode = process.returncode
if report:
print("Going to send an email")
msg = MIMEMultipart()
msg["From"] = config["user"]
msg["To"] = config["receiver"]
msg["Subject"] = "Airfoil Optimization Complete!"
with open(repr_file, "r") as f:
msg.attach(MIMEText(f.read(), "plain"))
f.seek(0)
attachment = MIMEText(f.read(), _subtype="yaml")
attachment.add_header(
"Content-Disposition",
"attachment",
filename=os.path.basename(repr_file),
)
msg.attach(attachment)
with open(png_file, "rb") as fp:
attachment = MIMEImage(fp.read(), _subtype="png")
attachment.add_header(
"Content-Disposition",
"attachment",
filename=os.path.basename(png_file),
)
msg.attach(attachment)
with open(dat_file, "r") as f:
attachment = MIMEText(f.read())
attachment.add_header(
"Content-Disposition",
"attachment",
filename=os.path.basename(dat_file),
)
msg.attach(attachment)
with open(log_file, "r", encoding="utf-8") as f:
attachment = MIMEText(f.read())
attachment.add_header(
"Content-Disposition",
"attachment",
filename=os.path.basename(log_file),
)
msg.attach(attachment)
with smtplib.SMTP_SSL(config["host"], int(config["port"])) as server:
server.ehlo()
server.login(config["user"], config["password"])
server.sendmail(config["user"], config["receiver"], msg.as_string())
print("Email sent")
except Exception as e:
print(e)
return returncode
def main():
"""
Poll runs service for new run cases and run them.
"""
host = os.environ["RUNS_SERVICE_URL"]
while True:
try:
r = requests.get(f"{host}/runs/accept")
if r.status_code == 204:
time.sleep(1)
continue
response_object = r.json()
id = response_object["data"]["id"]
kwargs = dict(response_object["data"])
print(f"Got a request to start a run with the following data: \n{kwargs}")
thread = threading.Thread(target=reporter, args=(host, id))
thread.start()
returncode = run(**kwargs, host=host)
print(f"Returncode: {returncode}")
requests.post(
f"{host}/runs/complete", json={"id": id, "success": returncode == 0}
)
except requests.exceptions.ConnectionError as e:
pass
except json.decoder.JSONDecodeError:
print(f"Invalid response from server: {r}")
except TypeError:
print("Invalid run case")
requests.post(f"{host}/runs/complete", json={"id": id, "success": False})
if __name__ == "__main__":
main()
|
experiment.py
|
from multiprocessing import Process, Value
import os
import iotlabcli.parser.node
import argparse
import iotlabcli.experiment
from iotlabcli import helpers
from iotlabcli import rest
from iotlabcli import auth
from iotlabcli import profile
from iotlabcli.parser import common, help_msgs
from measurement import *
from sniffer import *
from helpers import *
EXPERIMENT_NAME = "experiment"
PROFILE_NAME = "experimentprofile"
def submit_experiment(resources, max_duration, exp_id_result, start_time=None, power_average=None):
print(resources)
print("Started")
user, passwd = auth.get_user_credentials()
api = rest.Api(user, passwd)
if power_average:
m3_prof = profile.ProfileM3(PROFILE_NAME, 'dc')
m3_prof.set_consumption(140, power_average, True, True, True)
m3_prof.set_radio('sniffer',[20])
api.add_profile(PROFILE_NAME,m3_prof)
result = iotlabcli.experiment.submit_experiment(api, EXPERIMENT_NAME, max_duration, [resources], start_time=start_time)
print(result)
exp_id_result.value = result['id']
print(exp_id_result.value)
print("Waiting")
result = iotlabcli.experiment.wait_experiment(api, exp_id_result.value)
print(result)
def stop_all(site,node_type,nodes):
user, passwd = auth.get_user_credentials()
api = rest.Api(user, passwd)
blocking = blocking_experiments(site,node_type,nodes)
for exp in blocking:
eid = exp['id']
print("Stopping "+str(eid))
result = api.stop_experiment(eid)
print(result)
print("Waiting for stopping "+str(eid))
result = iotlabcli.experiment.wait_experiment(api, eid, states = 'Terminated,Error')
print(result)
def finish(exp_id):
print("Finish experiments")
user, passwd = auth.get_user_credentials()
api = rest.Api(user, passwd)
result = iotlabcli.experiment.stop_experiment(api, exp_id.value)
print(result)
def initialize(site, node_type, experiment_nodes, max_duration, logdir, sniffer_nodes = [], start_time=None, power_average=None, force=None, wait=None):
if not os.path.exists(logdir) or not os.path.isdir(logdir):
print("%s does not exist or is not a directory"%logdir)
return None
# Parse arguments
if force is None or wait is None:
parser = argparse.ArgumentParser()
parser.add_argument('--force','-f', action='store_true', help='Stop blocking experiments before submitting the new one')
parser.add_argument('--wait','-w', action='store_true', help='Wait if experiment is blocked by other experiments or unavailable nodes')
args = parser.parse_args()
force = args.force
wait = args.wait
# Stop all blocking experiments
all_nodes = experiment_nodes + sniffer_nodes
if force:
stop_all(site,node_type,all_nodes)
# Check if available
blocking = blocking_experiments(site,node_type,all_nodes)
if len(blocking) > 0:
print("The following experiments block the new experiment")
print(blocking)
if not wait:
return None
if not all_alive(site,node_type,all_nodes):
if not wait:
return None
exp_id = Value('i',0) # will be filled by submit_experiment
if power_average:
total_resources = resources(site,node_type,experiment_nodes+sniffer_nodes,PROFILE_NAME)
else:
total_resources = resources(site,node_type,experiment_nodes+sniffer_nodes)
experiment_resources = resources(site,node_type,experiment_nodes)
print(total_resources)
# Initialize the experiments
labaction = Process(target=submit_experiment, args=(total_resources,max_duration,exp_id,start_time,power_average))
labaction.start()
return exp_id,labaction,experiment_resources
def run_all(site, node_type, experiment_nodes, runs, image, max_duration, logdir, sniffer_nodes = [], start_time=None, timeout_minutes=15, power_average=None, force=None, wait=None):
result = initialize(site, node_type, experiment_nodes, max_duration, logdir, sniffer_nodes, start_time, power_average, force, wait)
if result is None:
return
else:
exp_id,labaction,experiment_resources = result
# Assert that names are unique
assert(len(runs) == len(set([x['name'] for x in runs])))
sniffer_initialized = False
for run in runs:
run['image'] = image
run['logdir'] = logdir
# Start the build
build_successful = Value('b',False)
build = Process(target=building, args=(run,build_successful))
build.start()
# Wait for labaction and build before proceeding
labaction.join()
build.join()
if not sniffer_initialized:
init_sniffer(run,site,node_type,sniffer_nodes,exp_id)
sniffer_initialized = True
if not build_successful.value:
print("Build not successful")
finish(exp_id)
return False
# Flash nodes
success = flash(run,site,node_type,run['image'],exp_id,experiment_resources['nodes'])
if not success:
print("Flashing nodes was not successful")
finish(exp_id)
return False
# Run experiment
labaction = Process(target=run_measurement, args=(run,node_type,exp_id,experiment_resources['nodes'],timeout_minutes,power_average))
labaction.start()
# Wait for last labaction then finish
labaction.join()
finish(exp_id)
return True
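# Example usage (a sketch; the site name, node ids, firmware image, and run
# names are placeholders -- run_all fills in each run's 'image' and 'logdir'
# fields itself, and the helper modules may expect additional per-run keys):
#   run_all(site="grenoble", node_type="m3", experiment_nodes=[1, 2, 3],
#           runs=[{"name": "baseline"}, {"name": "tuned"}],
#           image="firmware.elf", max_duration=60, logdir="./logs",
#           sniffer_nodes=[4], force=True, wait=False)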
|
tcp.py
|
# -*- coding: utf-8 -*-
"""
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import logging
import os
import socket
import sys
import threading
import time
import traceback
import weakref
# Import Salt Libs
import salt.crypt
import salt.exceptions
# Import Tornado Libs
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.iostream
import salt.ext.tornado.netutil
import salt.ext.tornado.tcpclient
import salt.ext.tornado.tcpserver
import salt.payload
import salt.transport.client
import salt.transport.frame
import salt.transport.ipc
import salt.transport.mixins.auth
import salt.transport.server
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
from salt.exceptions import SaltClientError, SaltReqTimeoutError
from salt.ext import six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.transport import iter_transport_opts
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import salt.ext.tornado.util
from salt.utils.process import SignalHandlingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
"""
Ensure that TCP keepalives are set for the socket.
"""
if hasattr(socket, "SO_KEEPALIVE"):
if opts.get("tcp_keepalive", False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "SOL_TCP"):
if hasattr(socket, "TCP_KEEPIDLE"):
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE, int(tcp_keepalive_idle)
)
if hasattr(socket, "TCP_KEEPCNT"):
tcp_keepalive_cnt = opts.get("tcp_keepalive_cnt", -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT, int(tcp_keepalive_cnt)
)
if hasattr(socket, "TCP_KEEPINTVL"):
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP,
socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl),
)
if hasattr(socket, "SIO_KEEPALIVE_VALS"):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(
socket.SIO_KEEPALIVE_VALS,
(
1,
int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000),
),
)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
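# Illustrative sketch (not part of the original module): how the transport
# code below typically prepares a bound TCP socket before handing it to a
# server class; it mirrors LoadBalancerServer.run() and
# TCPReqServerChannel.pre_fork(). The helper itself is hypothetical, not
# Salt API.
def _example_bound_ret_socket(opts):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow fast restarts and apply the tcp_keepalive_* options from opts.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    _set_tcp_keepalive(sock, opts)
    sock.bind((opts["interface"], int(opts["ret_port"])))
    return sock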
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingProcess):
"""
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
"""
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, **kwargs):
super(LoadBalancerServer, self).__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on
# Windows.
def __setstate__(self, state):
self.__init__(
state["opts"],
state["socket_queue"],
log_queue=state["log_queue"],
log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
"opts": self.opts,
"socket_queue": self.socket_queue,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def run(self):
"""
Start the load balancer
"""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except socket.error as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if (
salt.ext.tornado.util.errno_from_exception(e)
== errno.ECONNABORTED
):
continue
raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
"""
Encapsulate sending routines to tcp.
Note: this class returns a singleton
"""
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
async_methods = [
"crypted_transfer_decode_dictentry",
"_crypted_transfer",
"_uncrypted_transfer",
"send",
]
close_methods = [
"close",
]
def __new__(cls, opts, **kwargs):
"""
Only create one instance of channel per __key()
"""
# do we have any mapping for this io_loop
io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug("Initializing new AsyncTCPReqChannel for %s", key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug("Re-using AsyncTCPReqChannel for %s", key)
return obj
@classmethod
def __key(cls, opts, **kwargs):
if "master_uri" in kwargs:
opts["master_uri"] = kwargs["master_uri"]
return (
opts["pki_dir"], # where the keys are stored
opts["id"], # minion ID
opts["master_uri"],
kwargs.get("crypt", "aes"), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get("crypt", "aes")
self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
if self.crypt != "clear":
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get("resolver")
parse = urlparse.urlparse(self.opts["master_uri"])
master_host, master_port = parse.netloc.rsplit(":", 1)
self.master_addr = (master_host, int(master_port))
self._closing = False
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, master_host, int(master_port),),
kwargs={
"io_loop": self.io_loop,
"resolver": resolver,
"source_ip": self.opts.get("source_ip"),
"source_port": self.opts.get("source_ret_port"),
},
)
def close(self):
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
"This is not the last %s instance. Not closing yet.",
self.__class__.__name__,
)
return
log.debug("Closing %s instance", self.__class__.__name__)
self._closing = True
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self.io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self.io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self.io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except socket.error as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(
self, load, dictkey=None, tries=3, timeout=60
):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout
)
key = self.auth.get_keys()
if HAS_M2:
aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret["key"])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
"""
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
"""
@salt.ext.tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout,
)
            # we may not always have data back; for example, a salt-call
            # return submission is a blind send: we do not subscribe to
            # return events, we just upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
"""
Send a request, return a future which will complete when we send the message
"""
try:
if self.crypt == "clear":
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
except salt.ext.tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError("Connection to master lost")
raise salt.ext.tornado.gen.Return(ret)
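# Illustrative sketch (not part of the original module): the synchronous
# usage pattern for AsyncTCPReqChannel, mirroring what
# AsyncTCPPubChannel.connect_callback() does below. The load dict is
# hypothetical; SyncWrapper drives the channel's coroutines on its own
# IO loop and send() blocks until the master replies or times out.
def _example_sync_request(opts, load, timeout=60):
    req_channel = salt.utils.asynchronous.SyncWrapper(
        AsyncTCPReqChannel, (opts,), loop_kwarg="io_loop",
    )
    try:
        return req_channel.send(load, timeout=timeout)
    finally:
        # SyncWrapper will call either close() or destroy(), whichever is
        # available, when the wrapper is deleted.
        del req_channel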
class AsyncTCPPubChannel(
salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel
):
async_methods = [
"send_id",
"connect_callback",
"connect",
]
close_methods = [
"close",
]
def __init__(self, opts, **kwargs):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.crypt = kwargs.get("crypt", "aes")
self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
self.connected = False
self._closing = False
self._reconnected = False
self.event = salt.utils.event.get_event("minion", opts=self.opts, listen=False)
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, "message_client"):
self.message_client.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def send_id(self, tok, force_auth):
"""
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
"""
load = {"id": self.opts["id"], "tok": tok}
@salt.ext.tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise salt.ext.tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while (
count <= self.opts["tcp_authentication_retries"]
or self.opts["tcp_authentication_retries"] < 0
):
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event({"master": self.opts["master"]}, "__master_connected")
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get("__role") == "syndic":
data = "Syndic {0} started at {1}".format(
self.opts["id"], time.asctime()
)
tag = salt.utils.event.tagify([self.opts["id"], "start"], "syndic")
else:
data = "Minion {0} started at {1}".format(
self.opts["id"], time.asctime()
)
tag = salt.utils.event.tagify([self.opts["id"], "start"], "minion")
load = {
"id": self.opts["id"],
"cmd": "_minion_event",
"pretag": None,
"tok": self.tok,
"data": data,
"tag": tag,
}
req_channel = salt.utils.asynchronous.SyncWrapper(
AsyncTCPReqChannel, (self.opts,), loop_kwarg="io_loop",
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info(
"fire_master failed: master could not be contacted. Request timed out."
)
except Exception: # pylint: disable=broad-except
log.info("fire_master failed: %s", traceback.format_exc())
finally:
# SyncWrapper will call either close() or destroy(), whichever is available
del req_channel
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event({"master": self.opts["master"]}, "__master_disconnected")
@salt.ext.tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token(b"salt")
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
# if this is changed from the default, we assume it was intentional
if int(self.opts.get("publish_port", 4505)) != 4505:
self.publish_port = self.opts.get("publish_port")
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds["publish_port"]
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts["master_ip"], int(self.publish_port),),
kwargs={
"io_loop": self.io_loop,
"connect_callback": self.connect_callback,
"disconnect_callback": self.disconnect_callback,
"source_ip": self.opts.get("source_ip"),
"source_port": self.opts.get("source_publish_port"),
},
)
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt: # pylint: disable=try-except-raise
raise
except Exception as exc: # pylint: disable=broad-except
if "-|RETRY|-" not in six.text_type(exc):
raise SaltClientError(
"Unable to sign_in to master: {0}".format(exc)
) # TODO: better error message
def on_recv(self, callback):
"""
Register an on_recv callback
"""
if callback is None:
return self.message_client.on_recv(callback)
@salt.ext.tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = salt.utils.msgpack.loads(body)
if six.PY3:
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
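# Illustrative sketch (not part of the original module): registering a
# subscriber callback on an AsyncTCPPubChannel once connect() has completed.
# The callback body is hypothetical; on_recv() hands it each decoded
# publish payload.
def _example_register_on_recv(pub_channel):
    def _handle_publish(payload):
        log.debug("received publish payload: %s", payload)
    pub_channel.on_recv(_handle_publish)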
class TCPReqServerChannel(
salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel
):
# TODO: opts!
backlog = 5
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._socket = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
six.reraise(*sys.exc_info())
self._socket.close()
self._socket = None
if hasattr(self.req_server, "shutdown"):
try:
self.req_server.shutdown()
except Exception as exc: # pylint: disable=broad-except
log.exception(
"TCPReqServerChannel close generated an exception: %s", str(exc)
)
elif hasattr(self.req_server, "stop"):
try:
self.req_server.stop()
except socket.error as exc:
if exc.errno != 9:
raise
log.exception(
"TCPReqServerChannel close generated an exception: %s", str(exc)
)
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def pre_fork(self, process_manager):
"""
        Pre-fork we need to bind the request socket (or start the load
        balancer process) that will accept incoming connections
"""
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
def post_fork(self, payload_handler, io_loop):
"""
        After forking we need to set up the request server that will listen
        for incoming messages
payload_handler: function to call with your payloads
"""
if self.opts["pub_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Publish daemon niceness to %i",
self.opts["pub_server_niceness"],
)
os.nice(self.opts["pub_server_niceness"])
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(
self.socket_queue,
self.handle_message,
ssl_options=self.opts.get("ssl"),
)
else:
if salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind(
(self.opts["interface"], int(self.opts["ret_port"]))
)
self.req_server = SaltMessageServer(
self.handle_message,
ssl_options=self.opts.get("ssl"),
io_loop=self.io_loop,
)
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(
self, payload_handler, io_loop
)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, header, payload):
"""
Handle incoming messages from underlying tcp streams
"""
try:
try:
payload = self._decode_payload(payload)
except Exception: # pylint: disable=broad-except
stream.write(salt.transport.frame.frame_msg("bad load", header=header))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(
payload.get("load"), dict
):
yield stream.write(
salt.transport.frame.frame_msg(
"payload and load must be a dict", header=header
)
)
raise salt.ext.tornado.gen.Return()
try:
id_ = payload["load"].get("id", "")
if str("\0") in id_:
log.error("Payload contains an id with a null byte: %s", payload)
                    stream.write(self.serial.dumps("bad load: id contains a null byte"))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error("Payload contains non-string id: %s", payload)
                stream.write(
self.serial.dumps("bad load: id {0} is not a string".format(id_))
)
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if (
payload["enc"] == "clear"
and payload.get("load", {}).get("cmd") == "_auth"
):
yield stream.write(
salt.transport.frame.frame_msg(
self._auth(payload["load"]), header=header
)
)
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.write("Some exception handling minion payload")
log.error(
"Some exception handling a payload from minion", exc_info=True
)
stream.close()
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get("fun", "send")
if req_fun == "send_clear":
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == "send":
stream.write(
salt.transport.frame.frame_msg(
self.crypticle.dumps(ret), header=header
)
)
elif req_fun == "send_private":
stream.write(
salt.transport.frame.frame_msg(
self._encrypt_private(ret, req_opts["key"], req_opts["tgt"],),
header=header,
)
)
else:
log.error("Unknown req_fun %s", req_fun)
# always attempt to return an error to the minion
stream.write("Server-side exception handling payload")
stream.close()
except salt.ext.tornado.gen.Return:
raise
except salt.ext.tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error("Connection was unexpectedly closed", exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error("Unexpected exception occurred: %s", exc, exc_info=True)
raise salt.ext.tornado.gen.Return()
class SaltMessageServer(salt.ext.tornado.tcpserver.TCPServer, object):
"""
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
"""
def __init__(self, message_handler, *args, **kwargs):
io_loop = (
kwargs.pop("io_loop", None) or salt.ext.tornado.ioloop.IOLoop.current()
)
super(SaltMessageServer, self).__init__(*args, **kwargs)
self.io_loop = io_loop
self.clients = []
self.message_handler = message_handler
self._shutting_down = False
@salt.ext.tornado.gen.coroutine
def handle_stream(self, stream, address):
"""
Handle incoming streams and add messages to the incoming queue
"""
log.trace("Req client %s connected", address)
self.clients.append((stream, address))
unpacker = salt.utils.msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg["head"]
self.io_loop.spawn_callback(
self.message_handler, stream, header, framed_msg["body"]
)
except salt.ext.tornado.iostream.StreamClosedError:
log.trace("req client disconnected %s", address)
self.remove_client((stream, address))
except Exception as e: # pylint: disable=broad-except
log.trace("other master-side exception: %s", e)
self.remove_client((stream, address))
stream.close()
def remove_client(self, client):
try:
self.clients.remove(client)
except ValueError:
log.trace("Message server client was not in list to remove")
def shutdown(self):
"""
Shutdown the whole server
"""
if self._shutting_down:
return
self._shutting_down = True
for item in self.clients:
client, address = item
client.close()
self.remove_client(item)
try:
self.stop()
except socket.error as exc:
if exc.errno != 9:
raise
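# Illustrative sketch (not part of the original module): how
# TCPReqServerChannel.post_fork() above wires a bound socket into
# SaltMessageServer. The handler argument is a hypothetical stand-in for
# the real payload handler.
def _example_start_message_server(opts, bound_socket, handler, io_loop):
    server = SaltMessageServer(
        handler,
        ssl_options=opts.get("ssl"),
        io_loop=io_loop,
    )
    # Register the already-bound socket with Tornado's TCPServer machinery,
    # then start listening with the same backlog post_fork() uses.
    server.add_socket(bound_socket)
    bound_socket.listen(TCPReqServerChannel.backlog)
    return server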
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
"""
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
"""
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super(LoadBalancerWorker, self).__init__(message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
self._stop.set()
self.thread.join()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'salt.ext.tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address
)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(salt.ext.tornado.tcpclient.TCPClient):
"""
Override _create_stream() in TCPClient to enable keep alive support.
"""
def __init__(self, opts, resolver=None):
self.opts = opts
super(TCPClientKeepAlive, self).__init__(resolver=resolver)
def _create_stream(
self, max_buffer_size, af, addr, **kwargs
): # pylint: disable=unused-argument,arguments-differ
"""
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
"""
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = salt.ext.tornado.iostream.IOStream(
sock, max_buffer_size=max_buffer_size
)
if salt.ext.tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
"""
    Wrapper class around SaltMessageClient that avoids blocking while waiting to write data to the socket.
"""
def __init__(self, opts, args=None, kwargs=None):
super(SaltMessageClientPool, self).__init__(
SaltMessageClient, opts, args=args, kwargs=kwargs
)
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def close(self):
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
@salt.ext.tornado.gen.coroutine
def connect(self):
futures = []
for message_client in self.message_clients:
futures.append(message_client.connect())
for future in futures:
yield future
raise salt.ext.tornado.gen.Return(None)
def on_recv(self, *args, **kwargs):
for message_client in self.message_clients:
message_client.on_recv(*args, **kwargs)
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def write_to_stream(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0]._stream.write(*args, **kwargs)
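# Illustrative sketch (not part of the original module): how the channels
# above construct a SaltMessageClientPool. The master host and port are
# hypothetical; args/kwargs are forwarded to every SaltMessageClient in
# the pool.
def _example_message_client_pool(opts, io_loop):
    return SaltMessageClientPool(
        opts,
        args=(opts, "master.example.com", 4506),
        kwargs={
            "io_loop": io_loop,
            "source_ip": opts.get("source_ip"),
            "source_port": opts.get("source_ret_port"),
        },
    )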
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
"""
Low-level message sending client
"""
def __init__(
self,
opts,
host,
port,
io_loop=None,
resolver=None,
connect_callback=None,
disconnect_callback=None,
source_ip=None,
source_port=None,
):
self.opts = opts
self.host = host
self.port = port
self.source_ip = source_ip
self.source_port = source_port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or salt.ext.tornado.ioloop.IOLoop.current()
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self.send_timeout_map = {} # request_id -> timeout_callback
self._read_until_future = None
self._on_recv = None
self._closing = False
self._connecting_future = self.connect()
self._stream_return_future = salt.ext.tornado.concurrent.Future()
self.io_loop.spawn_callback(self._stream_return)
def _stop_io_loop(self):
if self.io_loop is not None:
self.io_loop.stop()
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, "_stream") and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.asynchronous.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = salt.ext.tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
if (
self.io_loop
!= salt.ext.tornado.ioloop.IOLoop.current(instance=False)
or not self._stream_return_future.done()
):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self._stop_io_loop(),
)
self.io_loop.start()
except Exception as e: # pylint: disable=broad-except
log.info("Exception caught in SaltMessageClient.close: %s", str(e))
finally:
orig_loop.make_current()
self._tcp_client.close()
self.io_loop = None
self._read_until_future = None
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def connect(self):
"""
Ask for this client to reconnect to the origin
"""
if hasattr(self, "_connecting_future") and not self._connecting_future.done():
future = self._connecting_future
else:
future = salt.ext.tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@salt.ext.tornado.gen.coroutine
def _connect(self):
"""
Try to connect for the rest of time!
"""
while True:
if self._closing:
break
try:
kwargs = {}
if self.source_ip or self.source_port:
if salt.ext.tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {
"source_ip": self.source_ip,
"source_port": self.source_port,
}
else:
log.warning(
"If you need a certain source IP/port, consider upgrading Tornado >= 4.5"
)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(
self.host, self.port, ssl_options=self.opts.get("ssl"), **kwargs
)
self._connecting_future.set_result(True)
break
except Exception as exc: # pylint: disable=broad-except
log.warning("TCP Message Client encountered an exception %r", exc)
yield salt.ext.tornado.gen.sleep(1) # TODO: backoff
# self._connecting_future.set_exception(e)
@salt.ext.tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(
4096, partial=True
)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg["head"]
body = framed_msg["body"]
message_id = header.get("mid")
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error(
"Got response for message_id %s that we are not tracking",
message_id,
)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug(
"tcp stream to %s:%s closed, unable to recv",
self.host,
self.port,
)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if "detect_mode" in self.opts:
log.info(
"There was an error trying to use TCP transport; "
"attempting to fallback to another transport"
)
else:
raise SaltClientError
except Exception as e: # pylint: disable=broad-except
log.error("Exception parsing response", exc_info=True)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@salt.ext.tornado.gen.coroutine
def _stream_send(self):
while (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# if the connection is dead, lets fail this send, and make sure we
# attempt to reconnect
except salt.ext.tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception("Unable to find available messageid")
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
"""
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(
SaltReqTimeoutError("Message timed out")
)
def send(self, msg, timeout=None, callback=None, raw=False):
"""
Send given message, and return a future
"""
message_id = self._message_id()
header = {"mid": message_id}
future = salt.ext.tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(
timeout, self.timeout_message, message_id
)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append(
(message_id, salt.transport.frame.frame_msg(msg, header=header))
)
return future
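# Illustrative sketch (not part of the original module): the header format
# SaltMessageClient.send() uses to correlate requests and responses. The
# arguments are hypothetical; _stream_return() resolves the future whose
# "mid" matches the reply header.
def _example_request_frame(message_id, load):
    header = {"mid": message_id}
    return salt.transport.frame.frame_msg(load, header=header)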
class Subscriber(object):
"""
Client object for use with the TCP publisher server
"""
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
class PubServer(salt.ext.tornado.tcpserver.TCPServer, object):
"""
TCP publisher
"""
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(ssl_options=opts.get("ssl"))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.presence_events = False
if self.opts.get("presence_events", False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != "tcp":
tcp_only = False
if tcp_only:
# Only when the transport is TCP only, the presence events will
# be handled here. Otherwise, it will be handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
"master", opts=self.opts, listen=False
)
def close(self):
if self._closing:
return
self._closing = True
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = {client}
if self.presence_events:
data = {"new": [id_], "lost": []}
self.event.fire_event(
data, salt.utils.event.tagify("change", "presence")
)
data = {"present": list(self.present.keys())}
self.event.fire_event(
data, salt.utils.event.tagify("present", "presence")
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {"new": [], "lost": [id_]}
self.event.fire_event(
data, salt.utils.event.tagify("change", "presence")
)
data = {"present": list(self.present.keys())}
self.event.fire_event(
data, salt.utils.event.tagify("present", "presence")
)
@salt.ext.tornado.gen.coroutine
def _stream_read(self, client):
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
body = framed_msg["body"]
if body["enc"] != "aes":
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
load = crypticle.loads(body["load"])
if six.PY3:
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load["id"], load["tok"]):
continue
client.id_ = load["id"]
self._add_client_present(client)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug("tcp stream to %s closed, unable to recv", client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e: # pylint: disable=broad-except
log.error(
"Exception parsing response from %s", client.address, exc_info=True
)
continue
def handle_stream(self, stream, address):
log.trace("Subscriber at %s connected", address)
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@salt.ext.tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug("TCP PubServer sending payload: %s", package)
payload = salt.transport.frame.frame_msg(package["payload"])
to_remove = []
if "topic_lst" in package:
topic_lst = package["topic_lst"]
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (eg cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug("Publish target %s not connected", topic)
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug(
"Subscriber at %s has disconnected from publisher", client.address
)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace("TCP PubServer finished publishing payload")
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(opts)
self.io_loop = None
def __setstate__(self, state):
salt.master.SMaster.secrets = state["secrets"]
self.__init__(state["opts"])
def __getstate__(self):
return {"opts": self.opts, "secrets": salt.master.SMaster.secrets}
def _publish_daemon(self, **kwargs):
"""
Bind to the interface specified in the configuration file
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
log_queue = kwargs.get("log_queue")
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get("log_queue_level")
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
if self.io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
# Spin up the publisher
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts["interface"], int(self.opts["publish_port"])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri, io_loop=self.io_loop, payload_handler=pub_server.publish_payload,
)
# Securely create socket
log.info("Starting the Salt Puller on %s", pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
def pre_fork(self, process_manager, kwargs=None):
"""
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
"""
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
"""
Publish "load" to minions
"""
payload = {"enc": "aes"}
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
payload["load"] = crypticle.dumps(load)
if self.opts["sign_pub_messages"]:
master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem")
log.debug("Signing data packet")
payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"])
# Use the Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
# TODO: switch to the actual asynchronous interface
# pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient, (pull_uri,), loop_kwarg="io_loop",
)
pub_sock.connect()
int_payload = {"payload": self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load["tgt_type"] == "list" and not self.opts.get("order_masters", False):
if isinstance(load["tgt"], six.string_types):
# Fetch a list of minions that match
_res = self.ckminions.check_minions(
load["tgt"], tgt_type=load["tgt_type"]
)
match_ids = _res["minions"]
log.debug("Publish Side Match: %s", match_ids)
                # Send the list of matched minions through so the transport can target them
int_payload["topic_lst"] = match_ids
else:
int_payload["topic_lst"] = load["tgt"]
# Send it over IPC!
pub_sock.send(int_payload)
|
ex7_processes_show_ver.py
|
#!/usr/bin/env python
'''
Use processes and Netmiko to connect to each of the devices in the database. Execute
'show version' on each device. Record the amount of time required to do this.
'''
from __future__ import print_function, unicode_literals
from netmiko import ConnectHandler
from datetime import datetime
from multiprocessing import Process
import django
django.setup()
from net_system.models import NetworkDevice # noqa
def show_version(a_device):
'''
Execute show version command using Netmiko
'''
creds = a_device.credentials
remote_conn = ConnectHandler(device_type=a_device.device_type,
ip=a_device.ip_address,
username=creds.username,
password=creds.password,
port=a_device.port, secret='')
print()
print('#' * 80)
print(remote_conn.send_command_expect("show version"))
print('#' * 80)
print()
def main():
'''
Use processes and Netmiko to connect to each of the devices in the database. Execute
'show version' on each device. Record the amount of time required to do this.
'''
start_time = datetime.now()
devices = NetworkDevice.objects.all()
procs = []
for a_device in devices:
my_proc = Process(target=show_version, args=(a_device,))
my_proc.start()
procs.append(my_proc)
for a_proc in procs:
print(a_proc)
a_proc.join()
print("\nElapsed time: " + str(datetime.now() - start_time))
if __name__ == "__main__":
main()
|
experiment_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
import threading
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.python.util import compat
from tensorflow.python.util.all_util import reveal_undocumented
patch = tf.test.mock.patch
class SheepCounter(object):
"""To be patched in for time.sleep, in order to capture how long was slept."""
def __init__(self):
self._total_time = 0
self._sleeptimes = []
def __call__(self, t):
self._total_time += t
self._sleeptimes += [t]
@property
def total_time(self):
return self._total_time
@property
def sleep_times(self):
return self._sleeptimes
class TestEstimator(tf.contrib.learn.Evaluable, tf.contrib.learn.Trainable):
def __init__(self, config=None, max_evals=5):
self.eval_count = 0
self.fit_count = 0
self._max_evals = max_evals
self.export_count = 0
self.monitors = []
self._config = config or run_config.RunConfig()
self._model_dir = tempfile.mkdtemp()
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf.logging.info('evaluate called with args: %s' % kwargs)
self.eval_count += 1
if self.eval_count > self._max_evals:
tf.logging.info('Ran %d evals. Done.' % self.eval_count)
raise StopIteration()
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def fake_checkpoint(self):
save_path = os.path.join(self.model_dir, 'model.ckpt')
with tf.Session() as sess:
var = tf.Variable(1.0, name='var0')
save = tf.train.Saver({var.op.name: var})
var.initializer.run()
save.save(sess, save_path, global_step=0)
def fit(self, **kwargs):
self.fake_checkpoint()
tf.logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def export_savedmodel(self, export_dir_base, export_input_fn, **kwargs):
tf.logging.info('export_savedmodel called with args: %s, %s, %s'
% (export_dir_base, export_input_fn, kwargs))
self.export_count += 1
return os.path.join(compat.as_bytes(export_dir_base),
compat.as_bytes('bogus_timestamp'))
class ExperimentTest(tf.test.TestCase):
def setUp(self):
# The official name is tf.train, so tf.training was obliterated.
reveal_undocumented('tensorflow.python.training')
def _cluster_spec(self):
return {
tf.contrib.learn.TaskType.PS: ['host1:2222', 'host2:2222'],
tf.contrib.learn.TaskType.WORKER:
['host3:2222', 'host4:2222', 'host5:2222']
}
def test_train(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
fit_args = ex.train(delay_secs=0)
self.assertEquals(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEquals(0, est.eval_count)
def test_train_delay(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_train_default_delay(self):
for task_id in [0, 1, 3]:
tf_config = {'task': {'index': task_id}}
with patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
with patch('time.sleep', SheepCounter()) as sheep:
ex.train()
self.assertAlmostEqual(task_id * 5, sheep.total_time, delta=0.1)
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_starts_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'environment': tf.contrib.learn.Environment.CLOUD,
'task': {
'type': tf.contrib.learn.TaskType.WORKER,
'index': 1
}
}
with patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(
master='host4:2222', num_cores=15, gpu_memory_fraction=0.314)
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
with patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=1)
# Ensure that the delay takes into account the time to start the server.
self.assertAlmostEqual(1, sheep.total_time, delta=0.1)
# Assert.
expected_config_proto = tf.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name=tf.contrib.learn.TaskType.WORKER,
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([tf.test.mock.call().start()])
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = tf.contrib.learn.RunConfig(master='host4:2222')
ex = tf.contrib.learn.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_server_does_not_start_with_empty_master(self, mock_server):
tf_config = {'cluster': self._cluster_spec()}
with patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(master='')
ex = tf.contrib.learn.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
tf_config = {
'cluster': self._cluster_spec(),
'environment': tf.contrib.learn.Environment.CLOUD,
'task': {
'index': 1
}
}
with patch.dict(
'os.environ',
{'TF_CONFIG': json.dumps(tf_config)}), self.assertRaises(ValueError):
config = tf.contrib.learn.RunConfig(
master='host3:2222' # Normally selected by task type.
)
ex = tf.contrib.learn.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
est = TestEstimator()
est.fake_checkpoint()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEquals(1, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_evaluate_delay(self):
est = TestEstimator()
est.fake_checkpoint()
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with patch('time.sleep', SheepCounter()) as sheep:
ex.evaluate(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_continuous_eval(self):
est = TestEstimator()
est.fake_checkpoint()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(StopIteration, ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertEquals(6, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
est = TestEstimator()
est.fake_checkpoint()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
with patch('time.sleep', SheepCounter()) as sheep:
self.assertRaises(StopIteration, ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertAlmostEqual(5 * delay, sheep.total_time, delta=0.1)
def test_run_local(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, len(est.monitors))
self.assertTrue(
isinstance(est.monitors[0],
tf.contrib.learn.monitors.ValidationMonitor))
def test_train_and_evaluate(self):
est = TestEstimator()
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input')
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.train_and_evaluate()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, est.export_count)
self.assertEquals(1, len(est.monitors))
self.assertTrue(
isinstance(est.monitors[0],
tf.contrib.learn.monitors.ValidationMonitor))
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_run_std_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'task': {
'type': tf.contrib.learn.TaskType.PS,
'index': 1
}
}
with patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(
master='host2:2222',
num_cores=15,
gpu_memory_fraction=0.314,)
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls(
[tf.test.mock.call().start(), tf.test.mock.call().join()])
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = tf.contrib.learn.RunConfig(master='host4:2222')
with self.assertRaises(ValueError):
ex = tf.contrib.learn.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
ex.test()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
def test_continuous_eval_evaluates_checkpoint_once(self):
# Temporarily disabled until we figure out the threading story on Jenkins.
return
# pylint: disable=unreachable
# The TestEstimator will raise StopIteration the second time evaluate is
# called.
ex = tf.contrib.learn.Experiment(
TestEstimator(max_evals=1),
train_input_fn='train_input',
eval_input_fn='eval_input')
# This should not happen if the logic restricting evaluation of the same
# checkpoint works. We do need some checkpoint though, otherwise Experiment
# will never evaluate.
ex.estimator.fake_checkpoint()
# Start a separate thread with continuous eval
thread = threading.Thread(
target=lambda: ex.continuous_eval(delay_secs=0, throttle_delay_secs=0))
thread.start()
# The thread will die if it evaluates twice, and we should never evaluate
# twice since we don't write another checkpoint. Since we did not enable
# throttling, if it hasn't died after two seconds, we're good.
thread.join(2)
self.assertTrue(thread.is_alive())
# But we should have evaluated once.
count = ex.estimator.eval_count
self.assertEquals(1, count)
if __name__ == '__main__':
tf.test.main()
|
WebSockets.py
|
import socket
import select
import threading
import multiprocessing
import Queue
import time
import collections
import sys
import logging
BUFFER_SIZE = 4096
WEBSOCKET_VERSION = "13"
WEBSOCKET_MAGIC_HANDSHAKE_STRING = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
class WebSocketInitializationException(Exception):
"""Raised when a web socket initialization fails due to bad handshakes or requests"""
pass
class WebSocketUnmaskedCommunicationException(Exception):
"""Raised when a client->server communication is not masked"""
pass
class WebSocketInvalidDataException(Exception):
"""Raised when receiving data goes horribly wrong (namely...it got something unexpected)"""
pass
class WebSocketTransaction:
"""Contains transaction data which is passed through the queues when sending
or receiving data to or from a socket."""
TRANSACTION_NEWSOCKET = 0 #used on the socket notification queue to inform a service it has a new socket with the given id
TRANSACTION_DATA = 1 #used on send/recv queues to send/receive data to/from a socket
TRANSACTION_CLOSE = 2 #used on send/recv queues to close the socket or inform the service the socket has been closed
def __init__(self, transactionType, socketId, data):
self.transactionType = transactionType
self.socketId = socketId
self.data = data
class WebSocketClient:
"""Contains socket information about a client which is connected to the server."""
class WebSocketRecvState:
"""Representation of the state of an in progress receiving operation"""
STATE_TYPE = 0 #this state is used when the type byte is the next thing to receive
STATE_LEN = 1 #this state is used when the length bytes still have some bytes which should be received next
        STATE_MASK = 2 #this state is used when the masks still have some bytes which should be received next
STATE_PAYLOAD = 3 #this state is used when the payload still has some bytes which should be received next
STATE_DONE = 4 #this state is used when the web socket is done receiving
def __init__(self):
"""Initializes an initial receive state to nothing recieved yet"""
self.typeByte = None
self.lenBytes = bytearray()
self.computedLength = 0
self.maskBytes = bytearray()
self.maskIndex = 0
self.unmaskedPayloadBytes = bytearray()
self.state = WebSocketClient.WebSocketRecvState.STATE_TYPE
def receive(self, receivedBytes):
"""Processes some bytes into this object. Returns the unprocessed bytes.
This operates as a state machine on the bytes as though they are an array. It
processes each byte individually and changes the state depending on the value
of the byte. In the case where there aren't enough bytes to complete a receive
sequence (going from STATE_TYPE to STATE_DONE), it should pick up where it left
off on the next receive."""
byteQueue = collections.deque(receivedBytes)
while len(byteQueue) > 0 and self.state != WebSocketClient.WebSocketRecvState.STATE_DONE:
b = byteQueue.popleft() #pop from the beginning like a queue
if self.state == WebSocketClient.WebSocketRecvState.STATE_TYPE:
#process this byte as the initial type declarer
if b != 0x81:
#this shouldn't be anything but 0x81
raise WebSocketInvalidDataException()
self.typeByte = b
self.state = WebSocketClient.WebSocketRecvState.STATE_LEN
elif self.state == WebSocketClient.WebSocketRecvState.STATE_LEN:
#process this byte as part of the length
if len(self.lenBytes) == 0:
#this is the first byte
if b < 0x80:
#it should have its 8th bit set since we need masked communication
raise WebSocketInvalidDataException()
b = b & 0x7F #unmask it
self.lenBytes.append(b)
#figure out what to do next
if b <= 0x7D:
#this is the only length byte we need. time to move on to masks
self.computedLength = b
self.state = WebSocketClient.WebSocketRecvState.STATE_MASK
#if we haven't changed the state by now, it needs some more information
elif self.lenBytes[0] == 0x7E:
#two bytes length (16 bits)
self.lenBytes.append(b)
if len(self.lenBytes) == 3:
#this was the last one
self.computedLength = ((self.lenBytes[1] & 0xFF) << 8 | (self.lenBytes[2] & 0xFF))
self.state = WebSocketClient.WebSocketRecvState.STATE_MASK
elif self.lenBytes[0] == 0x7F:
#eight bytes length (64 bits)
self.lenBytes.append(b)
if len(self.lenBytes) == 9:
#this was the last one
self.computedLength = (self.lenBytes[1] & 0xFF) << 56
self.computedLength |= (self.lenBytes[2] & 0xFF) << 48
self.computedLength |= (self.lenBytes[3] & 0xFF) << 40
self.computedLength |= (self.lenBytes[4] & 0xFF) << 32
self.computedLength |= (self.lenBytes[5] & 0xFF) << 24
self.computedLength |= (self.lenBytes[6] & 0xFF) << 16
self.computedLength |= (self.lenBytes[7] & 0xFF) << 8
self.computedLength |= self.lenBytes[8] & 0xFF
self.state = WebSocketClient.WebSocketRecvState.STATE_MASK
elif self.state == WebSocketClient.WebSocketRecvState.STATE_MASK:
#process this byte as part of the masks
self.maskBytes.append(b)
if len(self.maskBytes) == 4:
#all masks received
self.state = WebSocketClient.WebSocketRecvState.STATE_PAYLOAD
elif self.state == WebSocketClient.WebSocketRecvState.STATE_PAYLOAD:
#process this byte as part of the payload
b = b ^ self.maskBytes[self.maskIndex]
self.maskIndex = (self.maskIndex + 1) % 4
self.unmaskedPayloadBytes.append(b)
if len(self.unmaskedPayloadBytes) == self.computedLength:
#we are done receiving
self.state = WebSocketClient.WebSocketRecvState.STATE_DONE
#process the remaining bytes into a bytearray and return it
return bytearray(byteQueue)
class WebSocketManager(threading.Thread):
"""Thread which manages communication between WebSockets and their clients.
    This asynchronously sends/receives data to/from sockets while at the same time
handling the service send/recv queues in a "switchboard" like fashion."""
def __init__(self, socketList, stopEvent, processDirectory):
"""Initializes a new WebSocketSendRecvThread with the given sockets,
a multiprocessing.Event (stopEvent) to stop the thread gracefully, and
the process directory which will contain all the processes"""
threading.Thread.__init__(self)
self.sockets = {} #sockets are stored sorted by their unique ids
for sock in socketList:
self.sockets[sock.id] = sock
self.socketListLock = threading.Lock()
self.stopEvent = stopEvent
self.processDirectory = processDirectory
def addWebSocket(self, s):
"""Adds a socket to the list to be asyncronously managed. Returns if it was successful"""
if self.isAlive() == False:
#create a new one
return False
else:
#add to the existing one
with self.socketListLock:
self.sockets[s.id] = s
return True
def _stringToFrame(self, data):
"""Turns a string into a WebSocket data frame. Returns a bytes(). 'data' is a string"""
#determine the size of the data we were told to send
rawData = data#bytearray(data, 'ascii')
dataLength = len(rawData)
outputBytes = bytearray()
outputBytes.append(0x81) #0x81 = text data type
            if dataLength <= 0x7D:
                #a short payload: the length fits directly in the 7-bit length field
                outputBytes.append(dataLength)
            elif dataLength <= 0xFFFF:
#two additional bytes of length needed
outputBytes.append(0x7E)
outputBytes.append(dataLength >> 8 & 0xFF)
outputBytes.append(dataLength & 0xFF)
else:
#eight additional bytes of length needed
outputBytes.append(0x7F)
outputBytes.append(dataLength >> 56 & 0xFF)
outputBytes.append(dataLength >> 48 & 0xFF)
outputBytes.append(dataLength >> 40 & 0xFF)
outputBytes.append(dataLength >> 32 & 0xFF)
outputBytes.append(dataLength >> 24 & 0xFF)
outputBytes.append(dataLength >> 16 & 0xFF)
outputBytes.append(dataLength >> 8 & 0xFF)
outputBytes.append(dataLength & 0xFF)
#tack on the raw data now
for byte in rawData:
outputBytes.append(ord(byte))
return bytes(outputBytes)
def _sendToSocket(self, data, sock):
"""Sends some bytes to a socket and returns the remaining bytes or none if it was all sent"""
nSent = sock.send(data)
if nSent == len(data):
return None
else:
return data[nSent:] #if we didn't send the whole thing, return from the last index sent to the end
def __queueHelper(self):
"""Thread method to operate the "switchboard" between server queues and the individual socket queues"""
while self.stopEvent.is_set() == False:
processes = self.processDirectory.getAllProcesses()
for pid in processes:
#read through the sendQueue in this process and send it to the appropriate sockets
process = processes[pid]
while process.sendQueue.empty() == False:
try:
transaction = process.sendQueue.get_nowait()
with self.socketListLock:
if transaction.socketId in self.sockets:
self.sockets[transaction.socketId].sendQueue.put(transaction)
except Queue.Empty:
break
#get all our sockets
with self.socketListLock:
socketIds = self.sockets.keys()
for sockId in socketIds:
s = self.sockets[sockId] #this is a WebSocketClient
try:
while s.recvQueue.empty() == False:
                            #put their receive queue into the appropriate process
transaction = s.recvQueue.get_nowait()
processes[s.serviceId].recvQueue.put(transaction)
#if this was a close transaction, we need to remove it from our list
if transaction.transactionType == WebSocketTransaction.TRANSACTION_CLOSE:
with self.socketListLock:
self.sockets.pop(sockId)
except Queue.Empty:
break;
time.sleep(0.005) #sleep for 5 ms before doing this again
def run(self):
"""Main thread method which will run until all sockets are no longer active"""
#start the queue helper
queueHelper = threading.Thread(target=self.__queueHelper)
queueHelper.start()
while self.stopEvent.is_set() == False:
with self.socketListLock:
#get the list of socket ids so that we can iterate through them without eating up the socket list lock
#in theory, fetching an item from a dictionary in python is thread safe
socketIds = self.sockets.keys()
for sockId in socketIds:
s = self.sockets[sockId] #these are not sockets, but WebSocket objects
if s.open == False:
#remove this socket from our list and put this event into the receive queue
print "Notice: Socket", s, "removed."
s.recvQueue.put(WebSocketTransaction(WebSocketTransaction.TRANSACTION_CLOSE, sockId, None))
continue #skip the rest of this
with s.lock: #lock the individiual socket
#sadly, we need to call select on every socket individually so that we can keep track of the WebSocketClient class
sList = [ s.connection ]
try:
r, w, x = select.select(sList, sList, sList, 0)
except socket.error:
#it failed. remove this client
print "Notice: Socket", s, "failed."
s.open = False #this is no longer open, but we can't call close since it would break it some more
if x:
print "Notice: Socket", s, "has an exceptional condition"
with s.lock:
s.open = False
if r:
#the socket is ready to be read
try:
with s.lock:
received = r[0].recv(4096) #we will receive up to 4096 bytes
receivedBytes = bytearray(received)
if len(receivedBytes) == 0:
#the socket was gracefully closed on the other end
s.close()
while len(receivedBytes) > 0:
receivedBytes = s._readProgress.receive(receivedBytes)
if s._readProgress.state == WebSocketClient.WebSocketRecvState.STATE_DONE:
#a string was read, so put it in the queue
try:
transaction = WebSocketTransaction(WebSocketTransaction.TRANSACTION_DATA, s.id, s._readProgress.unmaskedPayloadBytes.decode(sys.getdefaultencoding()))
s.recvQueue.put_nowait(transaction)
except Queue.Full:
logging.warning("Notice: Receive queue full on WebSocketClient" + str(s) + "... did you forget to empty the queue or call task_done?")
pass #oh well...I guess their data gets to be lost since they didn't bother to empty their queue
s._readProgress = WebSocketClient.WebSocketRecvState() #reset the progress
except WebSocketInvalidDataException:
#The socket got some bad data, so it should be closed
with s.lock:
s.open = False
except socket.error:
pass #don't worry about it
if w:
#the socket is ready to be written
#for writing, the exception catcher has to be inside rather than outside
#everything like the received catcher was since we need to make sure to
#inform the sendqueue that we are done with the passed task
with s.lock:
if s._writeProgress != None:
#we still have something to write
try:
s._writeProgress = self._sendToSocket(s._writeProgress, s.connection)
except socket.error:
#probably a broken pipe. don't worry about it...it will be caught on the next loop around
pass
elif not s.sendQueue.empty():
#there is something new to start sending
try:
transaction = s.sendQueue.get_nowait()
if (transaction.transactionType == WebSocketTransaction.TRANSACTION_CLOSE):
#they want us to close the socket...
s.close()
else:
#they want us to write something to the socket
toWrite = self._stringToFrame(transaction.data)
try:
self._sendToSocket(toWrite, s.connection)
except socket.error:
#probably a broken pipe. don't worry about it...it will be caught on the next loop around
pass
except Queue.Empty:
pass #don't worry about it...we just couldn't get anything
time.sleep(0.025) #wait 25ms for anything else to happen on the socket so we don't use 100% cpu on this one thread
__idLock = multiprocessing.Lock()
__currentSocketId = 0
@staticmethod
def __getSocketId():
ret = None
with WebSocketClient.__idLock:
ret = WebSocketClient.__currentSocketId
WebSocketClient.__currentSocketId = ret + 1
return ret
def __init__(self, wsManager, conn, addr):
"""Initializes the web socket client
wsManager: websocket manager that can be used
        conn: socket object to use as the connection, which has already completed its handshake
addr: address of the client"""
self.id = WebSocketClient.__getSocketId()
self.serviceId = None #this is used externally to map this socket to a specific service
self.wsManager = wsManager
self.connection = conn
self.address = addr
self.open = True #we assume it is open
self.sendQueue = Queue.Queue()
self.recvQueue = Queue.Queue()
self.lock = threading.Lock() #This lock only needs to be used when accessing anything but the queues
self._readProgress = WebSocketClient.WebSocketRecvState()
self._writeProgress = None
wsManager.addWebSocket(self)
def close(self):
"""Closes the connection"""
if not self.open:
return
self.open = False
self.connection.shutdown(socket.SHUT_RDWR)
self.connection.close()
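# --- Illustrative sketch (not part of the original module) ---
# A hedged, self-contained example of the framing handled above: build a
# masked client->server text frame by hand (0x81 type byte, masked 7-bit
# length byte, 4 mask bytes, XOR-masked payload) and feed it through
# WebSocketClient.WebSocketRecvState to recover the payload. It opens no
# sockets and is never called by this module; the function name is made up
# for illustration only.
def _demo_recv_state_machine():
    payload = b"hello"
    mask = bytearray([0x1A, 0x2B, 0x3C, 0x4D])
    frame = bytearray([0x81, 0x80 | len(payload)])  # 0x80 sets the mask bit
    frame.extend(mask)
    for i, ch in enumerate(bytearray(payload)):
        frame.append(ch ^ mask[i % 4])  # client payloads are XOR-masked
    state = WebSocketClient.WebSocketRecvState()
    leftover = state.receive(frame)
    assert state.state == WebSocketClient.WebSocketRecvState.STATE_DONE
    assert bytes(state.unmaskedPayloadBytes) == payload
    return leftover  # any trailing bytes belong to the next frame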
|
watchdog_plugin.py
|
#!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import time
import os
import copy
from ycm import utils
from threading import Thread, Lock
# This class implements the Bottle plugin API:
# http://bottlepy.org/docs/dev/plugindev.html
#
# The idea here is to decorate every route handler automatically so that on
# every request, we log when the request was made. Then a watchdog thread checks
# every check_interval_seconds whether the server has been idle for a time
# greater that the passed-in idle_suicide_seconds. If it has, we kill the
# server.
#
# We want to do this so that if something goes bonkers in Vim and the server
# never gets killed by the client, we don't end up with lots of zombie servers.
class WatchdogPlugin( object ):
name = 'watchdog'
api = 2
def __init__( self,
idle_suicide_seconds,
check_interval_seconds = 60 * 10 ):
self._check_interval_seconds = check_interval_seconds
self._idle_suicide_seconds = idle_suicide_seconds
self._last_request_time = time.time()
self._last_request_time_lock = Lock()
if idle_suicide_seconds <= 0:
return
self._watchdog_thread = Thread( target = self._WatchdogMain )
self._watchdog_thread.daemon = True
self._watchdog_thread.start()
def _GetLastRequestTime( self ):
with self._last_request_time_lock:
return copy.deepcopy( self._last_request_time )
def _SetLastRequestTime( self, new_value ):
with self._last_request_time_lock:
self._last_request_time = new_value
def _WatchdogMain( self ):
while True:
time.sleep( self._check_interval_seconds )
if time.time() - self._GetLastRequestTime() > self._idle_suicide_seconds:
utils.TerminateProcess( os.getpid() )
def __call__( self, callback ):
def wrapper( *args, **kwargs ):
self._SetLastRequestTime( time.time() )
return callback( *args, **kwargs )
return wrapper
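# --- Illustrative sketch (not part of the original plugin) ---
# A hedged example of how an application might install this plugin via the
# Bottle plugin API referenced above: once installed, each route handler is
# wrapped by __call__, so every request refreshes _last_request_time. It
# assumes the bottle package is importable; the route path and timeout are
# made-up values, and the function is never called here.
def _demo_install_watchdog():
  import bottle
  app = bottle.Bottle()
  # Shut the server down after an hour of inactivity, checking every 10 minutes.
  app.install( WatchdogPlugin( idle_suicide_seconds = 3600 ) )
  @app.route( '/ready' )
  def ready():
    return 'ok'
  return app  # the caller would serve it with bottle.run( app = app, ... )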
|
gui.py
|
import runpy
import threading
from Tkinter import *
def run():
    th = threading.Thread(target=threadFunc)
    th.start()
    labelText1.set("Running...")
    poll(th)
def poll(th):
    # Poll the worker thread instead of join()ing, which would freeze the Tk event loop.
    if th.is_alive():
        app.after(100, poll, th)
    else:
        labelText1.set("Done!")
def threadFunc():
runpy.run_path('./run.py', run_name='__main__')
app = Tk()
app.title("Fasternet")
app.configure(bg="white")
app.geometry('500x500')
canvas = Canvas(app, width = 500, height = 150)
canvas.configure(bg="white")
canvas.pack()
img = PhotoImage(file="logo.gif")
canvas.create_image(20,20, anchor=NW, image=img)
button1 = Button(app, text="Start", width=10, command=run, bg="#1d9641", activebackground="#fff", activeforeground="#1d9641", fg="#fff", font=("Lucida Sans", 14))
button1.pack(side='bottom', padx=20, pady=50)
labelText1 = StringVar()
labelText1.set("Waiting to begin")
label1 = Label(app, textvariable=labelText1, height=1, text="Lucida Sans", font=("Lucida Sans", 15), bg="white")
label1.pack(side='bottom')
labelText2 = StringVar()
labelText2.set("")
label2 = Label(app, textvariable=labelText2, height=1, text="Lucida Sans", font=("Lucida Sans", 15), bg="white")
label2.pack()
labelText3 = StringVar()
labelText3.set("Click below to optimize your internet connection.")
label3 = Label(app, textvariable=labelText3, height=1, text="Lucida Sans", font=("Lucida Sans", 13), bg="white")
label3.pack()
labelText4 = StringVar()
labelText4.set("We'll take care of the details.")
label4 = Label(app, textvariable=labelText4, height=1, text="Lucida Sans", font=("Lucida Sans", 13), bg="white")
label4.pack()
app.iconbitmap(r'transparenticon.ico')
app.mainloop()
|
clientVideo.py
|
import cv2
from socket import socket, AF_INET, SOCK_STREAM
from imutils.video import WebcamVideoStream
from threading import Thread
import numpy as np
import zlib
import struct
HOST = input("Enter Server IP\n")
PORT = 3000
CHUNK=1024
lnF = 640*480*3
def SendFrame():
while True:
try:
frame = wvs.read()
cv2_im = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (640, 480))
frame = np.array(frame, dtype = np.uint8).reshape(1, lnF)
jpg_as_text = bytearray(frame)
databytes = zlib.compress(jpg_as_text, 9)
length = struct.pack('!I', len(databytes))
bytesToBeSend = b''
client.sendall(length)
while len(databytes) > 0:
if (1000 * CHUNK) <= len(databytes):
bytesToBeSend = databytes[:(1000 * CHUNK)]
databytes = databytes[(1000 * CHUNK):]
client.sendall(bytesToBeSend)
else:
bytesToBeSend = databytes
client.sendall(bytesToBeSend)
databytes = b''
print("##### Data Sent!! #####")
except:
continue
def RecieveMedia():
while True:
try:
lengthbuf = recvall(4)
length, = struct.unpack('!I', lengthbuf)
databytes = recvall(length)
img = zlib.decompress(databytes)
if len(databytes) == length:
print("Recieving Media..")
print("Image Frame Size:- {}".format(len(img)))
img = np.array(list(img))
img = np.array(img, dtype = np.uint8).reshape(480, 640, 3)
cv2.imshow("Stream", img)
if cv2.waitKey(1) == 27:
cv2.destroyAllWindows()
else:
print("Data CORRUPTED")
except:
continue
def recvall(size):
databytes = b''
while len(databytes) != size:
to_read = size - len(databytes)
if to_read > (1000 * CHUNK):
databytes += client.recv(1000 * CHUNK)
else:
databytes += client.recv(to_read)
return databytes
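# --- Illustrative sketch (not part of the original script) ---
# A hedged, self-contained illustration of the wire format used by SendFrame
# and RecieveMedia above: every frame is a 4-byte big-endian length prefix
# followed by a zlib-compressed payload. The helper names are made up for
# illustration and are never called in this script.
def _demo_pack_frame(raw_bytes):
    compressed = zlib.compress(raw_bytes, 9)
    return struct.pack('!I', len(compressed)) + compressed
def _demo_unpack_frame(framed_bytes):
    length, = struct.unpack('!I', framed_bytes[:4])
    return zlib.decompress(framed_bytes[4:4 + length])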
client = socket(family=AF_INET, type=SOCK_STREAM)
client.connect((HOST, PORT))
wvs = WebcamVideoStream(0).start()
initiation = client.recv(5).decode()
if initiation == "start":
RecieveFrameThread = Thread(target=RecieveMedia).start()
SendFrameThread = Thread(target=SendFrame).start()
|
AlgorithmTest.py
|
import math
from src.Movement import FollowingDrone
import time
from threading import Thread
# Create sample world that the Car and Drone move within
class Car:
def __init__(self, start_x, start_y):
self.x = start_x
self.y = start_y
# Can be any series of movements within the world. Should not be jumpy.
def move(self, world_x_limit, world_y_limit):
# Move in a circular motion
# First move to the edge of circle from the start location
while True:
if self.x >= 0.95 * world_x_limit and self.y >= 0.5 * world_y_limit:
break
if self.x <= 0.95 * world_x_limit:
self.x += 0.1
if self.y <= 0.5 * world_y_limit:
self.y += 0.1
time.sleep(0.0001)
# Car should now be on the edge of the circle
# Now continuously move around in the circle
t = 0
while True:
self.x = world_x_limit / 2 + world_x_limit * 0.45 * math.cos(t)
self.y = world_y_limit / 2 + world_y_limit * 0.45 * math.sin(t)
t += 0.001
time.sleep(0.001) # Not related to t value
class TestFollowingDrone(FollowingDrone):
world_x = 0
world_y = 0
    # The tilt angles don't translate directly into virtual-world movement; scale them with this factor
test_scale_factor = 1
# Drone can see objects within a box.
# Square box, with side length given here - drone at the centre
field_of_view = 25
def __init__(self, x, y):
super(FollowingDrone, self).__init__()
self.world_x = x
self.world_y = y
def print_coords(self):
print(self.world_x, " ", self.world_y)
def sleep(self, time_length):
time.sleep(time_length)
# Override the movement function so that movements are made within virtual world
def move(self, vertical_movement=0):
self.world_x += self.roll * self.movement_gap * self.test_scale_factor
self.world_y += self.pitch * self.movement_gap * self.test_scale_factor
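# --- Illustrative sketch (not part of the original test) ---
# A hedged worked example of the relative-position convention used by World
# below: the car's offset from the drone is divided by the drone's
# field_of_view, so an offset of (10, -5) with field_of_view 25 maps to
# (0.4, -0.2), and anything outside [-1, 1] means the car is not visible.
# The helper is made up for illustration and is never called here.
def _demo_relative_position(car_x, car_y, drone_x, drone_y, field_of_view=25):
    rel_x = float(car_x - drone_x) / field_of_view
    rel_y = float(car_y - drone_y) / field_of_view
    visible = -1 <= rel_x <= 1 and -1 <= rel_y <= 1
    return rel_x, rel_y, visible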
# Manages the coordinates of the drone and car in the virtual world
# Determines the virtual coordinates of the car in relation to the drone
class World:
# car_x = 0
# car_y = 0
# drone_x = 0
# drone_y = 0
def __init__(self):
self.drone_x = 0
self.drone_y = 0
self.car = Car(0, 0)
self.drone = TestFollowingDrone(self.drone_x, self.drone_y)
# Define the size of the world
self.world_size_x = 1000
self.world_size_y = 1000
# Choose a location for the car to start in the world
# self.car_x = 0
# self.car_y = 0
# Calculates the position that the drone will see the car being in
    # The car must lie within the drone's field of view, otherwise no location will be found
def calculate_rel_position(self):
x = float(self.car.x - self.drone_x) / self.drone.field_of_view
y = float(self.car.y - self.drone_y) / self.drone.field_of_view
if x < -1 or x > 1 or y < -1 or y > 1:
self.drone.car_unknown = True
return
self.drone.car_rel_x = x
self.drone.car_rel_y = y
# print(x, y)
# Manage the world in a thread - updating the location of the drone as it moves, and calculating relative
# position of the car
def manage(self):
while True:
self.drone_x = self.drone.world_x
self.drone_y = self.drone.world_y
self.calculate_rel_position()
# car = Car(0, 0)
world = World()
# Start the world managing
thread1 = Thread(target=world.manage, args=[])
# Set the drone to start following the car
thread2 = Thread(target=world.drone.follow_car, args=[])
# Start the car moving
thread3 = Thread(target=world.car.move, args=[world.world_size_x, world.world_size_y])
thread1.start()
thread2.start()
thread3.start()
while True:
print(round(world.car.x, 2), " ", round(world.car.y, 2), " ", round(world.drone_x, 2), " ",
round(world.drone_y, 2))
time.sleep(0.1)
|
test_tune_restore.py
|
# coding: utf-8
import signal
from collections import Counter
import os
import shutil
import tempfile
import time
import unittest
import skopt
import numpy as np
from hyperopt import hp
from nevergrad.optimization import optimizerlib
from zoopt import ValueType
import ray
from ray import tune
from ray.test_utils import recursive_fnmatch
from ray.rllib import _register_all
from ray.tune.callback import Callback
from ray.tune.suggest.basic_variant import BasicVariantGenerator
from ray.tune.suggest import ConcurrencyLimiter, Searcher
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray.tune.suggest.dragonfly import DragonflySearch
from ray.tune.suggest.bayesopt import BayesOptSearch
from ray.tune.suggest.skopt import SkOptSearch
from ray.tune.suggest.nevergrad import NevergradSearch
from ray.tune.suggest.optuna import OptunaSearch, param as ot_param
from ray.tune.suggest.sigopt import SigOptSearch
from ray.tune.suggest.zoopt import ZOOptSearch
from ray.tune.utils import validate_save_restore
from ray.tune.utils._mock_trainable import MyTrainableClass
class TuneRestoreTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=1, num_gpus=0, local_mode=True)
tmpdir = tempfile.mkdtemp()
test_name = "TuneRestoreTest"
tune.run(
"PG",
name=test_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=tmpdir,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
logdir = os.path.expanduser(os.path.join(tmpdir, test_name))
self.logdir = logdir
self.checkpoint_path = recursive_fnmatch(logdir, "checkpoint-1")[0]
def tearDown(self):
shutil.rmtree(self.logdir)
ray.shutdown()
_register_all()
def testTuneRestore(self):
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2}, # train one more iteration.
checkpoint_freq=1,
restore=self.checkpoint_path, # Restore the checkpoint
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
def testPostRestoreCheckpointExistence(self):
"""Tests that checkpoint restored from is not deleted post-restore."""
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2},
checkpoint_freq=1,
keep_checkpoints_num=1,
restore=self.checkpoint_path,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
self.assertTrue(os.path.isfile(self.checkpoint_path))
class TuneInterruptionTest(unittest.TestCase):
def testExperimentInterrupted(self):
import multiprocessing
trainer_semaphore = multiprocessing.Semaphore()
driver_semaphore = multiprocessing.Semaphore()
class SteppingCallback(Callback):
def on_step_end(self, iteration, trials, **info):
driver_semaphore.release() # Driver should continue
trainer_semaphore.acquire() # Wait until released
def _run(local_dir):
def _train(config):
for i in range(7):
tune.report(val=i)
tune.run(
_train,
local_dir=local_dir,
name="interrupt",
callbacks=[SteppingCallback()])
local_dir = tempfile.mkdtemp()
process = multiprocessing.Process(target=_run, args=(local_dir, ))
process.daemon = False
process.start()
exp_dir = os.path.join(local_dir, "interrupt")
# Skip first five steps
for i in range(5):
driver_semaphore.acquire() # Wait for callback
trainer_semaphore.release() # Continue training
driver_semaphore.acquire()
experiment_state_file = None
for file in os.listdir(exp_dir):
if file.startswith("experiment_state"):
experiment_state_file = os.path.join(exp_dir, file)
break
self.assertTrue(experiment_state_file)
last_mtime = os.path.getmtime(experiment_state_file)
# Now send kill signal
os.kill(process.pid, signal.SIGINT)
# Release trainer. It should handle the signal and try to
# checkpoint the experiment
trainer_semaphore.release()
time.sleep(2) # Wait for checkpoint
new_mtime = os.path.getmtime(experiment_state_file)
self.assertNotEqual(last_mtime, new_mtime)
shutil.rmtree(local_dir)
class TuneFailResumeGridTest(unittest.TestCase):
class FailureInjectorCallback(Callback):
"""Adds random failure injection to the TrialExecutor."""
def __init__(self, steps=20):
self._step = 0
self.steps = steps
def on_trial_start(self, trials, **info):
self._step += 1
if self._step >= self.steps:
raise RuntimeError
class CheckStateCallback(Callback):
"""Checks state for the experiment initialization."""
def __init__(self, expected_trials=20):
self.expected_trials = expected_trials
self._checked = False
def on_step_begin(self, iteration, trials, **kwargs):
if not self._checked:
assert len(trials) == self.expected_trials
self._checked = True
@classmethod
def setUpClass(cls):
ray.init(local_mode=True, num_cpus=2)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def setUp(self):
self.logdir = tempfile.mkdtemp()
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "0"
from ray.tune import register_trainable
register_trainable("trainable", MyTrainableClass)
def tearDown(self):
os.environ.pop("TUNE_GLOBAL_CHECKPOINT_S")
shutil.rmtree(self.logdir)
def testFailResumeGridSearch(self):
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback()],
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback()],
**config)
assert len(analysis.trials) == 27
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert all(v == 9 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert all(v == 9 for v in test2_counter.values())
def testFailResumeWithPreset(self):
search_alg = BasicVariantGenerator(points_to_evaluate=[{
"test": -1,
"test2": -1
}, {
"test": -1
}, {
"test2": -1
}])
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(5)],
search_alg=search_alg,
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=5)],
search_alg=search_alg,
**config)
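        # The 3 presets expand to 1 + 3 + 3 = 7 trials (partially specified
        # presets still grid-search the unspecified key), and the 3 samples
        # each expand over the 3x3 grid for 27 more, giving 34 in total.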
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testFailResumeAfterPreset(self):
search_alg = BasicVariantGenerator(points_to_evaluate=[{
"test": -1,
"test2": -1
}, {
"test": -1
}, {
"test2": -1
}])
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(15)],
search_alg=search_alg,
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=15)],
search_alg=search_alg,
**config)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testMultiExperimentFail(self):
experiments = []
for i in range(3):
experiments.append(
tune.Experiment(
run=MyTrainableClass,
name="trainable",
num_samples=2,
config={
"test": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 1},
local_dir=self.logdir))
with self.assertRaises(RuntimeError):
tune.run(
experiments,
callbacks=[self.FailureInjectorCallback(10)],
fail_fast=True)
analysis = tune.run(
experiments,
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=10)],
fail_fast=True)
assert len(analysis.trials) == 18
def testWarningLargeGrid(self):
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search(list(range(20))),
"test2": tune.grid_search(list(range(20))),
"test3": tune.grid_search(list(range(20))),
"test4": tune.grid_search(list(range(20))),
"test5": tune.grid_search(list(range(20))),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertWarnsRegex(UserWarning,
"exceeds the serialization threshold"):
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(10)],
**config)
class TuneExampleTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=2)
def tearDown(self):
ray.shutdown()
_register_all()
def testPBTKeras(self):
from ray.tune.examples.pbt_tune_cifar10_with_keras import Cifar10Model
from tensorflow.python.keras.datasets import cifar10
cifar10.load_data()
validate_save_restore(Cifar10Model)
validate_save_restore(Cifar10Model, use_object_store=True)
def testPyTorchMNIST(self):
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
from torchvision import datasets
datasets.MNIST("~/data", train=True, download=True)
validate_save_restore(TrainMNIST)
validate_save_restore(TrainMNIST, use_object_store=True)
def testHyperbandExample(self):
from ray.tune.examples.hyperband_example import MyTrainableClass
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
def testAsyncHyperbandExample(self):
from ray.tune.utils.mock import MyTrainableClass
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
class AutoInitTest(unittest.TestCase):
def testTuneRestore(self):
self.assertFalse(ray.is_initialized())
tune.run("__fake", name="TestAutoInit", stop={"training_iteration": 1})
self.assertTrue(ray.is_initialized())
def tearDown(self):
ray.shutdown()
_register_all()
class AbstractWarmStartTest:
def setUp(self):
ray.init(num_cpus=1, local_mode=True)
self.tmpdir = tempfile.mkdtemp()
self.experiment_name = "results"
def tearDown(self):
shutil.rmtree(self.tmpdir)
ray.shutdown()
_register_all()
def set_basic_conf(self):
raise NotImplementedError()
def run_part_from_scratch(self):
np.random.seed(162)
search_alg, cost = self.set_basic_conf()
search_alg = ConcurrencyLimiter(search_alg, 1)
results_exp_1 = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
verbose=0,
name=self.experiment_name,
local_dir=self.tmpdir)
checkpoint_path = os.path.join(self.tmpdir, "warmStartTest.pkl")
search_alg.save(checkpoint_path)
return results_exp_1, np.random.get_state(), checkpoint_path
def run_from_experiment_restore(self, random_state):
search_alg, cost = self.set_basic_conf()
search_alg = ConcurrencyLimiter(search_alg, 1)
search_alg.restore_from_dir(
os.path.join(self.tmpdir, self.experiment_name))
results = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
verbose=0,
name=self.experiment_name,
local_dir=self.tmpdir)
return results
def run_explicit_restore(self, random_state, checkpoint_path):
np.random.set_state(random_state)
search_alg2, cost = self.set_basic_conf()
search_alg2 = ConcurrencyLimiter(search_alg2, 1)
search_alg2.restore(checkpoint_path)
return tune.run(cost, num_samples=5, search_alg=search_alg2, verbose=0)
def run_full(self):
np.random.seed(162)
search_alg3, cost = self.set_basic_conf()
search_alg3 = ConcurrencyLimiter(search_alg3, 1)
return tune.run(
cost, num_samples=10, search_alg=search_alg3, verbose=0)
def testWarmStart(self):
results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()
results_exp_2 = self.run_explicit_restore(r_state, checkpoint_path)
results_exp_3 = self.run_full()
trials_1_config = [trial.config for trial in results_exp_1.trials]
trials_2_config = [trial.config for trial in results_exp_2.trials]
trials_3_config = [trial.config for trial in results_exp_3.trials]
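        # With the same seed, 5 trials plus a searcher checkpoint/restore plus
        # 5 more trials should reproduce exactly the configs of a single
        # uninterrupted 10-trial run.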
self.assertEqual(trials_1_config + trials_2_config, trials_3_config)
def testRestore(self):
results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()
results_exp_2 = self.run_from_experiment_restore(r_state)
results_exp_3 = self.run_full()
trials_1_config = [trial.config for trial in results_exp_1.trials]
trials_2_config = [trial.config for trial in results_exp_2.trials]
trials_3_config = [trial.config for trial in results_exp_3.trials]
self.assertEqual(trials_1_config + trials_2_config, trials_3_config)
class HyperoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = {
"x": hp.uniform("x", 0, 10),
"y": hp.uniform("y", -10, 10),
"z": hp.uniform("z", -10, 0)
}
def cost(space, reporter):
loss = space["x"]**2 + space["y"]**2 + space["z"]**2
reporter(loss=loss)
search_alg = HyperOptSearch(
space,
metric="loss",
mode="min",
random_state_seed=5,
n_initial_points=1,
max_concurrent=1000 # Here to avoid breaking back-compat.
)
return search_alg, cost
class BayesoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self, analysis=None):
space = {"width": (0, 20), "height": (-100, 100)}
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = BayesOptSearch(
space, metric="loss", mode="min", analysis=analysis)
return search_alg, cost
def testBootStrapAnalysis(self):
analysis = self.run_full()
search_alg3, cost = self.set_basic_conf(analysis)
search_alg3 = ConcurrencyLimiter(search_alg3, 1)
tune.run(cost, num_samples=10, search_alg=search_alg3, verbose=0)
class SkoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
optimizer = skopt.Optimizer([(0, 20), (-100, 100)])
previously_run_params = [[10, 0], [15, -20]]
known_rewards = [-189, -1144]
def cost(space, reporter):
reporter(loss=(space["height"]**2 + space["width"]**2))
search_alg = SkOptSearch(
optimizer,
["width", "height"],
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
points_to_evaluate=previously_run_params,
evaluated_rewards=known_rewards)
return search_alg, cost
class NevergradWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
instrumentation = 2
parameter_names = ["height", "width"]
optimizer = optimizerlib.OnePlusOne(instrumentation)
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = NevergradSearch(
optimizer,
parameter_names,
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
)
return search_alg, cost
class OptunaWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
from optuna.samplers import TPESampler
space = [
ot_param.suggest_uniform("width", 0, 20),
ot_param.suggest_uniform("height", -100, 100)
]
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = OptunaSearch(
space, sampler=TPESampler(seed=10), metric="loss", mode="min")
return search_alg, cost
class DragonflyWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
from dragonfly.opt.gp_bandit import EuclideanGPBandit
from dragonfly.exd.experiment_caller import EuclideanFunctionCaller
from dragonfly import load_config
def cost(space, reporter):
height, width = space["point"]
reporter(loss=(height - 14)**2 - abs(width - 3))
domain_vars = [{
"name": "height",
"type": "float",
"min": -10,
"max": 10
}, {
"name": "width",
"type": "float",
"min": 0,
"max": 20
}]
domain_config = load_config({"domain": domain_vars})
func_caller = EuclideanFunctionCaller(
None, domain_config.domain.list_of_domains[0])
optimizer = EuclideanGPBandit(func_caller, ask_tell_mode=True)
search_alg = DragonflySearch(
optimizer,
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
)
return search_alg, cost
@unittest.skip("Skip because this doesn't seem to work.")
def testWarmStart(self):
pass
@unittest.skip("Skip because this doesn't seem to work.")
def testRestore(self):
pass
class SigOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = [
{
"name": "width",
"type": "int",
"bounds": {
"min": 0,
"max": 20
},
},
{
"name": "height",
"type": "int",
"bounds": {
"min": -100,
"max": 100
},
},
]
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
# Unfortunately, SigOpt doesn't allow setting of random state. Thus,
# we always end up with different suggestions, which is unsuitable
# for the warm start test. Here we make do with points_to_evaluate,
# and ensure that state is preserved over checkpoints and restarts.
points = [
{
"width": 5,
"height": 20
},
{
"width": 10,
"height": -20
},
{
"width": 15,
"height": 30
},
{
"width": 5,
"height": -30
},
{
"width": 10,
"height": 40
},
{
"width": 15,
"height": -40
},
{
"width": 5,
"height": 50
},
{
"width": 10,
"height": -50
},
{
"width": 15,
"height": 60
},
{
"width": 12,
"height": -60
},
]
search_alg = SigOptSearch(
space,
name="SigOpt Example Experiment",
max_concurrent=1,
metric="loss",
mode="min",
points_to_evaluate=points)
return search_alg, cost
def testWarmStart(self):
if "SIGOPT_KEY" not in os.environ:
self.skipTest("No SigOpt API key found in environment.")
return
super().testWarmStart()
def testRestore(self):
if "SIGOPT_KEY" not in os.environ:
self.skipTest("No SigOpt API key found in environment.")
return
super().testRestore()
class ZOOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
dim_dict = {
"height": (ValueType.CONTINUOUS, [-100, 100], 1e-2),
"width": (ValueType.DISCRETE, [0, 20], False)
}
def cost(param, reporter):
reporter(loss=(param["height"] - 14)**2 - abs(param["width"] - 3))
search_alg = ZOOptSearch(
algo="Asracos", # only support ASRacos currently
budget=200,
dim_dict=dim_dict,
metric="loss",
mode="min")
return search_alg, cost
class SearcherTest(unittest.TestCase):
class MockSearcher(Searcher):
def __init__(self, data):
self.data = data
def save(self, path):
with open(path, "w") as f:
f.write(self.data)
def restore(self, path):
with open(path, "r") as f:
self.data = f.read()
def testSaveRestoreDir(self):
tmpdir = tempfile.mkdtemp()
original_data = "hello-its-me"
searcher = self.MockSearcher(original_data)
searcher.save_to_dir(tmpdir)
searcher_2 = self.MockSearcher("no-its-not-me")
searcher_2.restore_from_dir(tmpdir)
assert searcher_2.data == original_data
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
|
test_3308_cache_id.py
|
#!/usr/bin/env python -i
# -*- mode: Python; tab-width: 4; indent-tabs-mode: nil; -*-
# ex: set tabstop=4
# Please do not change the two lines above. See PEP 8, PEP 263.
"""This script was built to test doing various large resultdata sets with and without caching and paging
"""
pytan_loc = '~/gh/pytan'
import os
import sys
sys.dont_write_bytecode = True
sys.path.append(os.path.join(os.path.expanduser(pytan_loc), 'lib'))
import pytan
import getpass
import json
import datetime
import threading
import time
import logging
logging.Formatter.converter = time.gmtime
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
console_handler.setLevel(logging.DEBUG)
mylog = logging.getLogger('LOG')
mylog.setLevel(logging.DEBUG)
mylog.addHandler(console_handler)
# connection info for Tanium Server
handler_args = {
'username': raw_input('Tanium Username: ').strip(),
'password': getpass.getpass('Tanium Password: ').strip(),
'host': raw_input('Tanium Host: ').strip(),
'port': int(raw_input('Tanium Port: ').strip()),
'loglevel': 0,
}
my_name = 'test_3308_cache_id'
basefilename = '{}-{}'.format(my_name, pytan.utils.seconds_from_now())
logfile = '{}.log'.format(basefilename)
resultsfile = '{}.results.txt'.format(basefilename)
# global settings that we want to change for this run:
GLOBAL_SETTINGS = [
{'name': "question_hops_per_report", 'new_val': '1'},
{'name': "question_min_hops_per_report", 'new_val': '1'},
]
# json of sensor to generate random strings
RANDOM_SENSOR = (
'''{
"_type": "sensor",
"category": "Miscellaneous",
"delimiter": ",",
"exclude_from_parse_flag": 1,
"hidden_flag": 0,
"ignore_case_flag": 1,
"max_age_seconds": 900,
"name": "1kRandomStrings",
"queries": {
"_type": "queries",
"query": [
{
"_type": "query",
"platform": "Linux",
"script": "#!/bin/bash\\nhostname=`hostname`\\nfor i in $(seq 1 1000); do\\necho "$hostname-$RANDOM$RANDOM"\\ndone\\n",
"script_type": "UnixShell"
}
]
},
"value_type": "String"
}
'''
)
# name to add to sensor as part of pytan copy
RANDOM_STR = 'PyTan Test'
TESTS = [
{'sensors': [], 'paging': False, 'caching': False},
{'sensors': [], 'paging': True, 'caching': False},
{'sensors': [], 'paging': True, 'caching': 900},
# {'sensors': ['Computer Name'], 'paging': False, 'caching': False},
# {'sensors': ['Computer Name'], 'paging': True, 'caching': False},
# {'sensors': ['Computer Name'], 'paging': True, 'caching': 900},
]
STATS_TIMER = 30
RESET_GLOBALS = True
PRINT_STATS = True
PAGE_LIMIT = 50000
CACHE_EXPIRATION = 900
WAIT_FOR_LAST_QUESTION = False
# ____________________________ FUNCTIONS
def update_global_settings():
# update global settings accordingly
for i in GLOBAL_SETTINGS:
i['obj'] = handler.get('setting', name=i['name'])[0]
if not i['obj'].value == i['new_val']:
i['orig_value'] = i['obj'].value
i['obj'].value = i['new_val']
i['obj'] = handler.session.save(i['obj'])
mylog.info((
"Updated {name!r} from {orig_value!r} to {new_val!r}"
).format(**i))
def restore_global_settings():
# restore global settings accordingly
for i in GLOBAL_SETTINGS:
i['obj'] = handler.get('setting', name=i['name'])[0]
i['current_value'] = i['obj'].value
if i.get('orig_value', None):
i['obj'].value = i['orig_value']
i['obj'] = handler.session.save(i['obj'])
mylog.info((
"Restored {name!r} from {current_value!r} to {orig_value!r}"
).format(**i))
def reset_global_settings():
# reset global settings accordingly
for i in GLOBAL_SETTINGS:
i['obj'] = handler.get('setting', name=i['name'])[0]
i['current_value'] = i['obj'].value
i['default_value'] = i['obj'].default_value
if i['current_value'] != i['default_value']:
i['obj'].value = i['obj'].default_value
i['obj'] = handler.session.save(i['obj'])
# mylog.info((
# "Reset {name!r} from {current_value!r} to {default_value!r}"
# ).format(**i))
def make_random_copy(cnt):
random_obj = json.loads(RANDOM_SENSOR)
random_obj = pytan.taniumpy.BaseType.from_jsonable(random_obj)
random_obj.name = '{} {} #{}'.format(random_obj.name, RANDOM_STR, cnt)
random_obj = handler.session.add(random_obj)
random_obj = handler.session.find(random_obj)
mylog.info((
"Created new random sensor copy: {}"
).format(random_obj.name))
return random_obj
def clean_random_copies():
random_obj = json.loads(RANDOM_SENSOR)
random_obj = pytan.taniumpy.BaseType.from_jsonable(random_obj)
sw = '{} {}'.format(random_obj.name, RANDOM_STR)
all_sensors = handler.get_all('sensor')
rand_sensors = [x for x in all_sensors if x.name.startswith(sw)]
for x in rand_sensors:
try:
handler.session.delete(x)
mylog.info((
"Removed old random sensor copy: {}"
).format(x.name))
except:
pass
def get_result_info(x):
retry_count = 5
current_try = 0
while not current_try >= retry_count:
try:
ri = handler.session.getResultInfo(x['question'])
break
except:
if current_try > retry_count:
raise
else:
mylog.exception((
"GetResultInfo failed on attempt #{} for question {}"
).format(current_try, x['question']))
current_try += 1
return ri
def poll_result_info(x):
x['ri_start'] = datetime.datetime.utcnow()
    # poll for completion via pytan's QuestionPoller using GetResultInfo
poller = pytan.pollers.QuestionPoller(handler, x['question'], polling_secs=30)
retry_count = 5
current_try = 0
while not current_try >= retry_count:
try:
poller.run()
break
except:
if current_try > retry_count:
raise
else:
mylog.exception((
"Poll ResultInfo failed on attempt #{} for question {}"
).format(current_try, x['question']))
current_try += 1
x['ri'] = get_result_info(x)
x['ri_end'] = datetime.datetime.utcnow()
x['ri_elapsed'] = x['ri_end'] - x['ri_start']
x['ri_reached_pct'] = get_percentage(x['ri'].mr_tested, x['ri'].estimated_total)
mylog.info((
"[TEST #{cnt}] Polling for resultinfo = 99% (reached: {ri_reached_pct}) "
"took: {ri_elapsed}"
).format(**x))
return x
def normal_get_result_data(x):
x['rd'] = get_rd(x['question'])
return x
def get_paging(x):
if x['paging'] is True:
x['paging'] = calc_percent(10, x['ri'].row_count)
if PAGE_LIMIT:
if x['paging'] > PAGE_LIMIT:
x['paging'] = PAGE_LIMIT
return x
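# --- Illustrative note (not part of the original test script) ---
# A hedged worked example of the page sizing in get_paging above: a question
# with 1,200,000 rows would page 10% (120,000 rows) at a time, but PAGE_LIMIT
# caps each GetResultData call at 50,000 rows. The helper is made up for
# illustration and is never called here.
def _demo_page_size(row_count, page_limit=PAGE_LIMIT):
    page = calc_percent(10, row_count)
    if page_limit and page > page_limit:
        page = page_limit
    return page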
def get_rd(q, **kwargs):
retry_count = 5
current_try = 0
while not current_try >= retry_count:
try:
rd = handler.session.getResultData(q, **kwargs)
break
except:
if current_try > retry_count:
raise
else:
mylog.exception((
"GetResultData failed on attempt #{} for question {} with args: {}"
).format(current_try, q, kwargs))
current_try += 1
rd.body_len = len(handler.session.response_body)
rd.row_len = len(rd.rows)
return rd
def paging_get_result_data(x):
x = get_paging(x)
current_row = 0
total_rows = x['ri'].row_count
all_rows = []
current_page = 1
while True:
get_rd_args = {'row_count': x['paging'], 'row_start': current_row}
new_data = get_rd(x['question'], **get_rd_args)
if new_data is None:
mylog.warning("None returned for result data!")
break
data = new_data
mylog.info((
"[TEST #{cnt}] Getting result data page {current_page} for {data.row_count} total rows returned {data.row_len} rows (args: {args}) (response len: {data.body_len})"
).format(current_page=current_page, data=data, args=get_rd_args, **x))
all_rows += data.rows
if not data.rows:
mylog.warning("No rows returned!")
break
if current_row >= total_rows:
break
current_row += x['paging']
current_page += 1
data.rows = all_rows
x['rd'] = data
return x
def paging_caching_get_result_data(x):
x = get_paging(x)
current_row = 0
total_rows = x['ri'].row_count
all_rows = []
cache_id = None
current_page = 1
while True:
get_rd_args = {
'row_count': x['paging'],
'row_start': current_row,
'cache_expiration': x['caching'],
}
if cache_id:
get_rd_args['cache_id'] = cache_id
new_data = get_rd(x['question'], **get_rd_args)
if new_data is None:
mylog.warning("None returned for result data!")
break
data = new_data
mylog.info((
"[TEST #{cnt}] Getting result data page {current_page} for {data.row_count} total rows returned {data.row_len} rows (args: {args}) (response len: {data.body_len})"
).format(current_page=current_page, data=data, args=get_rd_args, **x))
if get_rd_args.get('cache_id', ''):
if not get_rd_args['cache_id'] == data.cache_id:
mylog.warning((
"cache id changed from {} to {}"
).format(get_rd_args['cache_id'], data.cache_id))
cache_id = data.cache_id
all_rows += data.rows
if not data.rows:
mylog.warning("No rows returned!")
break
if current_row >= total_rows:
break
current_row += x['paging']
current_page += 1
data.rows = all_rows
x['rd'] = data
return x
def get_result_data(x):
x['rd_start'] = datetime.datetime.utcnow()
if x['paging']:
if x['caching']:
x = paging_caching_get_result_data(x)
else:
x = paging_get_result_data(x)
else:
x = normal_get_result_data(x)
x['rd_end'] = datetime.datetime.utcnow()
x['rd_elapsed'] = x['rd_end'] - x['rd_start']
mylog.info((
"[TEST #{cnt}] Getting result data for {rd.row_count} total rows returned {rd.row_len}"
" rows with paging: {paging} and caching: {caching} took: {rd_elapsed} - "
"(response len: {rd.body_len})"
).format(**x))
return x
def get_percentage(part, whole):
f = 100 * float(part) / float(whole)
return "{0:.2f}%".format(f)
def calc_percent(percent, whole):
return int((percent * whole) / 100.0)
def get_stats(wait=20):
while True:
if PRINT_STATS:
si = handler.session.get_server_info()
try:
pydiags = si.get('pydiags', {})
strcache = pydiags['String Cache']
sysperf = pydiags['System Performance Info']
aqc = pydiags['Active Question Cache']
mem_avail = get_percentage(sysperf['PhysicalAvailable'], sysperf['PhysicalTotal'])
handles = sysperf['HandleCount']
processes = sysperf['ProcessCount']
strings = strcache['Total String Count']
est_count = aqc['Active Client Estimate']
active_questions = aqc['Active Question Estimate']
mylog.info((
" -- STATS: Memory Available: {}, Handles: {}, Processes: {}, Strings: {}"
", Est Total: {}, Active Questions: {}"
).format(
mem_avail, handles, processes, strings, est_count, active_questions,
))
except:
mylog.warning("System Info failed to fetch! {}".format(si))
time.sleep(wait)
def setup_logging():
console_handler.setLevel(logging.DEBUG)
mylog.setLevel(logging.DEBUG)
# my_file = os.path.abspath(sys.argv[0])
file_handler = logging.FileHandler(logfile)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
file_handler.setLevel(logging.DEBUG)
mylog.addHandler(file_handler)
mylog.info("Logging to {}".format(logfile))
def start_stats_thread():
thread = threading.Thread(target=get_stats, args=(STATS_TIMER,))
thread.daemon = True
thread.start()
mylog.info("Started up stats thread every {} seconds...".format(STATS_TIMER))
return thread
def start_tests(tests, fd):
# create copies of the random sensor for each test
for idx, x in enumerate(tests):
x['random_obj'] = make_random_copy(idx + 1)
mylog.info("Waiting three minutes for new sensor propagation...")
time.sleep(180)
last_question_expiration = None
for idx, x in enumerate(tests):
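        # When WAIT_FOR_LAST_QUESTION is set, hold off until the previous question's result
        # cache has expired (its expiration time plus a 5 minute buffer) before starting the next test.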
if WAIT_FOR_LAST_QUESTION and last_question_expiration:
cache_expiry_min = 5
cache_expiry = last_question_expiration + datetime.timedelta(
seconds=cache_expiry_min * 60)
spewage = False
while True:
if datetime.datetime.utcnow() >= cache_expiry:
mylog.info((
"Last question expired ({} + {} minutes), starting up next question..."
).format(last_question_expiration, cache_expiry_min))
break
if not spewage:
mylog.info((
"Last question not expired yet ({} + {} minutes), waiting..."
).format(last_question_expiration, cache_expiry_min))
spewage = True
time.sleep(60)
x['cnt'] = idx + 1
# add the random object to the sensors
x['sensors'].append(x['random_obj'].name)
mylog.info((
"[TEST #{cnt}] Starting new test -- sensors: {sensorstxt}, paging: {paging}, "
"caching: {caching}"
).format(sensorstxt=';'.join(x['sensors']), **x))
# ask the question without using pytan's get result logic
x['question'] = handler.ask_manual_human(sensors=x['sensors'], get_results=False)
x['question'] = x['question']['question_object']
last_question_expiration = pytan.utils.timestr_to_datetime(x['question'].expiration)
mylog.info((
"[TEST #{cnt}] Question asked: ID: {question.id}, Query: {question.query_text!r}, expires: {expires}"
).format(expires=last_question_expiration, **x))
try:
x = poll_result_info(x)
except:
mylog.exception("Exception occurred during poll_result_info")
try:
x = get_result_data(x)
except:
mylog.exception("Exception occurred during get_result_data")
try:
with open(resultsfile, 'a+b') as fd:
fd.write((
"{cnt}, {sensorstxt}, {rd.row_count}, {row_len}, {paging}, {caching}, "
"{ri_elapsed}, {rd_elapsed}\n"
).format(sensorstxt=';'.join(x['sensors']), row_len=len(x['rd'].rows), **x))
except:
mylog.exception("Exception occurred during results file write")
return tests
# ____________________________ START WORKFLOW
# connect to tanium using handler_args
handler = pytan.handler.Handler(**handler_args)
setup_logging()
stats_thread = start_stats_thread()
with open(resultsfile, 'wb') as fd:
fd.write((
'test#, sensors, row_count_total, row_count_returned, paging, caching, '
'ri_elapsed, rd_elapsed\n'
))
if RESET_GLOBALS:
reset_global_settings()
clean_random_copies()
update_global_settings()
test_results = start_tests(TESTS, fd)
# ____________________________ CLEANUP WORKFLOW
clean_random_copies()
restore_global_settings()
|
watcher.py
|
from datetime import datetime, timedelta
from os import getcwd, mkdir, utime, listdir, environ
from os.path import exists, join
from shutil import rmtree, disk_usage
from threading import Thread
from time import mktime, sleep
from unittest import TestCase
environ["VISION_HOME"] = join(getcwd(), "watcher")
from core import storage
from runtime import watcher
DATA_DIR = join(getcwd(), "watcherdata")
class WatcherTestCase(TestCase):
def setUp(self):
storage.setup()
if not exists(DATA_DIR):
mkdir(DATA_DIR)
        # create a placeholder file that will act as the oldest recording
self.oldest_path = join(DATA_DIR, "oldest.mp4")
with open(self.oldest_path, "wb"):
pass
        # create a second placeholder file that will act as the newest recording
self.newest_path = join(DATA_DIR, "newest.mp4")
with open(self.newest_path, "w"):
pass
self.initial_free = watcher.usage_percentage(DATA_DIR)
def tearDown(self):
rmtree(DATA_DIR)
rmtree(environ["VISION_HOME"])
class Watcher(WatcherTestCase):
def test_threshold_not_reached(self):
file_count = len(listdir(DATA_DIR))
self.assertEqual(file_count, 2)
if watcher.usage_percentage(DATA_DIR) > self.initial_free:
watcher.clean(DATA_DIR)
file_count = len(listdir(DATA_DIR))
self.assertEqual(file_count, 2)
def test_threshold_reached(self):
file_count = len(listdir(DATA_DIR))
self.assertEqual(file_count, 2)
total, _, _ = disk_usage(DATA_DIR)
oldest_path_size = int(total / 100)
with open(self.oldest_path, "wb") as oldest:
oldest.seek(oldest_path_size - 1)
oldest.write(b"\0")
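        # Backdate the oldest file's access/modification times so it is unambiguously the
        # oldest entry when the watcher decides which file to delete.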
        backdated = datetime.now() - timedelta(seconds=60)
        backdated = mktime(backdated.timetuple())
        utime(self.oldest_path, (backdated, backdated))
if watcher.usage_percentage(DATA_DIR) > self.initial_free:
watcher.clean(DATA_DIR)
file_count = len(listdir(DATA_DIR))
self.assertEqual(file_count, 1)
def test_loop(self):
file_count = len(listdir(DATA_DIR))
self.assertEqual(file_count, 2)
def loop(segment_dirs, loop_interval, output, threshold):
with storage.get_connection() as conn:
watcher.loop(conn, segment_dirs, loop_interval, output, threshold)
thread = Thread(target=loop, args=([DATA_DIR], 1, DATA_DIR, self.initial_free,))
thread.daemon = True
thread.start()
sleep(3)
file_count = len(listdir(DATA_DIR))
self.assertEqual(file_count, 2)
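        # Grow the newest file to roughly 2% of the volume so usage climbs above the
        # threshold (self.initial_free) that the loop thread was started with.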
total, _, _ = disk_usage(DATA_DIR)
        fill_size = int(total / 50)
        with open(self.newest_path, "wb") as newest:
            newest.seek(fill_size - 1)
            newest.write(b"\0")
while 1:
file_count = len(listdir(DATA_DIR))
if file_count == 1:
break
sleep(1)
self.assertEqual(file_count, 1)
watcher.stop()
thread.join()
self.assertEqual(storage.get_int(storage.get_connection(), watcher.WATCHER_DELETED_TOTAL), 1)
self.assertEqual(storage.get_int(storage.get_connection(), watcher.WATCHER_DELETED_SINCE_START), 1)
thread = Thread(target=loop, args=([DATA_DIR], 1, DATA_DIR, self.initial_free,))
thread.daemon = True
thread.start()
file_count = len(listdir(DATA_DIR))
self.assertEqual(file_count, 1)
        with open(self.newest_path, "wb") as newest:
            newest.seek(fill_size - 1)
            newest.write(b"\0")
while 1:
file_count = len(listdir(DATA_DIR))
if file_count == 0:
break
sleep(1)
self.assertEqual(file_count, 0)
watcher.stop()
thread.join()
self.assertEqual(storage.get_int(storage.get_connection(), watcher.WATCHER_DELETED_TOTAL), 2)
self.assertEqual(storage.get_int(storage.get_connection(), watcher.WATCHER_DELETED_SINCE_START), 1)
|
jasper-client.py
|
#!/usr/bin/python
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import numpy as np
import os
from speech_utils import AudioSegment, SpeechClient
import soundfile
import pyaudio as pa
import threading
import math
import time
import glob
FLAGS = None
# read audio chunk from a file
def get_audio_chunk_from_soundfile(sf, chunk_size, int_values):
dtype = 'int32' if int_values else 'float32'
audio_signal = sf.read(chunk_size, dtype=dtype)
end = False
# pad to chunk size
if len(audio_signal) < chunk_size:
end = True
        audio_signal = np.pad(audio_signal, (0, chunk_size - len(audio_signal)),
                              mode='constant')
return audio_signal, end
# generator that returns chunks of audio data from file
def audio_generator_from_file(input_filename, target_sr, int_values,
chunk_duration):
sf = soundfile.SoundFile(input_filename, 'rb')
chunk_size = int(chunk_duration*sf.samplerate)
start = True
end = False
while not end:
audio_signal, end = get_audio_chunk_from_soundfile(
sf, chunk_size, int_values)
audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr)
yield audio_segment.samples, target_sr, start, end
start = False
sf.close()
# generator class that returns chunks of audio data captured from a microphone
class AudioGeneratorFromMicrophone:
def __init__(self,input_device_id, target_sr, chunk_duration):
self.recording_state = "init"
self.target_sr = target_sr
self.chunk_duration = chunk_duration
self.p = pa.PyAudio()
device_info = self.p.get_host_api_info_by_index(0)
num_devices = device_info.get('deviceCount')
devices = {}
for i in range(0, num_devices):
if (self.p.get_device_info_by_host_api_device_index(0, i).get(
'maxInputChannels')) > 0:
devices[i] = self.p.get_device_info_by_host_api_device_index(
0, i)
if (len(devices) == 0):
raise RuntimeError("Cannot find any valid input devices")
if input_device_id is None or input_device_id not in \
devices.keys():
print("\nInput Devices:")
for id, info in devices.items():
print("{}: {}".format(id,info.get("name")))
input_device_id = int(input("Enter device id to use: "))
self.input_device_id = input_device_id
def generate_audio(self):
chunk_size = int(self.chunk_duration*self.target_sr)
        self.recording_state = "init"
def keyboard_listener():
input("Press Enter to start and end recording...")
self.recording_state = "capture"
print("Recording...")
input("")
self.recording_state = "release"
listener = threading.Thread(target=keyboard_listener)
listener.start()
start = True
end = False
stream_initialized = False
step = 0
while self.recording_state != "release":
try:
if self.recording_state == "capture":
if not stream_initialized:
stream = self.p.open(
format=pa.paInt16,
channels=1,
rate=self.target_sr,
input=True,
input_device_index=self.input_device_id,
frames_per_buffer=chunk_size)
stream_initialized = True
# Read audio chunk from microphone
audio_signal = stream.read(chunk_size)
audio_signal = np.frombuffer(audio_signal,dtype=np.int16)
audio_segment = AudioSegment(audio_signal,
self.target_sr,
self.target_sr)
yield audio_segment.samples, self.target_sr, start, end
start = False
step += 1
except Exception as e:
print(e)
break
stream.close()
self.p.terminate()
def generate_audio_signal(self):
#chunk_size = int(self.chunk_duration*self.target_sr)
chunk_size = int(0.2*self.target_sr)
        self.recording_state = "init"
def keyboard_listener():
input("Press Enter to start and end recording...")
self.recording_state = "capture"
print("Recording...")
input("")
self.recording_state = "release"
listener = threading.Thread(target=keyboard_listener)
listener.start()
audio_samples = []
stream_initialized = False
step = 0
while self.recording_state != "release":
try:
if self.recording_state == "capture":
if not stream_initialized:
stream = self.p.open(
format=pa.paInt16,
channels=1,
rate=self.target_sr,
input=True,
input_device_index=self.input_device_id,
frames_per_buffer=chunk_size)
stream_initialized = True
# Read audio chunk from microphone
audio_signal = stream.read(chunk_size)
audio_signal = np.frombuffer(audio_signal,dtype=np.int16)
audio_segment = AudioSegment(audio_signal,
self.target_sr,
self.target_sr)
if step == 0:
audio_samples = audio_segment.samples
else:
audio_samples = np.concatenate((audio_samples,
audio_segment.samples))
start = False
step += 1
except Exception as e:
print(e)
break
stream.close()
self.p.terminate()
return audio_samples
# generator that returns chunks of audio features from file
def audio_features_generator(input_filename, speech_features_params,
target_sr, int_values, chunk_duration):
sf = soundfile.SoundFile(input_filename, 'rb')
chunk_size = int(chunk_duration*sf.samplerate)
start = True
end = False
while not end:
audio_signal, end = get_audio_chunk_from_soundfile(sf, chunk_size,
int_values)
audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr)
audio_features, features_length = get_speech_features(
audio_segment.samples, target_sr, speech_features_params)
yield audio_features, start, end
start = False
sf.close()
def audio_features_generator_with_buffer(input_filename,
speech_features_params, target_sr,
int_values, chunk_duration):
sf = soundfile.SoundFile(input_filename, 'rb')
chunk_size = int(chunk_duration*sf.samplerate)
start = True
end = False
audio_signal = np.zeros(shape=3*chunk_size, dtype=np.float32)
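    # Keep a rolling buffer of three chunks: each iteration writes the newest chunk into the
    # last third of the buffer, computes features over the whole window, then shifts the
    # buffer left by one chunk so overlapping context is preserved between yields.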
while not end:
audio_signal[-chunk_size:], end = get_audio_chunk_from_soundfile(sf, chunk_size, int_values)
audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr)
audio_features, features_length = get_speech_features(
audio_segment.samples, target_sr, speech_features_params)
yield audio_features, start, end
start = False
audio_signal[:-chunk_size] = audio_signal[chunk_size:]
sf.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False,
default=False, help='Enable verbose output')
parser.add_argument('--fixed_size', type=int, required=False,
default=0,
help="send fixed_size requests, pad or truncate")
parser.add_argument('--batch_size', type=int, required=False, default=1,
help='batch size')
parser.add_argument('--model_platform', required=False,
default='trt',
help='Jasper model platform')
parser.add_argument('-u', '--url', type=str, required=False,
default='localhost:8000',
help='Inference server URL. Default is '
'localhost:8000.')
parser.add_argument('-i', '--protocol', type=str, required=False,
default='HTTP',
help='Protocol (HTTP/gRPC) used to communicate with '
'inference service. Default is HTTP.')
parser.add_argument('--audio_filename', type=str, required=False,
default=None,
help='Input audio filename')
parser.add_argument('--data_dir', type=str, required=False,
default=None,
help='data directory')
parser.add_argument('--manifest_filename', type=str, required=False,
default=None,
help='relative manifest paths to --data_dir directory.')
FLAGS = parser.parse_args()
protocol = FLAGS.protocol.lower()
    valid_model_platforms = {"ts-trace", "onnx", "tensorrt"}
    if FLAGS.model_platform not in valid_model_platforms:
        raise ValueError("Invalid model_platform {}. Valid choices are {}".format(
            FLAGS.model_platform, valid_model_platforms))
model_name = "jasper-" + FLAGS.model_platform + "-ensemble"
speech_client = SpeechClient(
FLAGS.url, protocol, model_name, 1,
FLAGS.batch_size, model_platform=FLAGS.model_platform,
verbose=FLAGS.verbose, mode="synchronous",
from_features=False
)
filenames = []
transcripts = []
if FLAGS.audio_filename is not None:
audio_file = os.path.join(FLAGS.data_dir, FLAGS.audio_filename)
if os.path.isdir(audio_file):
filenames = glob.glob(os.path.join(os.path.abspath(audio_file), "**", "*.wav"),
recursive=True)
else:
filenames = [audio_file]
elif FLAGS.manifest_filename is not None:
        filter_speed = 1.0
        data_dir = FLAGS.data_dir
labels = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'", "<BLANK>"]
labels_map = dict([(labels[i], i) for i in range(len(labels))])
blank_index = len(labels)-1
table = None
import string
punctuation = string.punctuation
punctuation = punctuation.replace("+", "")
punctuation = punctuation.replace("&", "")
table = str.maketrans(punctuation, " " * len(punctuation))
import json
if "./triton" not in sys.path:
sys.path.append("./")
sys.path.append("./triton")
from speech_utils import normalize_string, parse_transcript
FLAGS.manifest_filename = FLAGS.manifest_filename.split(',')
for manifest in FLAGS.manifest_filename:
manifest=os.path.join(data_dir, manifest)
print(manifest)
with open(manifest, "r", encoding="utf-8") as fh:
                a = json.load(fh)
for data in a:
files_and_speeds = data['files']
audio_path = [x['fname'] for x in files_and_speeds if x['speed'] == filter_speed][0]
filenames.append(os.path.join(data_dir, audio_path))
transcript_text = data['transcript']
transcript_text = normalize_string(transcript_text, labels=labels, table=table)
                transcripts.append(transcript_text)  # parse_transcript(transcript_text, labels_map, blank_index) would instead convert the text to vocab indices
# Read the audio files
# Group requests in batches
audio_idx = 0
last_request = False
predictions = []
while not last_request:
batch_audio_samples = []
batch_filenames = []
for idx in range(FLAGS.batch_size):
filename = filenames[audio_idx]
print("Reading audio file: ", filename)
audio = AudioSegment.from_file(
filename,
offset=0, duration=FLAGS.fixed_size).samples
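            # When --fixed_size is set, np.resize pads (by repeating samples) or truncates the
            # signal to exactly fixed_size samples so every request has the same length.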
if FLAGS.fixed_size:
audio = np.resize(audio, FLAGS.fixed_size)
audio_idx = (audio_idx + 1) % len(filenames)
if audio_idx == 0:
last_request = True
batch_audio_samples.append(audio)
batch_filenames.append(filename)
predictions += speech_client.recognize(
batch_audio_samples,
batch_filenames)
if transcripts:
        predictions = [x for l in predictions for x in l]
from metrics import word_error_rate
wer, scores, num_words = word_error_rate(predictions, transcripts)
print(wer)
|
test_handler.py
|
#!/usr/bin/env python3
# pylint: disable=C0302
"""
Test the Lambda handler.
"""
# pylint: disable=C0103,C0111,R0904
from base64 import b32encode
from http.server import BaseHTTPRequestHandler, HTTPServer
from json import dumps as json_dumps, loads as json_loads
from logging import getLogger
from os import environ, urandom
from threading import Thread
from unittest import TestCase
from botocore.exceptions import ClientError as BotoClientError
import boto3
from moto import mock_iam
import rolemaker_server as rolemaker
# Fixes for Moto's unimplemented detach_role_policy API.
# https://github.com/spulec/moto/pull/1052
from moto.core.exceptions import RESTError # pylint: disable=C0412
from moto.iam.exceptions import IAMNotFoundException
from moto.iam.models import IAMBackend, iam_backend, ManagedPolicy, Role
from moto.iam.responses import IamResponse
def policy_detach_from_role(self, role):
self.attachment_count -= 1
del role.managed_policies[self.name]
ManagedPolicy.detach_from_role = policy_detach_from_role
def role_delete_policy(self, policy_name):
try:
del self.policies[policy_name]
except KeyError:
raise IAMNotFoundException(
"The role policy with name {0} cannot be found.".format(policy_name))
Role.delete_policy = role_delete_policy
class InvalidParameterError(RESTError):
code = 400
def __init__(self, message):
super(InvalidParameterError, self).__init__(
"InvalidParameterValue", message)
def role_put_policy(self, policy_name, policy_json):
if "TRIGGER_INVALID_JSON" in str(policy_json):
raise InvalidParameterError("Policy contains TRIGGER_INVALID_JSON")
self.policies[policy_name] = policy_json
Role.put_policy = role_put_policy
def backend_detach_role_policy(self, policy_arn, role_name):
arns = dict((p.arn, p) for p in self.managed_policies.values())
try:
policy = arns[policy_arn]
policy.detach_from_role(self.get_role(role_name))
except KeyError:
raise IAMNotFoundException("Policy {0} was not found.".format(policy_arn))
IAMBackend.detach_role_policy = backend_detach_role_policy
def backend_delete_role_policy(self, role_name, policy_name):
role = self.get_role(role_name)
role.delete_policy(policy_name)
IAMBackend.delete_role_policy = backend_delete_role_policy
DETACH_ROLE_POLICY_TEMPLATE = """\
<DetachRolePolicyResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</DetachRolePolicyResponse>"""
def response_detach_role_policy(self):
policy_arn = self._get_param('PolicyArn') # pylint: disable=W0212
role_name = self._get_param('RoleName') # pylint: disable=W0212
iam_backend.detach_role_policy(policy_arn, role_name)
template = self.response_template(DETACH_ROLE_POLICY_TEMPLATE)
return template.render()
IamResponse.detach_role_policy = response_detach_role_policy
DELETE_ROLE_POLICY_TEMPLATE = """\
<DeleteRolePolicyResponse>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>
</DeleteRolePolicyResponse>"""
def response_delete_role_policy(self):
policy_name = self._get_param('PolicyName') # pylint: disable=W0212
role_name = self._get_param('RoleName') # pylint: disable=W0212
iam_backend.delete_role_policy(role_name, policy_name)
template = self.response_template(DELETE_ROLE_POLICY_TEMPLATE)
return template.render()
IamResponse.delete_role_policy = response_delete_role_policy
### End Moto fixup
def randstr(length=10):
"""
Return random letters/digits.
"""
return b32encode(urandom(length)).decode("ascii").rstrip("=")
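# Note: 10 random bytes base32-encode to exactly 16 characters with no '=' padding, so a
# call might return something like "MZXW6YTBOI2DKNRW" (illustrative value only).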
class ResponseHandler(BaseHTTPRequestHandler):
"""
    Handles the S3 PUTs that the Lambda handler sends its results to.
"""
log = getLogger("http")
responses = []
def do_PUT(self):
content_length = self.headers.get("Content-Length")
if content_length is not None:
content_length = int(content_length)
data = self.rfile.read(content_length)
self.responses.append(data)
self.send_response(200, "")
self.send_header("Content-Length", "0")
self.send_header("Server", "AmazonS3")
self.end_headers()
return
def log_message(self, format, *args): # pylint: disable=W0622
"""
Log messages to the regular logging facility; BaseHTTPRequestHandler
forcibly prints them to stderr.
"""
self.log.info(format, *args)
OPEN_MANDATORY_POLICY = {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "*",
"Principal": "*",
}
}
POWER_USER_POLICY = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"NotAction": "iam:*",
"Resource": "*"
},
{
"Effect": "Allow",
"Action": ["iam:Get*", "iam:List*"],
"Resource": "*"
}
]
}
S3_POLICY = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "s3:GetObject",
"Resource": "*"
}
]
}
BASIC_ASSUME_ROLE_POLICY = {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {"Service": "ec2.amazonaws.com"}
}
}
LAMBDA_ASSUME_ROLE_POLICY = {
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Action": "sts:AssumeRole",
"Principal": {"Service": "lambda.amazonaws.com"}
}
}
@mock_iam
class TestCustomResourceHandler(TestCase):
"""
Test CloudFormation Custom::RestrictedRole resource handling.
"""
mandatory_arn = ""
power_arn = ""
s3_arn = ""
@classmethod
def setUpClass(cls):
cls.server = HTTPServer(("127.0.0.1", 0), ResponseHandler)
cls.thread = Thread(target=cls.server.serve_forever)
cls.thread.start()
@classmethod
def tearDownClass(cls):
cls.server.shutdown()
cls.thread.join()
return
def setUp(self):
self.iam = boto3.client("iam")
result = self.iam.create_policy(
PolicyName="Mandatory",
PolicyDocument=json_dumps(OPEN_MANDATORY_POLICY))
environ["MANDATORY_ROLE_POLICY_ARN"] = self.mandatory_arn = \
result["Policy"]["Arn"]
result = self.iam.create_policy(
PolicyName="Power",
PolicyDocument=json_dumps(POWER_USER_POLICY))
self.power_arn = result["Policy"]["Arn"]
result = self.iam.create_policy(
PolicyName="S3",
PolicyDocument=json_dumps(S3_POLICY))
self.s3_arn = result["Policy"]["Arn"]
ResponseHandler.responses = []
return
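    # invoke() below builds a minimal CloudFormation custom-resource event whose ResponseURL
    # points at the local HTTP server above, runs the Lambda handler, and returns the JSON
    # body the handler PUT back as its response.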
def invoke(self, ResourceType, RequestType="Create",
LogicalResourceId="LogicalResourceId", **kw):
sockname = self.server.socket.getsockname()
event = {
"StackId": "arn:aws:cloudformation:us-west-2:12345678:stack/stack-1234",
"RequestId": "req-1234",
"LogicalResourceId": LogicalResourceId,
"RequestType": RequestType,
"ResourceType": ResourceType,
"ResponseURL": "http://%s:%s/" % (sockname[0], sockname[1])
}
if "PhysicalResourceId" in kw:
event["PhysicalResourceId"] = kw.pop("PhysicalResourceId")
if "OldResourceProperties" in kw:
event["OldResourceProperties"] = kw.pop("OldResourceProperties")
event["ResourceProperties"] = kw
rolemaker.lambda_handler(event, None)
return json_loads(ResponseHandler.responses.pop())
def test_unknown_type(self):
result = self.invoke(ResourceType="Custom::Unknown")
self.assertEqual(result["Status"], "FAILED")
self.assertEqual(
result["Reason"],
"ClientError: An error occurred (InvalidParameterValue) when "
"calling the Unknown operation: Cannot handle CloudFormation "
"event Create Custom::Unknown")
def test_basic_create_delete(self):
role_name = "test-bc-%s" % randstr()
self.invoke(
ResourceType="Custom::RestrictedRole",
RoleName=role_name,
AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY)
print(self.iam.list_roles())
role = self.iam.get_role(RoleName=role_name)
self.assertEqual(role["Role"]["RoleName"], role_name)
arp = role["Role"]["AssumeRolePolicyDocument"]
self.assertEqual(BASIC_ASSUME_ROLE_POLICY, arp)
self.invoke(
ResourceType="Custom::RestrictedRole",
RequestType="Delete",
RoleName=role_name,
PhysicalResourceId=role_name)
with self.assertRaises(BotoClientError):
self.iam.get_role(RoleName=role_name)
def test_policy_updates(self):
create_props = {
"AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [self.power_arn],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest",
"PolicyDocument": OPEN_MANDATORY_POLICY
}
]
}
update1_props = {
"AssumeRolePolicyDocument": LAMBDA_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [self.s3_arn],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest2",
"PolicyDocument": OPEN_MANDATORY_POLICY
}
]
}
update2_props = {
"AssumeRolePolicyDocument": LAMBDA_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [self.s3_arn],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest2",
"PolicyDocument": S3_POLICY
}
]
}
update3_props = {
"AssumeRolePolicyDocument": LAMBDA_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [self.s3_arn],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest2",
"PolicyDocument": S3_POLICY,
"BadPolicy": "yes"
}
]
}
update4_props = {
"AssumeRolePolicyDocument": LAMBDA_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [],
"Policies": [
{
"PolicyName": "jsontest2",
"PolicyDocument": {
"TRIGGER_INVALID_JSON": "Yes",
}
}
]
}
response = self.invoke(
ResourceType="Custom::RestrictedRole", **create_props)
role_name = response["PhysicalResourceId"]
self.iam.get_role(RoleName=role_name)
attached = self.iam.list_attached_role_policies(RoleName=role_name)[
"AttachedPolicies"]
self.assertEqual(len(attached), 2)
policy_arns = set([pol["PolicyArn"] for pol in attached])
self.assertEqual(policy_arns, {self.mandatory_arn, self.power_arn})
inline = set(
self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
self.assertEqual(inline, {"strtest", "jsontest"})
response = self.invoke(
ResourceType="Custom::RestrictedRole",
RequestType="Update",
PhysicalResourceId=role_name,
OldResourceProperties=create_props,
**update1_props)
self.assertEqual("SUCCESS", response["Status"])
self.iam.get_role(RoleName=role_name)
attached = self.iam.list_attached_role_policies(RoleName=role_name)[
"AttachedPolicies"]
self.assertEqual(len(attached), 2)
policy_arns = set([pol["PolicyArn"] for pol in attached])
self.assertEqual(policy_arns, {self.mandatory_arn, self.s3_arn})
inline = set(
self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
self.assertEqual(inline, {"strtest", "jsontest2"})
response = self.invoke(
ResourceType="Custom::RestrictedRole",
RequestType="Update",
PhysicalResourceId=role_name,
OldResourceProperties=update1_props,
**update2_props)
self.assertEqual("SUCCESS", response["Status"])
self.iam.get_role(RoleName=role_name)
attached = self.iam.list_attached_role_policies(RoleName=role_name)[
"AttachedPolicies"]
self.assertEqual(len(attached), 2)
policy_arns = set([pol["PolicyArn"] for pol in attached])
self.assertEqual(policy_arns, {self.mandatory_arn, self.s3_arn})
inline = set(
self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
self.assertEqual(inline, {"strtest", "jsontest2"})
# Rollback due to invalid parameter
response = self.invoke(
ResourceType="Custom::RestrictedRole",
RequestType="Update",
PhysicalResourceId=role_name,
OldResourceProperties=update2_props,
**update3_props)
self.assertEqual("FAILED", response["Status"])
self.iam.get_role(RoleName=role_name)
attached = self.iam.list_attached_role_policies(RoleName=role_name)[
"AttachedPolicies"]
self.assertEqual(len(attached), 2)
policy_arns = set([pol["PolicyArn"] for pol in attached])
self.assertEqual(policy_arns, {self.mandatory_arn, self.s3_arn})
inline = set(
self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
self.assertEqual(inline, {"strtest", "jsontest2"})
response = self.invoke(
ResourceType="Custom::RestrictedRole",
RequestType="Update",
PhysicalResourceId=role_name,
OldResourceProperties=update2_props,
**update4_props)
self.assertEqual("FAILED", response["Status"])
self.assertIn(
"Policy contains TRIGGER_INVALID_JSON", response["Reason"])
self.iam.get_role(RoleName=role_name)
attached = self.iam.list_attached_role_policies(RoleName=role_name)[
"AttachedPolicies"]
self.assertEqual(len(attached), 2)
policy_arns = set([pol["PolicyArn"] for pol in attached])
self.assertEqual(policy_arns, {self.mandatory_arn, self.s3_arn})
inline = set(
self.iam.list_role_policies(RoleName=role_name)["PolicyNames"])
self.assertEqual(inline, {"strtest", "jsontest2"})
def test_name_change_updates(self):
create_props = {
"RoleName": "role1",
"AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [self.power_arn],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest",
"PolicyDocument": OPEN_MANDATORY_POLICY
}
]
}
good_update_props = {
"RoleName": "role2",
"AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [self.power_arn],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest",
"PolicyDocument": OPEN_MANDATORY_POLICY
}
]
}
bad_update_props = {
"RoleName": "role3",
"AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [12345],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest",
"PolicyDocument": OPEN_MANDATORY_POLICY
}
]
}
response = self.invoke(
ResourceType="Custom::RestrictedRole", **create_props)
role_name = response["PhysicalResourceId"]
self.iam.get_role(RoleName=role_name)
self.assertEqual("SUCCESS", response["Status"])
response = self.invoke(
ResourceType="Custom::RestrictedRole",
RequestType="Update",
PhysicalResourceId=role_name,
OldResourceProperties=create_props,
**good_update_props)
self.assertEqual("SUCCESS", response["Status"])
role_name = response["PhysicalResourceId"]
self.iam.get_role(RoleName=role_name)
response = self.invoke(
ResourceType="Custom::RestrictedRole",
RequestType="Update",
PhysicalResourceId=role_name,
OldResourceProperties=good_update_props,
**bad_update_props)
self.assertEqual("FAILED", response["Status"])
self.iam.get_role(RoleName=role_name)
with self.assertRaises(BotoClientError):
self.iam.get_role(RoleName="role3")
def test_no_update_path(self):
role_name = "test-nup-%s" % randstr()
create_props = {
"RoleName": role_name,
"Path": "/",
"AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [self.power_arn],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest",
"PolicyDocument": OPEN_MANDATORY_POLICY
}
]
}
update_props = {
"RoleName": role_name,
"Path": "/foo/",
"AssumeRolePolicyDocument": BASIC_ASSUME_ROLE_POLICY,
"ManagedPolicyArns": [self.power_arn],
"Policies": [
{
"PolicyName": "strtest",
"PolicyDocument": json_dumps(OPEN_MANDATORY_POLICY)
},
{
"PolicyName": "jsontest",
"PolicyDocument": OPEN_MANDATORY_POLICY
}
]
}
response = self.invoke(
ResourceType="Custom::RestrictedRole", **create_props)
role_name = response["PhysicalResourceId"]
self.iam.get_role(RoleName=role_name)
self.assertEqual("SUCCESS", response["Status"])
response = self.invoke(
ResourceType="Custom::RestrictedRole",
RequestType="Update",
PhysicalResourceId=role_name,
OldResourceProperties=create_props,
**update_props)
self.assertEqual("FAILED", response["Status"])
self.assertIn("Cannot update the path to an existing role",
response["Reason"])
role_name = response["PhysicalResourceId"]
self.iam.get_role(RoleName=role_name)
def test_create_bad_inline_policy(self):
response = self.invoke(
ResourceType="Custom::RestrictedRole", RoleName="role1",
AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
Policies=[{"PolicyDocument": {}}])
self.assertEqual("FAILED", response["Status"])
self.assertIn("Inline policy missing PolicyName", response["Reason"])
response = self.invoke(
ResourceType="Custom::RestrictedRole", RoleName="role1",
AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
Policies=[{"PolicyName": "foo"}])
self.assertEqual("FAILED", response["Status"])
self.assertIn("Inline policy missing PolicyDocument",
response["Reason"])
response = self.invoke(
ResourceType="Custom::RestrictedRole", RoleName="role1",
AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
Policies=[{"PolicyName": 1, "PolicyDocument": {}}])
self.assertEqual("FAILED", response["Status"])
self.assertIn(
"Invalid type for parameter PolicyName, value: 1, type "
"<class 'int'>, valid types: <class 'str'>",
response["Reason"])
response = self.invoke(
ResourceType="Custom::RestrictedRole", RoleName="role1",
AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
Policies=[{"PolicyName": "", "PolicyDocument": {}}])
self.assertEqual("FAILED", response["Status"])
self.assertIn("Invalid length for parameter PolicyName, value: 0",
response["Reason"])
response = self.invoke(
ResourceType="Custom::RestrictedRole", RoleName="role1",
AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
Policies=[{"PolicyName": "foo", "PolicyDocument": {}, "bar": 0}])
self.assertEqual("FAILED", response["Status"])
self.assertIn("Invalid inline policy parameter(s): bar",
response["Reason"])
response = self.invoke(
ResourceType="Custom::RestrictedRole", RoleName="role1",
AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
Policies=[{"PolicyName": "foo", "PolicyDocument": 1}])
self.assertEqual("FAILED", response["Status"])
self.assertIn(
"Invalid type for parameter PolicyDocument, value: 1, type "
"<class 'int'>, valid types: (<class 'str'>, <class 'dict'>)",
response["Reason"])
def test_create_missing_assume_role(self):
response = self.invoke(
ResourceType="Custom::RestrictedRole", RoleName="role1")
self.assertEqual("FAILED", response["Status"])
self.assertIn("AssumeRolePolicyDocument is missing", response["Reason"])
def test_create_unknown_props(self):
response = self.invoke(
ResourceType="Custom::RestrictedRole", RoleName="role1",
AssumeRolePolicyDocument=BASIC_ASSUME_ROLE_POLICY,
Invalid=True)
self.assertEqual("FAILED", response["Status"])
self.assertIn("Unknown properties: Invalid", response["Reason"])
@mock_iam
class TestDirectInvoke(TestCase):
"""
Test direct Lambda invocation handling.
"""
mandatory_arn = ""
power_arn = ""
def setUp(self):
self.iam = boto3.client("iam")
result = self.iam.create_policy(
PolicyName="Mandatory",
PolicyDocument=json_dumps(OPEN_MANDATORY_POLICY))
environ["MANDATORY_ROLE_POLICY_ARN"] = self.mandatory_arn = \
result["Policy"]["Arn"]
result = self.iam.create_policy(
PolicyName="Power",
PolicyDocument=json_dumps(POWER_USER_POLICY))
self.power_arn = result["Policy"]["Arn"]
ResponseHandler.responses = []
def invoke(self, **kw): # pylint: disable=R0201
return rolemaker.lambda_handler(kw, None)
def test_basic_workflows(self):
role_name = "test-bw-%s" % randstr()
result = self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
self.assertNotIn("Error", result)
self.assertIn("Role", result)
role = self.iam.get_role(RoleName=role_name)
self.assertEqual(role["Role"]["RoleName"], role_name)
arp = role["Role"]["AssumeRolePolicyDocument"]
self.assertEqual(BASIC_ASSUME_ROLE_POLICY, arp)
result = self.invoke(
Action="AttachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=self.power_arn)
self.assertNotIn("Error", result)
result = self.invoke(
Action="PutRestrictedRolePolicy", RoleName=role_name,
PolicyName="Assume",
PolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
self.assertNotIn("Error", result)
result = self.iam.list_attached_role_policies(RoleName=role_name)
policy_arns = set([
policy["PolicyArn"] for policy in result["AttachedPolicies"]])
self.assertEqual(policy_arns, {self.mandatory_arn, self.power_arn})
result = self.iam.list_role_policies(RoleName=role_name)
self.assertEqual(result["PolicyNames"], ["Assume"])
result = self.iam.get_role_policy(
RoleName=role_name, PolicyName="Assume")
self.assertEqual(result["PolicyDocument"], BASIC_ASSUME_ROLE_POLICY)
result = self.invoke(
Action="DetachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=self.power_arn)
self.assertNotIn("Error", result)
result = self.invoke(
Action="DeleteRestrictedRolePolicy", RoleName=role_name,
PolicyName="Assume")
result = self.iam.list_attached_role_policies(RoleName=role_name)
policy_arns = set([
policy["PolicyArn"] for policy in result["AttachedPolicies"]])
self.assertEqual(
policy_arns, {environ["MANDATORY_ROLE_POLICY_ARN"]})
result = self.iam.list_role_policies(RoleName=role_name)
self.assertEqual(result["PolicyNames"], [])
result = self.invoke(
Action="UpdateAssumeRestrictedRolePolicy", RoleName=role_name,
PolicyDocument=json_dumps(LAMBDA_ASSUME_ROLE_POLICY))
self.assertNotIn("Error", result)
# Moto doesn't implement this yet.
if False: # pylint: disable=W0125
result = self.invoke(
Action="UpdateRestrictedRoleDescription", RoleName=role_name,
Description="New Description")
self.assertNotIn("Error", result)
self.assertEqual(
"New Description",
self.iam.get_role(RoleName=role_name)["Role"]["Description"])
result = self.invoke(
Action="DeleteRestrictedRole", RoleName=role_name)
self.assertNotIn("Error", result)
with self.assertRaises(BotoClientError):
role = self.iam.get_role(RoleName=role_name)
def test_attempt_modify_nonrestricted(self):
role_name = "test-amn-%s" % randstr()
def check_result(result):
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn(
"Role %s is not a restricted role" % role_name,
result["Error"]["Message"])
self.iam.create_role(
RoleName=role_name,
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
result = self.invoke(
Action="AttachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=self.power_arn)
check_result(result)
result = self.invoke(
Action="DetachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=self.power_arn)
check_result(result)
result = self.invoke(
Action="PutRestrictedRolePolicy", RoleName=role_name,
PolicyName="foo", PolicyDocument="{}")
check_result(result)
result = self.invoke(
Action="DeleteRestrictedRolePolicy", RoleName=role_name,
PolicyName="foo")
check_result(result)
result = self.invoke(
Action="UpdateAssumeRestrictedRolePolicy", RoleName=role_name,
PolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
check_result(result)
result = self.invoke(
Action="UpdateRestrictedRoleDescription", RoleName=role_name,
Description="Hello world")
check_result(result)
def test_attempt_detach_mandatory(self):
role_name = "test-dm-%s" % randstr()
self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
result = self.invoke(
Action="DetachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=environ["MANDATORY_ROLE_POLICY_ARN"])
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn("Cannot detach the mandatory policy.",
result["Error"]["Message"])
def test_empty_rolename(self):
def check_result(result):
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn(
"Invalid length for parameter RoleName, value: 0",
result["Error"]["Message"])
result = self.invoke(
Action="CreateRestrictedRole",
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
check_result(result)
result = self.invoke(
Action="CreateRestrictedRole",
RoleName="",
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
check_result(result)
result = self.invoke(Action="DeleteRestrictedRole")
check_result(result)
result = self.invoke(Action="DeleteRestrictedRole", RoleName="")
check_result(result)
result = self.invoke(
Action="AttachRestrictedRolePolicy",
PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
check_result(result)
result = self.invoke(
Action="AttachRestrictedRolePolicy",
RoleName="",
PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
check_result(result)
result = self.invoke(
Action="DetachRestrictedRolePolicy",
PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
check_result(result)
result = self.invoke(
Action="DetachRestrictedRolePolicy",
RoleName="",
PolicyArn="arn:aws:iam::aws:policy/AdministratorAccess")
check_result(result)
result = self.invoke(
Action="UpdateRestrictedRoleDescription",
Description="This is a test description")
check_result(result)
result = self.invoke(
Action="UpdateRestrictedRoleDescription",
RoleName="",
Description="This is a test description")
check_result(result)
result = self.invoke(
Action="UpdateAssumeRestrictedRolePolicy",
PolicyDocument="{}")
check_result(result)
result = self.invoke(
Action="UpdateAssumeRestrictedRolePolicy",
RoleName="",
PolicyDocument="{}")
check_result(result)
result = self.invoke(
Action="PutRestrictedRolePolicy",
PolicyName="Foo",
PolicyDocument="{}")
check_result(result)
result = self.invoke(
Action="PutRestrictedRolePolicy",
RoleName="",
PolicyName="Foo",
PolicyDocument="{}")
check_result(result)
result = self.invoke(
Action="DeleteRestrictedRolePolicy",
PolicyName="Foo")
check_result(result)
result = self.invoke(
Action="DeleteRestrictedRolePolicy",
RoleName="",
PolicyName="Foo")
check_result(result)
def test_delete_nonexistent_role(self):
role_name = "test-dnr-%s" % randstr()
result = self.invoke(
Action="DeleteRestrictedRole", RoleName=role_name)
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "NoSuchEntity")
self.assertIn(
"Role %s not found" % role_name, result["Error"]["Message"])
def test_delete_nonexistent_attached_policy(self):
role_name = "test-dnap-%s" % randstr()
self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
result = self.invoke(
Action="DetachRestrictedRolePolicy", RoleName=role_name,
PolicyArn="arn:aws:iam:::policy/nonexistent")
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "NoSuchEntity")
self.assertIn(
"Policy arn:aws:iam:::policy/nonexistent was not found",
result["Error"]["Message"])
def test_attempt_delete_role_with_policies(self):
role_name = "test-drwp-%s" % randstr()
self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
result = self.invoke(
Action="AttachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=self.power_arn)
self.assertNotIn("Error", result)
result = self.invoke(
Action="DeleteRestrictedRole", RoleName=role_name)
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "DeleteConflict")
self.assertIn("Cannot delete entity, must detach all policies first.",
result["Error"]["Message"])
result = self.invoke(
Action="DetachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=self.power_arn)
self.assertNotIn("Error", result)
result = self.invoke(
Action="PutRestrictedRolePolicy", RoleName=role_name,
PolicyName="inline1",
PolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
self.assertNotIn("Error", result)
result = self.invoke(
Action="DeleteRestrictedRole", RoleName=role_name)
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "DeleteConflict")
self.assertIn("Cannot delete entity, must delete policies first.",
result["Error"]["Message"])
result = self.invoke(
Action="DeleteRestrictedRolePolicy", RoleName=role_name,
PolicyName="inline1")
self.assertNotIn("Error", result)
result = self.invoke(
Action="DeleteRestrictedRole", RoleName=role_name)
self.assertNotIn("Error", result)
def test_create_bad_parameters(self):
role_name = "test-cbp-%s" % randstr()
def check_result(result, message):
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn(message, result["Error"]["Message"])
check_result(
self.invoke(
Action="CreateRestrictedRole", RoleName=1234,
AssumeRolePolicyDocument="{}", Path="/", Description="1"),
"Invalid type for parameter RoleName, value: 1")
check_result(
self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument="", Path="/", Description="1"),
"Invalid length for parameter AssumeRolePolicyDocument, value: 0")
check_result(
self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument=1, Path="/", Description="1"),
"Invalid type for parameter AssumeRolePolicyDocument, value: 1")
check_result(
self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument="{}", Path=1, Description="1"),
"Invalid type for parameter Path, value: 1")
check_result(
self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument="{}", Path="/", Description=1),
"Invalid type for parameter Description, value: 1")
def test_attach_detach_bad_parameters(self):
role_name = "test-adbp-%s" % randstr()
def check_result(result, message):
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn(message, result["Error"]["Message"])
self.invoke(
Action="CreateRestrictedRole",
RoleName=role_name,
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
check_result(
self.invoke(
Action="AttachRestrictedRolePolicy", RoleName="",
PolicyArn=self.power_arn),
"Invalid length for parameter RoleName, value: 0")
check_result(
self.invoke(
Action="AttachRestrictedRolePolicy", RoleName=1,
PolicyArn=self.power_arn),
"Invalid type for parameter RoleName, value: 1")
check_result(
self.invoke(
Action="AttachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=""),
"Invalid length for parameter PolicyArn, value: 0")
check_result(
self.invoke(
Action="AttachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=1),
"Invalid type for parameter PolicyArn, value: 1")
check_result(
self.invoke(
Action="DetachRestrictedRolePolicy", RoleName="",
PolicyArn=self.power_arn),
"Invalid length for parameter RoleName, value: 0")
check_result(
self.invoke(
Action="DetachRestrictedRolePolicy", RoleName=1,
PolicyArn=self.power_arn),
"Invalid type for parameter RoleName, value: 1")
check_result(
self.invoke(
Action="DetachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=""),
"Invalid length for parameter PolicyArn, value: 0")
check_result(
self.invoke(
Action="DetachRestrictedRolePolicy", RoleName=role_name,
PolicyArn=1),
"Invalid type for parameter PolicyArn, value: 1")
def test_put_delete_bad_parameters(self):
role_name = "test-pdbp-%s" % randstr()
def check_result(result, message):
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn(message, result["Error"]["Message"])
self.invoke(
Action="CreateRestrictedRole", RoleName=role_name,
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
check_result(
self.invoke(
Action="PutRestrictedRolePolicy", RoleName="",
PolicyName="inline1",
PolicyDocument=json_dumps(POWER_USER_POLICY)),
"Invalid length for parameter RoleName, value: 0")
check_result(
self.invoke(
Action="PutRestrictedRolePolicy", RoleName=1,
PolicyName="inline1",
PolicyDocument=json_dumps(POWER_USER_POLICY)),
"Invalid type for parameter RoleName, value: 1")
check_result(
self.invoke(
Action="PutRestrictedRolePolicy", RoleName=role_name,
PolicyName="",
PolicyDocument=json_dumps(POWER_USER_POLICY)),
"Invalid length for parameter PolicyName, value: 0")
check_result(
self.invoke(
Action="PutRestrictedRolePolicy", RoleName=role_name,
PolicyName=1,
PolicyDocument=json_dumps(POWER_USER_POLICY)),
"Invalid type for parameter PolicyName, value: 1")
check_result(
self.invoke(
Action="PutRestrictedRolePolicy", RoleName=role_name,
PolicyName="inline1", PolicyDocument=""),
"Invalid length for parameter PolicyDocument, value: 0")
check_result(
self.invoke(
Action="PutRestrictedRolePolicy", RoleName=role_name,
PolicyName="inline1", PolicyDocument=1),
"Invalid type for parameter PolicyDocument, value: 1")
check_result(
self.invoke(
Action="DeleteRestrictedRolePolicy", RoleName="",
PolicyName="inline1"),
"Invalid length for parameter RoleName, value: 0")
check_result(
self.invoke(
Action="DeleteRestrictedRolePolicy", RoleName=1,
PolicyName="inline1"),
"Invalid type for parameter RoleName, value: 1")
check_result(
self.invoke(
Action="DeleteRestrictedRolePolicy", RoleName=role_name,
PolicyName=""),
"Invalid length for parameter PolicyName, value: 0")
check_result(
self.invoke(
Action="DeleteRestrictedRolePolicy", RoleName=role_name,
PolicyName=1),
"Invalid type for parameter PolicyName, value: 1")
def test_missing_environ(self):
def check_result(result):
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InternalFailure")
self.assertEqual(result["Error"]["Type"], "Receiver")
self.assertIn(
"Environment variable MANDATORY_ROLE_POLICY_ARN has not "
"been set on the Lambda function.",
result["Error"]["Message"])
result = self.invoke(
Action="CreateRestrictedRole", RoleName="ok-role",
AssumeRolePolicyDocument=json_dumps(OPEN_MANDATORY_POLICY))
self.assertNotIn("Error", result)
del environ["MANDATORY_ROLE_POLICY_ARN"]
try:
check_result(self.invoke(
Action="CreateRestrictedRole", RoleName="test-role-missing-env",
AssumeRolePolicyDocument="{}", Path="/", Description=""))
check_result(self.invoke(
Action="DeleteRestrictedRole", RoleName="ok-role"))
check_result(self.invoke(
Action="AttachRestrictedRolePolicy", RoleName="ok-role",
PolicyArn=self.power_arn))
check_result(self.invoke(
Action="DetachRestrictedRolePolicy", RoleName="ok-role",
PolicyArn=self.power_arn))
check_result(self.invoke(
Action="PutRestrictedRolePolicy", RoleName="ok-role",
PolicyName="inline1",
PolicyDocument=json_dumps(POWER_USER_POLICY)))
check_result(self.invoke(
Action="DeleteRestrictedRolePolicy", RoleName="ok-role",
PolicyName="inline1"))
check_result(self.invoke(
Action="UpdateRestrictedRoleDescription", RoleName="ok-role",
Description="A new description"))
check_result(self.invoke(
Action="UpdateAssumeRestrictedRolePolicy", RoleName="ok-role",
PolicyDocument=json_dumps(OPEN_MANDATORY_POLICY)))
finally:
environ["MANDATORY_ROLE_POLICY_ARN"] = self.mandatory_arn
def test_bad_mandatory_policy(self):
invalid = "arn:aws:iam::aws:invalid-policy-name"
environ["MANDATORY_ROLE_POLICY_ARN"] = invalid
try:
result = self.invoke(
Action="CreateRestrictedRole", RoleName="test-role-bad-mand",
AssumeRolePolicyDocument="{}", Path="/", Description="")
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InternalFailure")
self.assertEqual(result["Error"]["Type"], "Receiver")
self.assertIn(
"Unable to attach MANDATORY_ROLE_POLICY_ARN %s "
"to newly created role." % invalid, result["Error"]["Message"])
with self.assertRaises(BotoClientError):
self.iam.get_role(RoleName="test-role-bad-mand")
finally:
environ["MANDATORY_ROLE_POLICY_ARN"] = self.mandatory_arn
def test_delete_non_restricted_role(self):
self.iam.create_role(
RoleName="ok-role-non-restrict",
AssumeRolePolicyDocument=json_dumps(BASIC_ASSUME_ROLE_POLICY))
result = self.invoke(
Action="DeleteRestrictedRole", RoleName="ok-role-non-restrict")
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn("Role ok-role-non-restrict is not a restricted "
"role.", result["Error"]["Message"])
def test_unknown_action(self):
result = self.invoke(Action="NotAnAction")
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidAction")
def test_unknown_parameters(self):
result = self.invoke(Action="CreateRestrictedRole", Invalid=1)
self.assertIn("Error", result)
self.assertEqual(result["Error"]["Code"], "InvalidParameterValue")
self.assertIn("Unknown parameter(s): Invalid",
result["Error"]["Message"])
|
_core.py
|
# minqlx - Extends Quake Live's dedicated server with extra functionality and scripting.
# Copyright (C) 2015 Mino <mino@minomino.org>
# This file is part of minqlx.
# minqlx is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# minqlx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with minqlx. If not, see <http://www.gnu.org/licenses/>.
# Since this isn't the actual module, we define it here and export
# it later so that it can be accessed with minqlx.__doc__ by Sphinx.
import minqlx
import minqlx.database
import collections
import subprocess
import threading
import traceback
import importlib
import datetime
import os.path
import logging
import shlex
import sys
import os
from logging.handlers import RotatingFileHandler
# em92: reasons not to support older than 3.5
# https://docs.python.org/3.5/whatsnew/3.5.html#whatsnew-ordereddict
# plugins already assume, that they are running on python >= 3.5
if sys.version_info < (3,5):
raise AssertionError("Only python 3.5 and later is supported by minqlx")
# Team number -> string
TEAMS = collections.OrderedDict(enumerate(("free", "red", "blue", "spectator")))
# Game type number -> string
GAMETYPES = collections.OrderedDict([(i, gt) for i, gt in enumerate(("Free for All", "Duel", "Race", "Team Deathmatch",
"Clan Arena", "Capture the Flag", "One Flag", "", "Harvester", "Freeze Tag", "Domination", "Attack and Defend",
"Red Rover")) if gt])
# Game type number -> short string
GAMETYPES_SHORT = collections.OrderedDict([(i, gt) for i, gt in enumerate(("ffa", "duel", "race", "tdm", "ca", "ctf",
"1f", "", "har", "ft", "dom", "ad", "rr")) if gt])
# Connection states.
CONNECTION_STATES = collections.OrderedDict(enumerate(("free", "zombie", "connected", "primed", "active")))
WEAPONS = collections.OrderedDict([(i, w) for i, w in enumerate(("", "g", "mg", "sg", "gl", "rl", "lg", "rg",
"pg", "bfg", "gh", "ng", "pl", "cg", "hmg", "hands")) if w])
DEFAULT_PLUGINS = ("plugin_manager", "essentials", "motd", "permission", "ban", "silence", "clan", "names", "log", "workshop")
# ====================================================================
# HELPERS
# ====================================================================
def parse_variables(varstr, ordered=False):
"""
Parses strings of key-value pairs delimited by "\\" and puts
them into a dictionary.
:param varstr: The string with variables.
:type varstr: str
:param ordered: Whether it should use :class:`collections.OrderedDict` or not.
:type ordered: bool
:returns: dict -- A dictionary with the variables added as key-value pairs.
"""
if ordered:
res = collections.OrderedDict()
else:
res = {}
if not varstr.strip():
return res
    items = varstr.lstrip("\\").split("\\")
    try:
        for i in range(0, len(items), 2):
            res[items[i]] = items[i + 1]
except IndexError:
# Log and return incomplete dict.
logger = minqlx.get_logger()
logger.warning("Uneven number of keys and values: {}".format(varstr))
return res
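# Illustrative example: parse_variables("\\sv_maxclients\\16\\g_gametype\\4")
# returns {"sv_maxclients": "16", "g_gametype": "4"}.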
main_logger = None
def get_logger(plugin=None):
"""
Provides a logger that should be used by your plugin for debugging, info
and error reporting. It will automatically output to both the server console
as well as to a file.
:param plugin: The plugin that is using the logger.
:type plugin: minqlx.Plugin
:returns: logging.Logger -- The logger in question.
"""
if plugin:
return logging.getLogger("minqlx." + str(plugin))
else:
return logging.getLogger("minqlx")
def _configure_logger():
logger = logging.getLogger("minqlx")
logger.setLevel(logging.DEBUG)
# File
file_path = os.path.join(minqlx.get_cvar("fs_homepath"), "minqlx.log")
maxlogs = minqlx.Plugin.get_cvar("qlx_logs", int)
maxlogsize = minqlx.Plugin.get_cvar("qlx_logsSize", int)
file_fmt = logging.Formatter("(%(asctime)s) [%(levelname)s @ %(name)s.%(funcName)s] %(message)s", "%H:%M:%S")
file_handler = RotatingFileHandler(file_path, encoding="utf-8", maxBytes=maxlogsize, backupCount=maxlogs)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(file_fmt)
logger.addHandler(file_handler)
logger.info("============================= minqlx run @ {} ============================="
.format(datetime.datetime.now()))
# Console
console_fmt = logging.Formatter("[%(name)s.%(funcName)s] %(levelname)s: %(message)s", "%H:%M:%S")
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(console_fmt)
logger.addHandler(console_handler)
def log_exception(plugin=None):
"""
Logs an exception using :func:`get_logger`. Call this in an except block.
:param plugin: The plugin that is using the logger.
:type plugin: minqlx.Plugin
"""
# TODO: Remove plugin arg and make it automatic.
logger = get_logger(plugin)
e = traceback.format_exc().rstrip("\n")
for line in e.split("\n"):
logger.error(line)
def handle_exception(exc_type, exc_value, exc_traceback):
"""A handler for unhandled exceptions."""
# TODO: If exception was raised within a plugin, detect it and pass to log_exception()
logger = get_logger(None)
e = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip("\n")
for line in e.split("\n"):
logger.error(line)
_init_time = datetime.datetime.now()
def uptime():
"""Returns a :class:`datetime.timedelta` instance of the time since initialized."""
return datetime.datetime.now() - _init_time
def owner():
"""Returns the SteamID64 of the owner. This is set in the config."""
try:
sid = int(minqlx.get_cvar("qlx_owner"))
if sid == -1:
raise RuntimeError
return sid
except:
logger = minqlx.get_logger()
logger.error("Failed to parse the Owner Steam ID. Make sure it's in SteamID64 format.")
_stats = None
def stats_listener():
"""Returns the :class:`minqlx.StatsListener` instance used to listen for stats."""
return _stats
def set_cvar_once(name, value, flags=0):
if minqlx.get_cvar(name) is None:
minqlx.set_cvar(name, value, flags)
return True
return False
def set_cvar_limit_once(name, value, minimum, maximum, flags=0):
if minqlx.get_cvar(name) is None:
minqlx.set_cvar_limit(name, value, minimum, maximum, flags)
return True
return False
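# Note on the two helpers above: they only write the cvar when it has no value
# yet, so anything already set by the server config or command line keeps
# precedence over the defaults applied in initialize_cvars().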
def set_plugins_version(path):
args_version = shlex.split("git describe --long --tags --dirty --always")
args_branch = shlex.split("git rev-parse --abbrev-ref HEAD")
# We keep environment variables, but remove LD_PRELOAD to avoid a warning the OS might throw.
env = dict(os.environ)
    env.pop("LD_PRELOAD", None)
try:
# Get the version using git describe.
p = subprocess.Popen(args_version, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path, env=env)
p.wait(timeout=1)
if p.returncode != 0:
setattr(minqlx, "__plugins_version__", "NOT_SET")
return
version = p.stdout.read().decode().strip()
# Get the branch using git rev-parse.
p = subprocess.Popen(args_branch, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=path, env=env)
p.wait(timeout=1)
if p.returncode != 0:
setattr(minqlx, "__plugins_version__", version)
return
branch = p.stdout.read().decode().strip()
except (FileNotFoundError, subprocess.TimeoutExpired):
setattr(minqlx, "__plugins_version__", "NOT_SET")
return
setattr(minqlx, "__plugins_version__", "{}-{}".format(version, branch))
def set_map_subtitles():
# We save the actual values before setting them so that we can retrieve them in Game.
setattr(minqlx, "_map_title", minqlx.get_configstring(3))
setattr(minqlx, "_map_subtitle1", minqlx.get_configstring(678))
setattr(minqlx, "_map_subtitle2", minqlx.get_configstring(679))
cs = minqlx.get_configstring(678)
if cs:
cs += " - "
minqlx.set_configstring(678, cs + "Running minqlx ^6{}^7 with plugins ^6{}^7."
.format(minqlx.__version__, minqlx.__plugins_version__))
cs = minqlx.get_configstring(679)
if cs:
cs += " - "
minqlx.set_configstring(679, cs + "Check ^6http://github.com/MinoMino/minqlx^7 for more details.")
# ====================================================================
# DECORATORS
# ====================================================================
def next_frame(func):
def f(*args, **kwargs):
minqlx.next_frame_tasks.append((func, args, kwargs))
return f
def delay(time):
"""Delay a function call a certain amount of time.
.. note::
        There is no guarantee that it will be called exactly as the timer
        expires, but unless some plugin is blocking the frame loop, you can
        expect it to be called practically as soon as it expires.
:param func: The function to be called.
:type func: callable
:param time: The number of seconds before the function should be called.
:type time: int
"""
def wrap(func):
def f(*args, **kwargs):
minqlx.frame_tasks.enter(time, 0, func, args, kwargs)
return f
return wrap
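# Sketch of how the decorator is meant to be used (the function body below is
# illustrative only):
#
#   @minqlx.delay(10)
#   def announce():
#       minqlx.console_print("Ten seconds have passed.")
#
# Calling announce() only schedules it on minqlx.frame_tasks; it runs roughly
# ten seconds later on the main thread's frame loop.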
_thread_count = 0
_thread_name = "minqlxthread"
def thread(func, force=False):
"""Starts a thread with the function passed as its target. If a function decorated
with this is called within a function also decorated, it will **not** create a second
thread unless told to do so with the *force* keyword.
    :param func: The function to be run in a thread.
:type func: callable
:param force: Force it to create a new thread even if already in one created by this decorator.
:type force: bool
:returns: threading.Thread
"""
def f(*args, **kwargs):
if not force and threading.current_thread().name.endswith(_thread_name):
func(*args, **kwargs)
else:
global _thread_count
name = func.__name__ + "-{}-{}".format(str(_thread_count), _thread_name)
t = threading.Thread(target=func, name=name, args=args, kwargs=kwargs, daemon=True)
t.start()
_thread_count += 1
return t
return f
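# Sketch (illustrative) of offloading blocking work with the decorator above:
#
#   @minqlx.thread
#   def fetch_something():
#       data = urllib.request.urlopen("http://example.com").read()  # blocking I/O
#
# When a new thread is spawned, the call returns the daemon threading.Thread.
# If fetch_something() is called from a thread this decorator already created,
# it runs inline instead of spawning another one (unless force=True is passed
# to minqlx.thread()).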
# ====================================================================
# CONFIG AND PLUGIN LOADING
# ====================================================================
# We need to keep track of module instances for use with importlib.reload.
_modules = {}
class PluginLoadError(Exception):
pass
class PluginUnloadError(Exception):
pass
def load_preset_plugins():
plugins_temp = []
for p in minqlx.Plugin.get_cvar("qlx_plugins", list):
if p == "DEFAULT":
plugins_temp += list(DEFAULT_PLUGINS)
else:
plugins_temp.append(p)
plugins = []
for p in plugins_temp:
if p not in plugins:
plugins.append(p)
plugins_path = os.path.abspath(minqlx.get_cvar("qlx_pluginsPath"))
plugins_dir = os.path.basename(plugins_path)
if os.path.isdir(plugins_path):
        # Filter out plugins whose module has already been imported.
        plugins = [p for p in plugins if "{}.{}".format(plugins_dir, p) not in sys.modules]
for p in plugins:
load_plugin(p)
else:
        raise PluginLoadError("Cannot find the plugins directory '{}'."
                              .format(os.path.abspath(plugins_path)))
def load_plugin(plugin):
logger = get_logger(None)
logger.info("Loading plugin '{}'...".format(plugin))
plugins = minqlx.Plugin._loaded_plugins
plugins_path = os.path.abspath(minqlx.get_cvar("qlx_pluginsPath"))
plugins_dir = os.path.basename(plugins_path)
if not os.path.isfile(os.path.join(plugins_path, plugin + ".py")):
raise PluginLoadError("No such plugin exists.")
elif plugin in plugins:
return reload_plugin(plugin)
try:
module = importlib.import_module("{}.{}".format(plugins_dir, plugin))
# We add the module regardless of whether it fails or not, otherwise we can't reload later.
global _modules
_modules[plugin] = module
if not hasattr(module, plugin):
            raise PluginLoadError("The plugin needs to have a class with the same name "
                                  "as the file, minus the .py.")
plugin_class = getattr(module, plugin)
if issubclass(plugin_class, minqlx.Plugin):
plugins[plugin] = plugin_class()
else:
            raise PluginLoadError("Attempted to load a plugin that is not a subclass of 'minqlx.Plugin'.")
except:
log_exception(plugin)
raise
def unload_plugin(plugin):
logger = get_logger(None)
logger.info("Unloading plugin '{}'...".format(plugin))
plugins = minqlx.Plugin._loaded_plugins
if plugin in plugins:
try:
minqlx.EVENT_DISPATCHERS["unload"].dispatch(plugin)
# Unhook its hooks.
for hook in plugins[plugin].hooks:
plugins[plugin].remove_hook(*hook)
# Unregister commands.
for cmd in plugins[plugin].commands:
plugins[plugin].remove_command(cmd.name, cmd.handler)
del plugins[plugin]
except:
log_exception(plugin)
raise
else:
        raise PluginUnloadError("Attempted to unload a plugin that is not loaded.")
def reload_plugin(plugin):
try:
unload_plugin(plugin)
except PluginUnloadError:
pass
try:
global _modules
if plugin in _modules: # Unloaded previously?
importlib.reload(_modules[plugin])
load_plugin(plugin)
except:
log_exception(plugin)
raise
def initialize_cvars():
# Core
minqlx.set_cvar_once("qlx_owner", "-1")
minqlx.set_cvar_once("qlx_plugins", ", ".join(DEFAULT_PLUGINS))
minqlx.set_cvar_once("qlx_pluginsPath", "minqlx-plugins")
minqlx.set_cvar_once("qlx_database", "Redis")
minqlx.set_cvar_once("qlx_commandPrefix", "!")
minqlx.set_cvar_once("qlx_logs", "2")
minqlx.set_cvar_once("qlx_logsSize", str(3*10**6)) # 3 MB
# Redis
minqlx.set_cvar_once("qlx_redisAddress", "127.0.0.1")
minqlx.set_cvar_once("qlx_redisDatabase", "0")
minqlx.set_cvar_once("qlx_redisUnixSocket", "0")
minqlx.set_cvar_once("qlx_redisPassword", "")
# ====================================================================
# MAIN
# ====================================================================
def initialize():
minqlx.register_handlers()
def late_init():
"""Initialization that needs to be called after QLDS has finished
its own initialization.
"""
minqlx.initialize_cvars()
# Set the default database plugins should use.
# TODO: Make Plugin.database setting generic.
if minqlx.get_cvar("qlx_database").lower() == "redis":
minqlx.Plugin.database = minqlx.database.Redis
# Get the plugins path and set minqlx.__plugins_version__.
plugins_path = os.path.abspath(minqlx.get_cvar("qlx_pluginsPath"))
set_plugins_version(plugins_path)
    # Initialize the logger now that we have fs_homepath.
_configure_logger()
logger = get_logger()
# Set our own exception handler so that we can log them if unhandled.
sys.excepthook = handle_exception
    # Add the parent of the plugins directory to sys.path so that plugins can be imported later.
sys.path.append(os.path.dirname(plugins_path))
logger.info("Loading preset plugins...")
load_preset_plugins()
if bool(int(minqlx.get_cvar("zmq_stats_enable"))):
global _stats
_stats = minqlx.StatsListener()
logger.info("Stats listener started on {}.".format(_stats.address))
# Start polling. Not blocking due to decorator magic. Aw yeah.
_stats.keep_receiving()
logger.info("We're good to go!")
|
main3.py
|
import sys
import random
import datetime
import asyncio
from threading import Thread
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication
from tornado.ioloop import IOLoop
from rx import Observable
from rx.subjects import Subject
from rx.concurrency import QtScheduler, AsyncIOScheduler
from client.client import Client
from client.client_window import ClientWindow
import utils
if __name__ == '__main__':
app = QApplication(sys.argv)
scheduler = QtScheduler(QtCore)
stock_prices = Subject()
client = Client(port='9999')
loop = asyncio.new_event_loop()
asyncio_scheduler = AsyncIOScheduler(loop)
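    # run_client() below gives the websocket client its own asyncio event loop in
    # a daemon thread; utils.run_client_websocket is expected to feed incoming
    # prices into the `stock_prices` Subject, which ClientWindow subscribes to.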
def run_client():
asyncio.set_event_loop(loop)
asyncio.get_event_loop().call_soon(
utils.run_client_websocket, client, stock_prices)
asyncio.get_event_loop().run_forever()
thread = Thread(target=run_client, daemon=True)
thread.start()
client_window = ClientWindow(stock_prices_stream=stock_prices)
def send_order(order):
stock_order = {
'stock_symbol': order['symbol'],
'price': order['price'],
'direction': 'buy',
'quantity': 1,
}
utils.write_order(client, stock_order)
client_window.get_orders() \
.filter(utils.order_is_valid) \
.subscribe(send_order)
client_window.show()
sys.exit(app.exec_())
|
test_pool.py
|
import threading
import time
from sqlalchemy import pool, select, event
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing.util import gc_collect, lazy_gc
from sqlalchemy.testing import eq_, assert_raises, is_not_, is_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
import random
from sqlalchemy.testing.mock import Mock, call, patch, ANY
import weakref
import collections
join_timeout = 10
def MockDBAPI():
def cursor():
return Mock()
def connect(*arg, **kw):
return Mock(cursor=Mock(side_effect=cursor))
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect),
shutdown=shutdown,
is_shutdown=False)
return db
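# MockDBAPI() above stands in for a real DBAPI module: connect() hands out Mock
# connections, while shutdown(True) swaps connect() for one that raises, letting
# the tests below simulate a database outage (shutdown(False) restores it).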
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
self._teardown_conns = []
def teardown(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
class PoolTest(PoolTestBase):
def test_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
c1 = manager.connect('foo.db')
c2 = manager.connect('foo.db')
c3 = manager.connect('bar.db')
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
dbapi = MockDBAPI()
manager = pool.manage(dbapi, use_threadlocal=True)
c1 = manager.connect('foo.db', sa_pool_key="a")
c2 = manager.connect('foo.db', sa_pool_key="b")
c3 = manager.connect('bar.db', sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
eq_(dbapi.connect.mock_calls,
[
call("foo.db"),
call("foo.db"),
]
)
def test_bad_args(self):
manager = pool.manage(MockDBAPI())
manager.connect(None)
def test_non_thread_local_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_threadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def test_threadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
dbapi = MockDBAPI()
for p in pool.QueuePool(creator=dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
is_not_(c.connection, c2.connection)
assert not c2.info
assert 'foo2' in c.info
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append('R')
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append('C')
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append('CL')
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ['R', 'CL', 'R'])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ['R', 'CL', 'R'])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R'])
def test_null_pool(self):
self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL'])
def test_static_pool(self):
self._do_test(pool.StaticPool, ['R', 'R'])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append('reset')
event.listen(p, 'reset', reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'invalidate', canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'soft_invalidate', canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'close', canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'detach', canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'close_detached', canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['reset'])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary,
["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
# test [ticket:2964]
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, 'first_connect')
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, 'connect')
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(.02)
c1.close()
time.sleep(.02)
threads = []
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
eq_(evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect()]
)
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.requires.predictable_gc
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print("connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print("first_connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print("checkout(%s, %s, %s)" % (con, record, proxy))
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print("checkin(%s, %s)" % (con, record))
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1)
def status(pool):
return pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError:
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=.05),
pool_size=2,
max_overflow=1, use_threadlocal=False, timeout=3)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
time.sleep(2)
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect())
c2 = self._with_teardown(p.connect())
c3 = self._with_teardown(p.connect())
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(
target=run_test, args=("success_one", p, False)),
threading.Thread(
target=run_test, args=("success_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_one", p, True)),
threading.Thread(
target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False))
]
for t in threads:
t.start()
time.sleep(.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[call("success_one"), call("success_two"),
call("overflow_two"), call("overflow_three"),
call("overflow_one")]
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
mutex.acquire()
try:
return dbapi.connect()
finally:
mutex.release()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(.1)
conn.close()
c1 = p.connect()
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(target=waiter,
args=(p, timeout, max_overflow))
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called _ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(creator=creator,
pool_size=1, timeout=None,
max_overflow=0)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.start()
threads.append(t)
time.sleep(.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=dbapi.connect,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
pool._refs.clear()
p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0])
)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
        ConnectionFairy with an ambiguous counter. i.e. it's not true
reference counting."""
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(.5)
c3 = p.connect()
assert id(c3.connection) != c_id
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2_rec = c2._connection_record
c2.invalidate(soft=True)
assert c2_rec.connection is c2.connection
c2.close()
time.sleep(.5)
c3 = p.connect()
assert id(c3.connection) != c_id
assert c3._connection_record is c2_rec
assert c2_rec.connection is c3.connection
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record,
pool, ref, echo, fairy=None):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback")
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy)
return patch.object(
pool, '_finalize_fairy', assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises(
Exception,
p.connect
)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect())
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect())
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2, recycle=1)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(
Exception,
p.connect
)
p._pool.queue = collections.deque(
[
c for c in p._pool.queue
if c.connection is not None
]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[
call.connect(ANY, ANY),
call.checkout(ANY, ANY, ANY)
]
)
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info['x'] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert 'x' in conn_rec.info
assert_raises(
Exception,
p.connect
)
p._pool.queue = collections.deque(
[
c for c in p._pool.queue
if c.connection is not None
]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
# return 8 back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(Error(), "statement", {},
Mock(), Mock())
except tsa.exc.DBAPIError:
pass
# run an error + invalidate operation on the remaining 7 open
        # connections
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn, ))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(reset_on_return=None, pool_size=1,
max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect()
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return='rollback')
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return='commit')
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_rollback(self):
dbapi, p = self._fixture(reset_on_return='rollback')
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert dbapi.connect().special_rollback.called
assert not dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 1)
eq_(dbapi.connect().special_commit.call_count, 0)
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_commit(self):
dbapi, p = self._fixture(reset_on_return='commit')
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert not dbapi.connect().special_rollback.called
assert dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 0)
eq_(dbapi.connect().special_commit.call_count, 1)
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
@testing.requires.threading_with_mock
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect()
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls([
call('foo.db'),
call('foo.db')],
any_order=True)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
datasets.py
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This dataset module supports various formats of datasets, including ImageNet, TFData,
MNIST, Cifar10/100, Manifest, MindRecord, and more. This module loads data with
high performance and parses data precisely. Some of the operations that are
provided to users to preprocess data include shuffle, batch, repeat, map, and zip.
"""
import atexit
import builtins
import glob
import json
import math
import os
import signal
import stat
import time
import uuid
import multiprocessing
from multiprocessing.pool import RUN, TERMINATE
from multiprocessing.util import Finalize
import queue
from enum import Enum
from functools import partial
from importlib import import_module
import sys
import threading
import copy
import weakref
import platform
import psutil
import numpy as np
from scipy.io import loadmat
from PIL import Image
import mindspore._c_dataengine as cde
from mindspore._c_expression import typing
from mindspore.common import Tensor
from mindspore import log as logger
from mindspore.parallel._ps_context import _is_role_pserver, _is_role_sched
from mindspore.parallel._utils import _get_device_num
from mindspore.dataset.engine.offload import GetOffloadModel
import mindspore.dataset.transforms.py_transforms as py_transforms
from mindspore.dataset.text.utils import SentencePieceModel, DE_C_INTER_SENTENCEPIECE_MODE
from . import samplers
from .iterators import DictIterator, TupleIterator, DummyIterator, check_iterator_cleanup, _set_iterator_cleanup, \
ITERATORS_LIST, _unset_iterator_cleanup
from .queue import _SharedQueue
from .validators import check_batch, check_shuffle, check_map, check_filter, check_repeat, check_skip, check_zip, \
check_rename, check_numpyslicesdataset, check_device_send, check_take, check_project, check_imagefolderdataset, \
check_mnist_cifar_dataset, check_manifestdataset, check_tfrecorddataset, check_vocdataset, check_cocodataset, \
check_celebadataset, check_minddataset, check_generatordataset, check_sync_wait, check_zip_dataset, \
check_add_column, check_textfiledataset, check_concat, check_random_dataset, check_split, \
check_bucket_batch_by_length, check_cluedataset, check_save, check_csvdataset, check_paddeddataset, \
check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send, check_flickr_dataset, \
check_sb_dataset, check_flowers102dataset, check_cityscapes_dataset, check_usps_dataset, check_div2k_dataset, \
check_sbu_dataset, check_qmnist_dataset, check_emnist_dataset, check_fake_image_dataset, check_places365_dataset, \
check_photo_tour_dataset, check_ag_news_dataset, check_dbpedia_dataset, check_lj_speech_dataset, \
check_yes_no_dataset, check_speech_commands_dataset, check_tedlium_dataset, check_svhn_dataset, \
check_stl10_dataset, check_yelp_review_dataset, check_penn_treebank_dataset, check_iwslt2016_dataset, \
check_iwslt2017_dataset, check_sogou_news_dataset, check_yahoo_answers_dataset, check_udpos_dataset, \
check_conll2000_dataset, check_amazon_review_dataset, check_semeion_dataset, check_caltech101_dataset, \
check_caltech256_dataset, check_wiki_text_dataset, check_imdb_dataset, check_wider_face_dataset
from ..core.config import get_callback_timeout, _init_device_info, get_enable_shared_mem, get_num_parallel_workers, \
get_prefetch_size
from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist
from ..core.validator_helpers import replace_none
from ..core.py_util_helpers import ExceptionHandler
from ..transforms.py_transforms_util import FuncWrapper
try:
context = import_module("mindspore.context")
except ModuleNotFoundError:
context = None
if platform.system().lower() == "darwin" and multiprocessing.get_start_method() != "fork":
multiprocessing.set_start_method("fork", True)
OffloadToManualOffloadMode = {
None: cde.ManualOffloadMode.UNSPECIFIED,
False: cde.ManualOffloadMode.DISABLED,
True: cde.ManualOffloadMode.ENABLED
}
class Shuffle(str, Enum):
"""Specify the shuffle mode.
- GLOBAL: Shuffle both the files and samples.
- FILES: Shuffle files only.
- INFILE: Shuffle data within each file.
"""
GLOBAL: str = "global"
FILES: str = "files"
INFILE: str = "infile"
ShuffleToShuffleMode = {Shuffle.FILES: cde.ShuffleMode.FILES,
Shuffle.GLOBAL: cde.ShuffleMode.GLOBAL,
Shuffle.INFILE: cde.ShuffleMode.INFILE}
def shuffle_to_shuffle_mode(shuffle):
"""
Convert a Shuffle enum value to the C-layer shuffle mode.
Args:
shuffle (Shuffle): shuffle flag to be converted.
Returns:
ShuffleMode, the corresponding C-layer shuffle mode.
"""
shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle
if not isinstance(shuffle, Shuffle):
if shuffle is None or shuffle:
shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle
else:
shuffle_mode = cde.ShuffleMode.FALSE # No shuffle
else:
shuffle_mode = ShuffleToShuffleMode[shuffle]
return shuffle_mode
def shuffle_to_bool(shuffle):
"""
Convert a Shuffle enum value to a bool.
Args:
shuffle (Shuffle): shuffle flag to be converted.
Returns:
bool or None, the boolean shuffle flag (None when shuffle is None).
"""
shuffle_bool = True
if not isinstance(shuffle, Shuffle):
if shuffle is None:
shuffle_bool = None
elif shuffle:
shuffle_bool = True
else:
shuffle_bool = False
else:
shuffle_bool = True
return shuffle_bool
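# A minimal usage sketch of the two helpers above (illustrative only; assumes the
# cde bindings imported at the top of this module are available):
#
#     shuffle_to_shuffle_mode(Shuffle.FILES)   # -> cde.ShuffleMode.FILES
#     shuffle_to_shuffle_mode(None)            # -> cde.ShuffleMode.GLOBAL (default)
#     shuffle_to_shuffle_mode(False)           # -> cde.ShuffleMode.FALSE (no shuffle)
#     shuffle_to_bool(None)                    # -> None
#     shuffle_to_bool(Shuffle.INFILE)          # -> True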
@check_zip
def zip(datasets):
"""
Zip the datasets in the input tuple of datasets.
Args:
datasets (tuple of class Dataset): A tuple of datasets to be zipped together.
The number of datasets must be more than 1.
Returns:
Dataset, dataset zipped.
Raises:
ValueError: If the number of datasets is 1.
TypeError: If datasets is not a tuple.
Examples:
>>> # Create a dataset which is the combination of dataset_1 and dataset_2
>>> dataset = ds.zip((dataset_1, dataset_2))
"""
if len(datasets) <= 1:
raise ValueError(
"Can't zip empty or just one dataset!")
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("Invalid dataset, expected Dataset object, but got %s!" % type(dataset))
return ZipDataset(datasets)
def _get_operator_process():
"""
Internal method, mainly for passing sub-process ids to the C layer.
Returns:
tuple, a dict mapping each operator id to its worker process ids, and a bool indicating whether all process ids have been fetched.
"""
global _OP_PROCESS
process_info = _OP_PROCESS
op_process = dict()
keys = process_info.keys()
fetched_all = True
for key in keys:
op_process[key] = list(process_info[key][1])
item_full = (len(process_info[key][1]) == process_info[key][0])
fetched_all = fetched_all and item_full
return op_process, fetched_all
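# The value returned by _get_operator_process() has the following shape (the
# operator ids and worker pids below are made-up placeholders):
#
#     ({1: [11201, 11202], 2: [11305]}, True)
#     # op_id -> worker pids, plus a flag saying whether all expected pids were fetched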
def _set_dataset_permissions(file_name, num_files):
"""
Set the permissions of saved dataset files to 600.
The dataset filename rule must be the same as the one used in the C++ layer.
"""
num_digits = len(str(num_files - 1))
if num_files == 1:
paths = [file_name]
else:
paths = ["{}{}".format(file_name, str(x).rjust(num_digits, '0')) for x in range(num_files)]
for item in paths:
if os.path.exists(item):
os.chmod(item, stat.S_IRUSR | stat.S_IWUSR)
index_file = item + ".db"
if os.path.exists(index_file):
os.chmod(index_file, stat.S_IRUSR | stat.S_IWUSR)
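# A sketch of the naming rule implemented above: with file_name="out.mindrecord"
# and num_files=3, the files chmod-ed are "out.mindrecord0", "out.mindrecord1"
# and "out.mindrecord2", plus the matching ".db" index files when they exist.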
class Dataset:
"""
Abstract class to represent a dataset in DataEngine's data pipeline.
This class is the base class of SourceDataset and of the other dataset operators, and represents
a node in the data flow graph.
Args:
num_parallel_workers (int, optional): Number of workers to process the dataset in parallel
(default=None).
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
# Note: children and parent are internal variables, not recommended for external use.
self.children = replace_none(children, [])
if isinstance(self.children, tuple):
self.children = list(self.children)
if not isinstance(self.children, list):
self.children = [self.children]
self.parent = []
for child in self.children:
child.parent.append(weakref.ref(self))
self.num_parallel_workers = num_parallel_workers
self.cache = cache
self._device_iter = 0
self._input_indexs = ()
self.saved_output_types = None
self.saved_output_shapes = None
self.dynamic_setting = [False, None]
self.saved_min_shapes = None
self.saved_max_shapes = None
self._col_names = None
self.dataset_size = None
self._batch_size = None
self._num_classes = None
self._repeat_count = None
self._class_indexing = None
self._sync = False
def create_ir_tree(self):
"""
Internal method to build an IR tree.
Returns:
DatasetNode, the root node of the IR tree.
Dataset, the root dataset of the IR tree.
"""
parent = self.parent
self.parent = []
dataset = copy.deepcopy(self)
global _OP_NAME
_OP_NAME = Dataset._get_operator_id(dataset)
ir_tree = dataset.parse_tree()
self.parent = parent
_init_device_info()
return ir_tree, dataset
def close_pool(self):
"""
Close the multiprocessing pool in the dataset. If you are familiar with the multiprocessing library, you can regard this
as a destructor for the processing pool object.
"""
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
for child in self.children:
child.close_pool()
def notify_watchdog(self):
"""
Close the watchdog thread in the dataset. GeneratorDataset/map/batch use a thread named watch_dog to monitor
their worker processes; get_dataset_size/output_shapes/output_types/get_col_names/num_classes need notify_watchdog
to close the watch_dog thread manually.
"""
if hasattr(self, 'sample_fn') and self.sample_fn is not None:
if self.sample_fn.multi_process:
self.sample_fn._abort_watchdog() # pylint: disable=W0212
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
for child in self.children:
child.notify_watchdog()
@staticmethod
def _get_operator_id(dataset):
"""
Internal method to iterate the tree and obtain op_id of each operator.
Returns:
dict, a mapping from each operator in the tree to its operator id.
"""
op_name = dict()
generator_process = dict()
op_name[str(dataset)] = 0
op_id = 1
def process_name(datasets, operator_id):
if not datasets:
return 0
temp = []
for item in datasets:
for d in item.children:
temp.append(d)
op_name[str(d)] = operator_id
if isinstance(d, GeneratorDataset) and d.sample_fn and d.sample_fn.pids:
generator_process[operator_id] = [d.num_parallel_workers, set(d.sample_fn.pids)]
operator_id = operator_id + 1
return process_name(temp, operator_id)
process_name([dataset], op_id)
if generator_process:
global _OP_PROCESS
_OP_PROCESS.update(generator_process)
return op_name
def parse_tree(self):
"""
Internal method to parse the API tree into an IR tree.
Returns:
DatasetNode, the root node of the IR tree.
"""
if len(self.parent) > 1:
raise ValueError("The data pipeline is not a tree (i.e., one node has 2 consumers)")
ir_children = [d.parse_tree() for d in self.children]
# Bootstrap can only be performed on a copy of the original dataset node.
# Bootstrap on original dataset node will make all iterators share the same process pool
self.iterator_bootstrap()
ir_node = self.parse(ir_children)
ir_node = self.post_parse(ir_node)
return ir_node
def __safe_deepcopy__(self, memodict, exclude=()):
if id(self) in memodict:
return memodict[id(self)]
cls = self.__class__
new_op = cls.__new__(cls)
memodict[id(self)] = new_op
for arg, value in self.__dict__.items():
if arg in exclude:
setattr(new_op, arg, value)
else:
try:
setattr(new_op, arg, copy.deepcopy(value, memodict))
except TypeError:
setattr(new_op, arg, value)
return new_op
def iterator_bootstrap(self):
pass
@staticmethod
def _noop_mode():
if _is_role_sched() or _is_role_pserver():
return True
return False
def __add__(self, datasets):
return self.concat(datasets)
def to_json(self, filename=""):
"""
Serialize the pipeline into a JSON string and dump it into a file if filename is provided.
Args:
filename (str): filename of JSON file to be saved as (default="").
Returns:
str, JSON string of the pipeline.
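Examples:
>>> # a minimal usage sketch; dataset is assumed to be an instance object of Dataset
>>> # and the file path below is a placeholder
>>> json_str = dataset.to_json("./pipeline.json")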
"""
ir_tree, _ = self.create_ir_tree()
return json.loads(ir_tree.to_json(filename))
@check_bucket_batch_by_length
def bucket_batch_by_length(self, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function=None,
pad_info=None, pad_to_bucket_boundary=False, drop_remainder=False):
"""
Bucket elements according to their lengths. Each bucket will be padded and batched when
it is full.
A length function is called on each row in the dataset. The row is then
bucketed based on its length and bucket boundaries. When a bucket reaches its
corresponding size specified in bucket_batch_sizes, the entire bucket will be
padded according to pad_info, and then form a batch.
Each batch will be full, except one special case: the last batch for each bucket may not be full.
Args:
column_names (list[str]): Columns passed to element_length_function.
bucket_boundaries (list[int]): A list consisting of the upper boundaries
of the buckets. Must be strictly increasing. If there are n boundaries,
n+1 buckets are created: One bucket for [0, bucket_boundaries[0]), one
bucket for [bucket_boundaries[i], bucket_boundaries[i+1]) for each
0<=i<n-1, and the last bucket for [bucket_boundaries[n-1], inf).
bucket_batch_sizes (list[int]): A list consisting of the batch sizes for
each bucket. Must contain len(bucket_boundaries)+1 elements.
element_length_function (Callable, optional): A function that takes in
M arguments where M = len(column_names) and returns an integer. If no value is
provided, M (that is, len(column_names)) must be 1, and the size of the first
dimension of that column will be taken as the length (default=None).
pad_info (dict, optional): The information about how to batch each column. The key
corresponds to the column name, and the value must be a tuple of 2 elements.
The first element corresponds to the shape to pad to, and the second
element corresponds to the value to pad with. If a column is not
specified, then that column will be padded to the longest in the current
batch, and 0 will be used as the padding value. Any None dimensions will
be padded to the longest in the current batch, unless
pad_to_bucket_boundary is True. If no padding is wanted, set pad_info
to None (default=None).
pad_to_bucket_boundary (bool, optional): If True, will pad each None
dimension in pad_info to the bucket_boundary minus 1. If there are any
elements that fall into the last bucket, an error will occur
(default=False).
drop_remainder (bool, optional): If True, will drop the last batch for each
bucket if it is not a full batch (default=False).
Returns:
Dataset, dataset bucketed and batched by length.
Examples:
>>> # Create a dataset where rows are bucketed by length, padded, and
>>> # combined into batches according to each bucket's batch size.
>>> import numpy as np
>>> def generate_2_columns(n):
... for i in range(n):
... yield (np.array([i]), np.array([j for j in range(i + 1)]))
>>>
>>> column_names = ["col1", "col2"]
>>> dataset = ds.GeneratorDataset(generate_2_columns(8), column_names)
>>> bucket_boundaries = [5, 10]
>>> bucket_batch_sizes = [2, 1, 1]
>>> element_length_function = (lambda col1, col2: max(len(col1), len(col2)))
>>> # Will pad col2 to shape [bucket_boundaries[i]] where i is the
>>> # index of the bucket that is currently being batched.
>>> pad_info = {"col2": ([None], -1)}
>>> pad_to_bucket_boundary = True
>>> dataset = dataset.bucket_batch_by_length(column_names, bucket_boundaries,
... bucket_batch_sizes,
... element_length_function, pad_info,
... pad_to_bucket_boundary)
"""
return BucketBatchByLengthDataset(self, column_names, bucket_boundaries, bucket_batch_sizes,
element_length_function, pad_info, pad_to_bucket_boundary, drop_remainder)
@check_batch
def batch(self, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, output_columns=None, column_order=None, pad_info=None,
python_multiprocessing=False, max_rowsize=16):
"""
Combine batch_size number of consecutive rows into batches.
For any child node, a batch is treated as a single row.
For any column, all the elements within that column must have the same shape.
If a per_batch_map callable is provided, it will be applied to the batches of tensors.
Note:
The order of using repeat and batch affects the number of batches and the behavior of per_batch_map.
It is recommended that the repeat operation be applied after the batch operation.
Args:
batch_size (int or function): The number of rows each batch is created with. An
int or callable object which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last block
whose data row number is less than batch size (default=False). If True, and if there are less
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers(threads) to process the dataset in parallel
(default=None).
per_batch_map (callable, optional): Per batch map callable (default=None). A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch
of Tensors on a given column. The number of lists should match with the number of entries in
input_columns. The last parameter of the callable should always be a BatchInfo object. Per_batch_map
should return (list[Tensor], list[Tensor], ...). The length of each list in output should be the same as
the input. output_columns is required if the number of output lists is different from input.
input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list
should match with signature of per_batch_map callable (default=None).
output_columns (Union[str, list[str]], optional): List of names assigned to the columns
outputted by the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole
dataset (default=None). The parameter is required when len(input_column) != len(output_column).
Caution: the list here is not just the columns specified in parameter input_columns and output_columns.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
would pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0
(default=None).
python_multiprocessing (bool, optional): Parallelize Python function per_batch_map with multi-processing.
This option could be beneficial if the function is computational heavy (default=False).
max_rowsize (int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
Returns:
BatchDataset, dataset batched.
Examples:
>>> # Create a dataset where every 100 rows are combined into a batch
>>> # and drops the last incomplete batch if there is one.
>>> dataset = dataset.batch(100, True)
>>> # resize image according to its batch number, if it's 5-th batch, resize to (5^2, 5^2) = (25, 25)
>>> def np_resize(col, batchInfo):
... output = col.copy()
... s = (batchInfo.get_batch_num() + 1) ** 2
... index = 0
... for c in col:
... img = Image.fromarray(c.astype('uint8')).convert('RGB')
... img = img.resize((s, s), Image.ANTIALIAS)
... output[index] = np.array(img)
... index += 1
... return (output,)
>>> dataset = dataset.batch(batch_size=8, input_columns=["image"], per_batch_map=np_resize)
"""
return BatchDataset(self, batch_size, drop_remainder, num_parallel_workers, per_batch_map, input_columns,
output_columns, column_order, pad_info, python_multiprocessing, max_rowsize)
@check_sync_wait
def sync_wait(self, condition_name, num_batch=1, callback=None):
"""
Add a blocking condition to the input Dataset. A synchronize action will be applied.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
num_batch (int): the number of batches without blocking at the start of each epoch (default=1).
callback (function): The callback function that will be invoked when sync_update is called (default=None).
Returns:
SyncWaitDataset, dataset added a blocking condition.
Raises:
RuntimeError: If condition name already exists.
Examples:
>>> import numpy as np
>>> def gen():
... for i in range(100):
... yield (np.array(i),)
>>>
>>> class Augment:
... def __init__(self, loss):
... self.loss = loss
...
... def preprocess(self, input_):
... return input_
...
... def update(self, data):
... self.loss = data["loss"]
>>>
>>> batch_size = 4
>>> dataset = ds.GeneratorDataset(gen, column_names=["input"])
>>>
>>> aug = Augment(0)
>>> dataset = dataset.sync_wait(condition_name="policy", callback=aug.update)
>>> dataset = dataset.map(operations=[aug.preprocess], input_columns=["input"])
>>> dataset = dataset.batch(batch_size)
>>> count = 0
>>> for data in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
... assert data["input"][0] == count
... count += batch_size
... data = {"loss": count}
... dataset.sync_update(condition_name="policy", data=data)
"""
return SyncWaitDataset(self, condition_name, num_batch, callback)
@check_shuffle
def shuffle(self, buffer_size):
"""
Randomly shuffles the rows of this dataset using the following policy:
1. Make a shuffle buffer that contains the first buffer_size rows.
2. Randomly select an element from the shuffle buffer to be the next row
propagated to the child node.
3. Get the next row (if any) from the parent node and put it in the shuffle buffer.
4. Repeat steps 2 and 3 until there are no more rows left in the shuffle buffer.
A random seed can be provided to be used on the first epoch. In every subsequent
epoch, the seed is changed to a new, randomly generated value.
Args:
buffer_size (int): The size of the buffer (must be larger than 1) for
shuffling. Setting buffer_size equal to the number of rows in the entire
dataset will result in a global shuffle.
Returns:
Dataset, dataset shuffled.
Raises:
RuntimeError: If sync operators exist before shuffle.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Optionally set the seed for the first epoch
>>> ds.config.set_seed(58)
>>> # Create a shuffled dataset using a shuffle buffer of size 4
>>> dataset = dataset.shuffle(4)
"""
return ShuffleDataset(self, buffer_size)
def flat_map(self, func):
"""
Map `func` to each row in dataset and flatten the result.
The specified `func` is a function that must take one 'Ndarray' as input
and return a 'Dataset'.
Args:
func (function): A function that must take one 'Ndarray' as an argument and
return a 'Dataset'.
Returns:
Dataset, dataset applied by the function.
Examples:
>>> # use NumpySlicesDataset as an example
>>> dataset = ds.NumpySlicesDataset([[0, 1], [2, 3]])
>>>
>>> def flat_map_func(array):
... # create a NumpySlicesDataset with the array
... dataset = ds.NumpySlicesDataset(array)
... # repeat the dataset twice
... dataset = dataset.repeat(2)
... return dataset
>>>
>>> dataset = dataset.flat_map(flat_map_func)
>>> # [[0, 1], [0, 1], [2, 3], [2, 3]]
Raises:
TypeError: If `func` is not a function.
TypeError: If `func` doesn't return a Dataset.
"""
dataset = None
if not hasattr(func, '__call__'):
logger.critical("func must be a function.")
raise TypeError("func must be a function.")
for row_data in self.create_tuple_iterator(output_numpy=True):
if dataset is None:
dataset = func(row_data)
else:
dataset += func(row_data)
if not isinstance(dataset, Dataset):
logger.critical("flat_map must return a Dataset object.")
raise TypeError("flat_map must return a Dataset object.")
return dataset
@check_map
def map(self, operations, input_columns=None, output_columns=None, column_order=None,
num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None,
max_rowsize=16, offload=None):
"""
Apply each operation in operations to this dataset.
The order of operations is determined by the position of each operation in the operations parameter.
operations[0] will be applied first, then operations[1], then operations[2], etc.
Each operation will be passed one or more columns from the dataset as input, and zero or
more columns will be outputted. The first operation will be passed the columns specified
in input_columns as input. If there is more than one operator in operations, the outputted
columns of the previous operation are used as the input columns for the next operation.
The columns outputted by the very last operation will be assigned names specified by
output_columns.
Only the columns specified in column_order will be propagated to the child node. These
columns will be in the same order as specified in column_order.
Args:
operations (Union[list[TensorOp], list[functions]]): List of operations to be
applied on the dataset. Operations are applied in the order they appear in this list.
input_columns (Union[str, list[str]], optional): List of the names of the columns that will be passed to
the first operation as input. The size of this list must match the number of
input columns expected by the first operator. (default=None, the first
operation will be passed however many columns that are required, starting from
the first column).
output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (list[str], optional): Specifies the list of all the columns you need in the whole
dataset (default=None). The parameter is required when len(input_column) != len(output_column).
Caution: the list here is not just the columns specified in parameter input_columns and output_columns.
num_parallel_workers (int, optional): Number of threads used to process the dataset in
parallel (default=None, the value from the configuration will be used).
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This
option could be beneficial if the Python operation is computational heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None).
max_rowsize (int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (Default=16).
offload (bool, optional): Flag to indicate whether offload is used (Default=None).
Note:
- Input `operations` mainly accepts c_transforms and py_transforms operators from mindspore.dataset, plus
user-defined Python functions (PyFuncs).
- Do not add network computing operators from mindspore.nn, mindspore.ops or other modules into
`operations`.
Returns:
Dataset, dataset after mapping operation.
Examples:
>>> # dataset is an instance of Dataset which has 2 columns, "image" and "label".
>>>
>>> # Define two operations, where each operation accepts 1 input column and outputs 1 column.
>>> decode_op = c_vision.Decode(rgb=True)
>>> random_jitter_op = c_vision.RandomColorAdjust(brightness=(0.8, 0.8), contrast=(1, 1),
... saturation=(1, 1), hue=(0, 0))
>>>
>>> # 1) Simple map example.
>>>
>>> # Apply decode_op on column "image". This column will be replaced by the outputted
>>> # column of decode_op. Since column_order is not provided, both columns "image"
>>> # and "label" will be propagated to the child node in their original order.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"])
>>>
>>> # Decode and rename column "image" to "decoded_image".
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"], output_columns=["decoded_image"])
>>>
>>> # Specify the order of the output columns.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=None, column_order=["label", "image"])
>>>
>>> # Rename column "image" to "decoded_image" and also specify the order of the output columns.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=["decoded_image"], column_order=["label", "decoded_image"])
>>>
>>> # Rename column "image" to "decoded_image" and keep only this column.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=["decoded_image"], column_order=["decoded_image"])
>>>
>>> # A simple example for mapping pyfunc. Renaming columns and specifying column order
>>> # work in the same way as the previous examples.
>>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=["data"])
>>> dataset = dataset.map(operations=[(lambda x: x + 1)], input_columns=["data"])
>>>
>>> # 2) Map example with more than one operation.
>>>
>>> # Create a dataset where the images are decoded, then randomly color jittered.
>>> # decode_op takes column "image" as input and outputs one column. The column
>>> # outputted by decode_op is passed as input to random_jitter_op.
>>> # random_jitter_op will output one column. Column "image" will be replaced by
>>> # the column outputted by random_jitter_op (the very last operation). All other
>>> # columns are unchanged. Since column_order is not specified, the order of the
>>> # columns will remain the same.
>>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=["image"])
>>>
>>> # Rename the column outputted by random_jitter_op to "image_mapped".
>>> # Specifying column order works in the same way as examples in 1).
>>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=["image"],
... output_columns=["image_mapped"])
>>>
>>> # Map with multiple operations using pyfunc. Renaming columns and specifying column order
>>> # work in the same way as examples in 1).
>>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=["data"])
>>> dataset = dataset.map(operations=[(lambda x: x * x), (lambda x: x - 1)], input_columns=["data"],
... output_columns=["data_mapped"])
>>>
>>> # 3) Example where number of input columns is not equal to number of output columns.
>>>
>>> # operations[0] is a lambda that takes 2 columns as input and outputs 3 columns.
>>> # operations[1] is a lambda that takes 3 columns as input and outputs 1 column.
>>> # operations[2] is a lambda that takes 1 column as input and outputs 4 columns.
>>> #
>>> # Note: The number of output columns of operation[i] must equal the number of
>>> # input columns of operation[i+1]. Otherwise, this map call will also result
>>> # in an error.
>>> operations = [(lambda x, y: (x, x + y, x + y + 1)),
... (lambda x, y, z: x * y * z),
... (lambda x: (x % 2, x % 3, x % 5, x % 7))]
>>>
>>> # Note: Since the number of input columns is not the same as the number of
>>> # output columns, the output_columns and column_order parameters must be
>>> # specified. Otherwise, this map call will also result in an error.
>>>
>>> dataset = ds.NumpySlicesDataset(data=([[0, 1, 2]], [[3, 4, 5]]), column_names=["x", "y"])
>>>
>>> # Propagate all columns to the child node in this order:
>>> dataset = dataset.map(operations, input_columns=["x", "y"],
... output_columns=["mod2", "mod3", "mod5", "mod7"],
... column_order=["mod2", "mod3", "mod5", "mod7"])
>>>
>>> # Propagate some columns to the child node in this order:
>>> dataset = dataset.map(operations, input_columns=["x", "y"],
... output_columns=["mod2", "mod3", "mod5", "mod7"],
... column_order=["mod7", "mod3", "col2"])
"""
if hasattr(self, 'operator_mixed') and getattr(self, 'operator_mixed') is True:
num_parallel_workers = 1
logger.warning(
"Input 'operations' of 'map' includes network computing operators like in mindspore.nn, mindspore.ops, "
"mindspore.numpy module and etc, which do not support multi-thread compiling, recommend to replace it "
"with python implemented operator like numpy etc. Here decrease 'num_parallel_workers' into 1.")
return MapDataset(self, operations, input_columns, output_columns, column_order, num_parallel_workers,
python_multiprocessing, cache, callbacks, max_rowsize, offload)
@check_filter
def filter(self, predicate, input_columns=None, num_parallel_workers=None):
"""
Filter dataset by predicate.
Note:
If input_columns is not provided or is empty, all columns will be used.
Args:
predicate (callable): Python callable which returns a boolean value. If False then filter the element.
input_columns (Union[str, list[str]], optional): List of names of the input columns, when
default=None, the predicate will be applied on all columns in the dataset.
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
Returns:
Dataset, dataset filtered.
Examples:
>>> # generator data(0 ~ 63)
>>> # filter out the data that is greater than or equal to 11
>>> dataset = dataset.filter(predicate=lambda data: data < 11, input_columns = ["data"])
"""
return FilterDataset(self, predicate, input_columns, num_parallel_workers)
@check_repeat
def repeat(self, count=None):
"""
Repeat this dataset `count` times. Repeat infinitely if the count is None or -1.
Note:
The order of using repeat and batch affects the number of batches. It is recommended that
the repeat operation be used after the batch operation.
Args:
count (int): Number of times the dataset is going to be repeated (default=None).
Returns:
Dataset, dataset repeated.
Examples:
>>> # dataset is an instance object of Dataset
>>>
>>> # Create a dataset where the dataset is repeated for 50 epochs
>>> dataset = dataset.repeat(50)
>>>
>>> # Create a dataset where each epoch is shuffled individually
>>> dataset = dataset.shuffle(10)
>>> dataset = dataset.repeat(50)
>>>
>>> # Create a dataset where the dataset is first repeated for
>>> # 50 epochs before shuffling. The shuffle operator will treat
>>> # the entire 50 epochs as one big dataset.
>>> dataset = dataset.repeat(50)
>>> dataset = dataset.shuffle(10)
"""
return RepeatDataset(self, count)
@check_skip
def skip(self, count):
"""
Skip the first N elements of this dataset.
Args:
count (int): Number of elements in the dataset to be skipped.
Returns:
Dataset, dataset that contains the remaining rows after the first `count` rows are skipped.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Create a dataset which skips first 3 elements from data
>>> dataset = dataset.skip(3)
"""
return SkipDataset(self, count)
@check_take
def take(self, count=-1):
"""
Take at most the given number of elements from the dataset.
Note:
1. If count is greater than the number of elements in the dataset or equal to -1,
all the elements in dataset will be taken.
2. The order of using take and batch matters. If take is before batch operation,
then take the given number of rows; otherwise take the given number of batches.
Args:
count (int, optional): Number of elements to be taken from the dataset (default=-1).
Returns:
Dataset, dataset taken.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Create a dataset where the dataset includes 50 elements.
>>> dataset = dataset.take(50)
"""
return TakeDataset(self, count)
def _get_absolute_split_sizes(self, sizes):
"""
Internal method called by split to calculate absolute split sizes and to
do some error checking after calculating absolute split sizes.
Returns:
int, absolute split sizes of the dataset.
"""
# Call get_dataset_size and check the input here, because we don't want to
# call it once in check_split and then again here.
dataset_size = self.get_dataset_size()
if dataset_size is None or dataset_size <= 0:
raise RuntimeError("dataset_size is unknown, unable to split.")
if not isinstance(sizes, list):
raise RuntimeError("sizes must be a list.")
all_int = all(isinstance(item, int) for item in sizes)
if all_int:
sizes_sum = sum(sizes)
if sizes_sum != dataset_size:
raise RuntimeError("Sum of split sizes {} is not equal to dataset size {}."
.format(sizes_sum, dataset_size))
return sizes
absolute_sizes = []
for item in sizes:
absolute_size = int(round(item * dataset_size))
if absolute_size == 0:
raise RuntimeError("Split percentage {} is too small.".format(item))
absolute_sizes.append(absolute_size)
absolute_sizes_sum = sum(absolute_sizes)
# if we still need more rows, give them to the first split.
# if we have too many rows, remove the extras from the first split that has
# enough rows.
size_difference = int(dataset_size - absolute_sizes_sum)
if size_difference > 0:
absolute_sizes[0] += size_difference
else:
for i, _ in enumerate(absolute_sizes):
if absolute_sizes[i] + size_difference > 0:
absolute_sizes[i] += size_difference
break
if sum(absolute_sizes) != dataset_size:
raise RuntimeError("Sum of calculated split sizes {} is not equal to dataset size {}."
.format(sum(absolute_sizes), dataset_size))
return absolute_sizes
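# A worked sketch of the adjustment above (illustrative only): with
# dataset_size=3 and sizes=[0.5, 0.5], both splits round to 2 rows, the sum
# overshoots the dataset size by 1, and that extra row is removed from the
# first split that still has a row to spare, giving absolute sizes [1, 2].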
@check_split
def split(self, sizes, randomize=True):
"""
Split the dataset into smaller, non-overlapping datasets.
This is a general purpose split function which can be called from any operator in the pipeline.
There is another, optimized split function, which will be called automatically if ds.split is
called where ds is a MappableDataset.
Args:
sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all input sizes does not equal the original dataset size, an
error will be thrown.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will be thrown. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
- Any size equals 0, an error will occur.
- The sum of split sizes < K, the difference of K - sigma(round(fi * k)) will be added to the first
split.
- The sum of split sizes > K, the difference of sigma(round(fi * K)) - K will be removed from the first
large enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): Determines whether or not to split the data randomly (default=True).
If True, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. Dataset cannot be sharded if split is going to be called.
2. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the
floats don't sum to 1.
Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> # TextFileDataset is not a mappable dataset, so this non-optimized split will be called.
>>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> dataset = ds.TextFileDataset(text_file_dataset_dir, shuffle=False)
>>> train_dataset, test_dataset = dataset.split([0.9, 0.1])
"""
if self.is_shuffled():
logger.warning("Dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("Dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
rows_to_skip = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
if randomize:
# want to shuffle the same way every epoch before split
# in alter_tree, shuffle buffer is minimum 10000, so use 10000 here
ds = ds.shuffle(10000)
ds.reshuffle_each_epoch = False
if rows_to_skip > 0:
ds = ds.skip(rows_to_skip)
ds = ds.take(size)
splits.append(ds)
rows_to_skip += size
return tuple(splits)
@check_zip_dataset
def zip(self, datasets):
"""
Zip the datasets in the input tuple of datasets with this dataset. Columns in the input datasets must have different
names.
Args:
datasets (Union[tuple, class Dataset]): A tuple of datasets or a single class Dataset
to be zipped together with this dataset.
Returns:
Dataset, dataset zipped.
Examples:
>>> # Create a dataset which is the combination of dataset and dataset_1
>>> dataset = dataset.zip(dataset_1)
"""
if isinstance(datasets, tuple):
datasets = (self, *datasets)
elif isinstance(datasets, Dataset):
datasets = (self, datasets)
else:
raise TypeError("Invalid datasets, expected Dataset object or tuple of Dataset, but got %s!" % datasets)
return ZipDataset(datasets)
@check_concat
def concat(self, datasets):
"""
Concatenate the dataset objects in the input list.
Performing "+" operation on dataset objects can achieve the same effect.
Note:
The column names, and the rank and type of the column data, must be the same in the input datasets.
Args:
datasets (Union[list, class Dataset]): A list of datasets or a single class Dataset
to be concatenated together with this dataset.
Returns:
Dataset, dataset concatenated.
Examples:
>>> # Create a dataset by concatenating dataset_1 and dataset_2 with "+" operator
>>> dataset = dataset_1 + dataset_2
>>> # Create a dataset by concatenating dataset_1 and dataset_2 with concat operation
>>> dataset = dataset_1.concat(dataset_2)
"""
if isinstance(datasets, Dataset):
datasets = [self] + [datasets]
elif isinstance(datasets, list):
datasets = [self] + datasets
else:
raise TypeError("Invalid datasets, expected Dataset object or list of Dataset, but got %s!" % datasets)
return ConcatDataset(datasets)
@check_rename
def rename(self, input_columns, output_columns):
"""
Rename the columns in input datasets.
Args:
input_columns (Union[str, list[str]]): List of names of the input columns.
output_columns (Union[str, list[str]]): List of names of the output columns.
Returns:
Dataset, dataset renamed.
Examples:
>>> # dataset is an instance object of Dataset
>>> input_columns = ["input_col1", "input_col2", "input_col3"]
>>> output_columns = ["output_col1", "output_col2", "output_col3"]
>>>
>>> # Create a dataset where input_col1 is renamed to output_col1, and
>>> # input_col2 is renamed to output_col2, and input_col3 is renamed
>>> # to output_col3.
>>> dataset = dataset.rename(input_columns=input_columns, output_columns=output_columns)
"""
return RenameDataset(self, input_columns, output_columns)
@check_project
def project(self, columns):
"""
Project certain columns in input dataset.
The specified columns will be selected from the dataset and passed into
the pipeline in the order specified. The other columns are discarded.
Args:
columns(Union[str, list[str]]): List of names of the columns to project.
Returns:
Dataset, dataset projected.
Examples:
>>> # dataset is an instance object of Dataset
>>> columns_to_project = ["column3", "column1", "column2"]
>>>
>>> # Create a dataset that consists of column3, column1, column2
>>> # in that order, regardless of the original order of columns.
>>> dataset = dataset.project(columns=columns_to_project)
"""
return ProjectDataset(self, columns)
def apply(self, apply_func):
"""
Apply a function in this dataset.
Args:
apply_func (function): A function that must take one 'Dataset' as an argument and
return a preprocessed 'Dataset'.
Returns:
Dataset, dataset applied by the function.
Examples:
>>> # dataset is an instance object of Dataset
>>>
>>> # Declare an apply_func function which returns a Dataset object
>>> def apply_func(data):
... data = data.batch(2)
... return data
>>>
>>> # Use apply to call apply_func
>>> dataset = dataset.apply(apply_func)
Raises:
TypeError: If apply_func is not a function.
TypeError: If apply_func doesn't return a Dataset.
"""
if not hasattr(apply_func, '__call__'):
raise TypeError("apply_func must be a function.")
dataset = apply_func(self)
if not isinstance(dataset, Dataset):
raise TypeError("apply_func must return a dataset.")
return dataset
@check_device_send
def device_que(self, send_epoch_end=True, create_data_info_queue=False):
"""
Return a transferred Dataset that transfers data through a device.
Args:
send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create queue which stores
types and shapes of data or not (default=False).
Note:
If the device is Ascend, features of the data will be transferred one by one. The limit
on data transmission per transfer is 256M.
Returns:
Dataset, dataset for transferring.
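Examples:
>>> # a minimal usage sketch; dataset is assumed to be an instance object of Dataset
>>> # whose pipeline is already fully defined
>>> transferred_dataset = dataset.device_que()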
"""
return self.to_device(send_epoch_end=send_epoch_end, create_data_info_queue=create_data_info_queue)
@check_device_send
def to_device(self, send_epoch_end=True, create_data_info_queue=False):
"""
Transfer data from CPU to GPU or Ascend or other devices.
Args:
send_epoch_end (bool, optional): Whether to send the end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create queue which stores
types and shapes of data or not (default=False).
Note:
If the device is Ascend, features of the data will be transferred one by one. The limit
on data transmission per transfer is 256M.
Returns:
TransferDataset, dataset for transferring.
Raises:
RuntimeError: If distribution file path is given but failed to read.
"""
return TransferDataset(self, send_epoch_end, create_data_info_queue)
@check_save
def save(self, file_name, num_files=1, file_type='mindrecord'):
"""
Save the dynamic data processed by the dataset pipeline in common dataset format.
Supported dataset formats: 'mindrecord' only
Implicit type casting exists when saving data as 'mindrecord'. The transform table shows how to do type casting.
.. list-table:: Implicit Type Casting when Saving as 'mindrecord'
:widths: 25 25 50
:header-rows: 1
* - Type in 'dataset'
- Type in 'mindrecord'
- Details
* - bool
- None
- Not supported
* - int8
- int32
-
* - uint8
- bytes(1D uint8)
- Drop dimension
* - int16
- int32
-
* - uint16
- int32
-
* - int32
- int32
-
* - uint32
- int64
-
* - int64
- int64
-
* - uint64
- None
- Not supported
* - float16
- float32
-
* - float32
- float32
-
* - float64
- float64
-
* - string
- string
- Multi-dimensional string not supported
Note:
1. To save the samples in order, set dataset's shuffle to False and num_files to 1.
2. Before calling the function, do not use batch operator, repeat operator or data augmentation operators
with random attribute in map operator.
3. When array dimension is variable, one-dimensional arrays or
multi-dimensional arrays with variable dimension 0 are supported.
4. Mindrecord does not support uint64, multi-dimensional uint8(drop dimension) nor
multi-dimensional string.
Args:
file_name (str): Path to dataset file.
num_files (int, optional): Number of dataset files (default=1).
file_type (str, optional): Dataset format (default='mindrecord').
"""
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
consumer = cde.PythonSaveToDisk(file_name, num_files, file_type)
consumer.Init(ir_tree)
runtime_context.AssignConsumer(consumer)
consumer.Save()
_set_dataset_permissions(file_name, num_files)
del api_tree
@check_tuple_iterator
def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
"""
Create an iterator over the dataset. The data retrieved will be a list of ndarrays.
To specify which columns to list and their order, use the `columns` parameter. If `columns`
is not provided, the order of the columns will remain unchanged.
Args:
columns (list[str], optional): List of columns to be used to specify the order of columns
(default=None, means all columns).
num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.
(default=-1, iterator can be iterated infinite number of epochs)
output_numpy (bool, optional): Whether or not to output NumPy datatype.
If output_numpy=False, iterator will output MSTensor (default=False).
do_copy (bool, optional): When the output data type is mindspore.Tensor,
use this parameter to select the conversion method; set it to False for better performance (default=True).
Returns:
Iterator, tuple iterator over the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> iterator = dataset.create_tuple_iterator()
>>> for item in iterator:
... # item is a list
... print(type(item))
... break
<class 'list'>
"""
if output_numpy is None:
output_numpy = False
if Dataset._noop_mode():
return DummyIterator(self, 'tuple')
return TupleIterator(self, columns, num_epochs, output_numpy, do_copy)
@check_dict_iterator
def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
"""
Create an iterator over the dataset. The data retrieved will be a dictionary datatype.
The order of the columns in the dictionary may not be the same as the original order.
Args:
num_epochs (int, optional): Maximum number of epochs that iterator can be iterated
(default=-1, iterator can be iterated infinite number of epochs).
output_numpy (bool, optional): Whether or not to output NumPy datatype,
if output_numpy=False, iterator will output MSTensor (default=False).
Returns:
Iterator, dictionary iterator over the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> iterator = dataset.create_dict_iterator()
>>> for item in iterator:
... # item is a dict
... print(type(item))
... break
<class 'dict'>
"""
if output_numpy is None:
output_numpy = False
if Dataset._noop_mode():
return DummyIterator(self, 'dict')
return DictIterator(self, num_epochs, output_numpy)
def __iter__(self):
"""Create an iterator over the dataset."""
return self.create_tuple_iterator(num_epochs=1)
@property
def input_indexs(self):
"""
Get the input index information.
Returns:
tuple, tuple of the input index information.
Examples:
>>> # dataset is an instance object of Dataset
>>> # set input_indexs
>>> dataset.input_indexs = 10
>>> print(dataset.input_indexs)
10
"""
if self._input_indexs != ():
return self._input_indexs
# find input_indexes of children
children_input_index = [child.input_indexs for child in self.children]
# in case of more than one child, return the first input_indexes
for cix in children_input_index:
if cix != ():
return cix
# if all children's input_indexes are () or the node is a leaf
return self._input_indexs
@input_indexs.setter
def input_indexs(self, value):
self._input_indexs = value
def copy_batch_size(self, value):
self._batch_size = value
def _init_tree_getters(self):
"""
Get pipeline information.
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
getter = cde.TreeGetters()
getter.Init(ir_tree)
runtime_context.AssignConsumer(getter)
return getter, runtime_context, api_tree
def __init_size_getter(self):
"""
Get pipeline information.
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
getter = cde.DatasetSizeGetters()
getter.Init(ir_tree)
runtime_context.AssignConsumer(getter)
return getter, runtime_context, api_tree
def get_col_names(self):
"""
Return the names of the columns in dataset.
Returns:
list, list of column names in the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> col_names = dataset.get_col_names()
"""
if self._col_names is None:
runtime_getter = self._init_tree_getters()
self._col_names = runtime_getter[0].GetColumnNames()
self.close_pool()
runtime_getter[2].notify_watchdog()
return self._col_names
def output_shapes(self):
"""
Get the shapes of output data.
Returns:
list, list of shapes of each column.
Examples:
>>> # dataset is an instance object of Dataset
>>> output_shapes = dataset.output_shapes()
"""
if self.saved_output_shapes is None:
runtime_getter = self._init_tree_getters()
self.saved_output_shapes = runtime_getter[0].GetOutputShapes()
self.saved_output_types = runtime_getter[0].GetOutputTypes()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self.dynamic_setting[0]:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_output_shapes
def output_types(self):
"""
Get the types of output data.
Returns:
list, list of data types.
Examples:
>>> # dataset is an instance object of Dataset
>>> output_types = dataset.output_types()
"""
if self.saved_output_types is None:
runtime_getter = self._init_tree_getters()
self.saved_output_shapes = runtime_getter[0].GetOutputShapes()
self.saved_output_types = runtime_getter[0].GetOutputTypes()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self.dynamic_setting[0]:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_output_types
def get_dataset_size(self):
"""
Return the number of batches in an epoch.
Returns:
int, number of batches.
Examples:
>>> # dataset is an instance object of Dataset
>>> dataset_size = dataset.get_dataset_size()
"""
if self.dataset_size is None:
runtime_getter = self.__init_size_getter()
self.dataset_size = runtime_getter[0].GetDatasetSize(False)
self.close_pool()
runtime_getter[2].notify_watchdog()
return self.dataset_size
def set_dynamic_columns(self, columns=None):
"""
Set the dynamic shape information of the source data. It should be set after the pipeline is defined.
Args:
columns (dict): A dict contains shape information of each column in dataset.
A value of :py:obj:`None` for shape[i] indicates that the data length of dimension i is dynamic.
Examples:
>>> import numpy as np
>>>
>>> def generator1():
... for i in range(1, 100):
... yield np.ones((16, i, 83)), np.array(i)
>>>
>>> dataset = ds.GeneratorDataset(generator1, ["data1", "data2"])
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": []})
"""
if not isinstance(columns, dict):
raise TypeError("Pass a dict to set dynamic shape, example: {\"data1\": [16, None, 256]}")
self.dynamic_setting[0] = True
self.dynamic_setting[1] = columns
def dynamic_min_max_shapes(self):
"""
Get minimum and maximum data length of dynamic source data, for dynamic graph compilation.
Returns:
lists, min_shapes, max_shapes of source data.
Examples:
>>> import numpy as np
>>>
>>> def generator1():
... for i in range(1, 100):
... yield np.ones((16, i, 83)), np.array(i)
>>>
>>> dataset = ds.GeneratorDataset(generator1, ["data1", "data2"])
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": []})
>>> min_shapes, max_shapes = dataset.dynamic_min_max_shapes()
"""
if self.saved_min_shapes is None or self.saved_max_shapes is None:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_min_shapes, self.saved_max_shapes
@staticmethod
def __check_dynamic_column_name(dynamic_columns, dataset_columns):
for column in dynamic_columns:
if column not in dataset_columns:
raise RuntimeError("dynamic column [" + column + "] does not match any column in dataset: " +
str(dataset_columns))
@staticmethod
def __check_dynamic_column_shape(data, col, dynamic_columns):
shape_mismatch = "dynamic column [" + col + "] with shape " + str(dynamic_columns[col]) + \
" does not match dataset column [" + col + "] with shape " + str(list(data[col].shape))
if data[col].ndim != len(dynamic_columns[col]):
raise RuntimeError(shape_mismatch)
for dim in range(len(dynamic_columns[col])):
if dynamic_columns[col][dim] is not None and dynamic_columns[col][dim] != data[col].shape[dim]:
raise RuntimeError(shape_mismatch)
def _dynamic_output_shapes(self):
"""
Get dynamic information of source data.
Returns:
lists, dynamic_shapes, min_shapes, max_shapes of source data.
"""
if not self.dynamic_setting[1]:
raise RuntimeError("dynamic_columns is not set, call set_dynamic_columns() by final Dataset Op.")
if self.saved_output_shapes is not None and self.saved_min_shapes is not None and \
self.saved_max_shapes is not None:
return self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes
logger.warning("Calculating dynamic shape of input data, this will take a few minutes...")
# Assume data1 shape is dynamic, data2 shape is fix
# {"data1": [batch_size, None, feat_len], "data2": [batch_size, feat_len]}
dynamic_columns = self.dynamic_setting[1]
# ["data1", "data2"]
dataset_columns = self.get_col_names()
Dataset.__check_dynamic_column_name(dynamic_columns, dataset_columns)
# Shape[1] of data1 is variable
# {"data1": {(batch_size, 100, feat_len), (16, 200, 83)}, "data2": {(batch_size, feat_len)}}
column_shape_set = {col: set() for col in dataset_columns}
dataset_size_counter = 0
for data in self.create_dict_iterator(num_epochs=1, output_numpy=True):
dataset_size_counter += 1
for col in data.keys():
if col in dynamic_columns:
Dataset.__check_dynamic_column_shape(data, col, dynamic_columns)
column_shape_set[col].add(tuple(data[col].shape))
# we get dataset_size after dryrun
self.dataset_size = dataset_size_counter
min_shapes, max_shapes, dynamic_shapes = list(), list(), list()
for col, shape_set in column_shape_set.items():
if len(shape_set) > 1:
if col not in dynamic_columns:
raise RuntimeError("column [" + col + "] has dynamic shape but not set by set_dynamic_columns()" +
", shapes of [" + col + "]: " + str(list(shape_set)))
shape_npy = np.array(list(shape_set))
max_shape = shape_npy.max(axis=0)
min_shape = shape_npy.min(axis=0)
# Set min shape to 1 due to unknown shuffle
min_shape = np.where(np.equal(dynamic_columns[col], None), 1, min_shape)
# Set dynamic dim to -1 for ME
dynamic_shape = np.where(np.equal(dynamic_columns[col], None), -1, dynamic_columns[col])
max_shapes.append(max_shape.tolist())
min_shapes.append(min_shape.tolist())
dynamic_shapes.append(dynamic_shape.tolist())
else:
# Also append fix shape to keep order of column shape
fix_shape = list(list(shape_set)[0])
max_shapes.append(fix_shape)
min_shapes.append(fix_shape)
dynamic_shapes.append(fix_shape)
if col in dynamic_columns:
logger.warning("column [" + col + "] has no dynamic shape but set by set_dynamic_columns()")
# Set min shape to 1 due to unknown shuffle
min_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), 1, fix_shape).tolist()
# Set dynamic dim to -1 for ME
dynamic_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), -1, fix_shape).tolist()
return dynamic_shapes, min_shapes, max_shapes
def num_classes(self):
"""
Get the number of classes in a dataset.
Returns:
int, number of classes.
Examples:
>>> # dataset is an instance object of Dataset
>>> num_classes = dataset.num_classes()
"""
if self._num_classes is None:
runtime_getter = self._init_tree_getters()
self._num_classes = runtime_getter[0].GetNumClasses()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self._num_classes == -1:
return None
return self._num_classes
def get_sync_notifiers(self):
if self.children:
return self.children[0].get_sync_notifiers()
return {}
def disable_sync(self):
if self.children:
return self.children[0].disable_sync()
return {}
def is_sync(self):
if self.children:
return self.children[0].is_sync()
return False
def sync_update(self, condition_name, num_batch=None, data=None):
"""
Release a blocking condition and trigger callback with given data.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
num_batch (Union[int, None]): The number of batches (rows) that are released.
When num_batch is None, it will default to the number specified by the
sync_wait operator (default=None).
data (Any): The data passed to the callback, user defined (default=None).
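The sketch below is a minimal, hypothetical pairing of sync_wait and sync_update; the condition
name "callback1" and the surrounding pipeline are illustrative only.
Examples:
>>> # dataset is an instance object of Dataset, with a blocking condition added earlier via
>>> # dataset = dataset.sync_wait(condition_name="callback1", num_batch=1)
>>> for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
...     # consume item, then release the next batch through the pipeline
...     dataset.sync_update(condition_name="callback1")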
"""
if (not isinstance(num_batch, int) and num_batch is not None) or \
(isinstance(num_batch, int) and num_batch <= 0):
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Sync_update batch size can only be positive integer, got : {}.".format(num_batch))
notifiers_dict = self.get_sync_notifiers()
if not isinstance(condition_name, str):
raise TypeError("Argument condition_name with value {} is not of type str, but got {}."
.format(condition_name, type(condition_name)))
if condition_name not in notifiers_dict:
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Condition name not found.")
if num_batch is not None:
num_batch *= self.get_batch_size()
notifiers_dict[condition_name](num_batch, data)
def get_batch_size(self):
"""
Return the size of batch.
Returns:
int, the number of data in a batch.
Examples:
>>> # dataset is an instance object of Dataset
>>> batch_size = dataset.get_batch_size()
"""
if self._batch_size is None:
runtime_getter = self._init_tree_getters()
self._batch_size = runtime_getter[0].GetBatchSize()
if self._batch_size is None:
self._batch_size = 1
return self._batch_size
def get_repeat_count(self):
"""
Get the number of times the dataset is repeated by RepeatDataset (default is 1).
Returns:
int, the count of repeat.
Examples:
>>> # dataset is an instance object of Dataset
>>> repeat_count = dataset.get_repeat_count()
"""
if self._repeat_count is None:
runtime_getter = self._init_tree_getters()
self._repeat_count = runtime_getter[0].GetRepeatCount()
if self._repeat_count is None:
self._repeat_count = 1
return self._repeat_count
def get_class_indexing(self):
"""
Return the class index.
Returns:
dict, a str-to-int mapping from label name to index.
dict, a str-to-list<int> mapping from label name to index for Coco ONLY. The second number
in the list is used to indicate the super category.
Examples:
>>> # dataset is an instance object of Dataset
>>> class_indexing = dataset.get_class_indexing()
"""
if self.children:
return self.children[0].get_class_indexing()
return {}
def reset(self):
"""Reset the dataset for next epoch."""
def is_shuffled(self):
"""Returns True if the dataset or its children is shuffled."""
for input_dataset in self.children:
if input_dataset.is_shuffled():
return True
return False
def is_sharded(self):
"""Returns True if the dataset or its children is sharded."""
for input_dataset in self.children:
if input_dataset.is_sharded():
return True
return False
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def post_parse(self, ir_node):
if self.cache:
ir_node = ir_node.set_cache_client(self.cache.cache_client)
if self.num_parallel_workers:
ir_node = ir_node.set_num_workers(self.num_parallel_workers)
return ir_node
class TextBaseDataset(Dataset):
"""
Abstract class to represent a text source dataset which produces content to the data pipeline.
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
super().__init__(children=children, num_parallel_workers=num_parallel_workers, cache=cache)
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def build_vocab(self, columns, freq_range, top_k, special_tokens, special_first):
"""
Function to create a Vocab from the source dataset.
Build a vocab from a dataset. This collects all the unique words in the dataset and returns a vocab
which contains the top_k most frequent words (if top_k is specified).
Args:
columns(Union[str, list[str]]): Column names to get words from.
freq_range(tuple[int]): A tuple of integers (min_frequency, max_frequency). Words within the frequency
range will be stored.
Naturally 0 <= min_frequency <= max_frequency <= total_words. min_frequency/max_frequency
can be set to default, which corresponds to 0/total_words separately.
top_k(int): Number of words to be built into vocab. top_k most frequent words are
taken. The top_k is taken after freq_range. If not enough top_k, all words will be taken
special_tokens(list[str]): A list of strings, each one is a special token.
special_first(bool): Whether special_tokens will be prepended/appended to vocab, If special_tokens
is specified and special_first is set to default, special_tokens will be prepended.
Returns:
Vocab, vocab built from the dataset.
Examples:
>>> import numpy as np
>>>
>>> def gen_corpus():
... # key: word, value: number of occurrences, reason for using letters is so their order is apparent
... corpus = {"Z": 4, "Y": 4, "X": 4, "W": 3, "U": 3, "V": 2, "T": 1}
... for k, v in corpus.items():
... yield (np.array([k] * v, dtype='S'),)
>>> column_names = ["column1"]
>>> dataset = ds.GeneratorDataset(gen_corpus, column_names)
>>> dataset = dataset.build_vocab(columns=["column1"],
... freq_range=(1, 10), top_k=5,
... special_tokens=["<pad>", "<unk>"],
... special_first=True)
"""
vocab = cde.Vocab()
columns = replace_none(columns, [])
if not isinstance(columns, list):
columns = [columns]
freq_range = replace_none(freq_range, (0, 9223372036854775807))
if freq_range[0] is None:
freq_range = (0, freq_range[1])
if freq_range[1] is None:
freq_range = (freq_range[0], 9223372036854775807)
special_tokens = replace_none(special_tokens, [])
top_k = replace_none(top_k, 9223372036854775807)
ir_tree, api_tree = self.create_ir_tree()
# vocab node
vocab_node = cde.BuildVocabNode(ir_tree, vocab, columns, freq_range, top_k, special_tokens, special_first)
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
# build vocab
consumer = cde.PythonBuildVocabConsumer()
consumer.Init(vocab_node)
runtime_context.AssignConsumer(consumer)
consumer.Start()
del api_tree
return vocab
def build_sentencepiece_vocab(self, columns, vocab_size, character_coverage, model_type, params):
"""
Function to create a SentencePieceVocab from the source dataset.
Args:
columns(list[str]): Column names to get words from.
vocab_size(int): Vocabulary size.
character_coverage(float): Percentage of characters covered by the model, must be between
0.98 and 1.0. Good defaults are: 0.9995 for languages with rich character sets like
Japanese or Chinese character sets, and 1.0 for other languages with small character sets
like English or Latin.
model_type(SentencePieceModel): Model type. Choose from unigram (default), bpe, char, or word.
The input sentence must be pretokenized when using word type.
params(dict): Any extra optional parameters for the sentencepiece library, according to your raw data.
Returns:
SentencePieceVocab, vocab built from the dataset.
Examples:
>>> from mindspore.dataset.text import SentencePieceModel
>>>
>>> dataset = ds.TextFileDataset("/path/to/sentence/piece/vocab/file", shuffle=False)
>>> dataset = dataset.build_sentencepiece_vocab(["text"], 5000, 0.9995, SentencePieceModel.UNIGRAM, {})
"""
if not isinstance(model_type, SentencePieceModel):
raise TypeError("Argument model_type with value {0} is not of type SentencePieceModel, but got {1}."\
.format(model_type, type(model_type)))
model_type = DE_C_INTER_SENTENCEPIECE_MODE[model_type]
vocab = cde.SentencePieceVocab()
ir_tree, api_tree = self.create_ir_tree()
# vocab node
vocab_node = cde.BuildSentenceVocabNode(ir_tree, vocab, columns, vocab_size, character_coverage, model_type,
params)
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
# build vocab
consumer = cde.PythonBuildVocabConsumer()
consumer.Init(vocab_node)
runtime_context.AssignConsumer(consumer)
consumer.Start()
del api_tree
return vocab
class SourceDataset(Dataset):
"""
Abstract class to represent a source dataset which produces content to the data pipeline.
"""
def __init__(self, num_parallel_workers=None, num_samples=None, shuffle=True, num_shards=None, shard_id=None,
cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, cache=cache)
self.num_samples = replace_none(num_samples, 0)
self.num_shards = replace_none(num_shards, 1)
self.shard_id = replace_none(shard_id, 0)
if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or "
"'Shuffle.FILES' or 'Shuffle.INFILE'.")
self.shuffle_flag = 2 # Global shuffle
if not isinstance(shuffle, Shuffle):
if shuffle is None or shuffle:
self.shuffle_flag = 2 # Global shuffle
else:
self.shuffle_flag = 0 # No shuffle
else:
if shuffle == Shuffle.GLOBAL:
self.shuffle_flag = 2 # Global shuffle
elif shuffle == Shuffle.FILES:
self.shuffle_flag = 1 # Files shuffle
elif shuffle == Shuffle.INFILE:
self.shuffle_flag = 3 # Infile shuffle
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
@staticmethod
def _find_files(patterns):
"""
Utility function to search for files with the given glob patterns.
Args:
patterns (Union[str, list[str]]): String or list of patterns to be searched.
Returns:
list, list of files.
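A hypothetical call, for illustration only (the path and pattern are placeholders):
Examples:
>>> # match all .txt files under a directory tree, recursively
>>> file_list = SourceDataset._find_files(["/path/to/data/**/*.txt"])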
"""
if not isinstance(patterns, list):
patterns = [patterns]
file_list = []
unmatched_patterns = []
for pattern in patterns:
matches = [match for match in glob.glob(pattern, recursive=True) if os.path.isfile(match)]
if matches:
file_list.extend(matches)
else:
unmatched_patterns.append(pattern)
if unmatched_patterns:
raise ValueError("The following patterns did not match any files: {}.".format(unmatched_patterns))
if file_list: # not empty
return file_list
raise ValueError("The list of path names matching the patterns is empty.")
def is_shuffled(self):
return self.shuffle_flag > 0
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return False
class MappableDataset(SourceDataset):
"""
Abstract class to represent a source dataset which supports use of samplers.
"""
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def __init__(self, num_parallel_workers=None, sampler=None, num_samples=None, shuffle=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.shuffle_flag = replace_none(shuffle, True)
self.sampler = samplers.select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
def add_sampler(self, new_sampler):
"""
Add a sampler for the current dataset.
Args:
new_sampler (Sampler): The sampler to be added as the parent sampler for current dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> # use a DistributedSampler instead
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> dataset.add_sampler(new_sampler)
"""
# note: By adding a sampler, the sampled IDs will flow to new_sampler
# after first passing through the current samplers attached to this dataset.
self.dataset_size = None
new_sampler.add_child(self.sampler)
self.sampler = new_sampler
def use_sampler(self, new_sampler):
"""
Make the current dataset use the new_sampler provided by other API.
Args:
new_sampler (Sampler): The sampler to use for the current dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> # use a DistributedSampler instead
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> dataset.use_sampler(new_sampler)
"""
if new_sampler is None:
raise TypeError("Input sampler can not be None.")
if not isinstance(new_sampler, (samplers.BuiltinSampler, samplers.Sampler)):
raise TypeError("Input sampler is not an instance of a sampler.")
self.dataset_size = None
self.sampler = self.sampler.child_sampler
self.add_sampler(new_sampler)
def is_shuffled(self):
return self.sampler.is_shuffled()
def is_sharded(self):
return self.sampler.is_sharded()
@check_split
def split(self, sizes, randomize=True):
"""
Split the dataset into smaller, non-overlapping datasets.
Args:
sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all sizes does not equal the original dataset size, an
error will occur.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will occur. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
- Any size equals 0, an error will occur.
- The sum of split sizes < K, the difference will be added to the first split.
- The sum of split sizes > K, the difference will be removed from the first large
enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): Determines whether or not to split the data randomly (default=True).
If True, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. There is an optimized split function, which will be called automatically when the dataset
that calls this function is a MappableDataset.
2. Dataset should not be sharded if split is going to be called. Instead, create a
DistributedSampler and specify a split to shard after splitting. If the dataset is
sharded after a split, it is strongly recommended to set the same seed in each instance
of execution, otherwise each shard may not be part of the same split (see Examples).
3. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch. Furthermore, if sharding occurs after split, each
shard may not be part of the same split.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the
floats don't sum to 1.
Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir, shuffle=False)
>>>
>>> # Set the seed, and tell split to use this seed when randomizing.
>>> # This is needed because sharding will be done later
>>> ds.config.set_seed(58)
>>> train_dataset, test_dataset = dataset.split([0.9, 0.1])
>>>
>>> # To shard the train dataset, use a DistributedSampler
>>> train_sampler = ds.DistributedSampler(10, 2)
>>> train_dataset.use_sampler(train_sampler)
"""
if self.is_shuffled():
logger.warning("Dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("Dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
current_split_start_index = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
ds.dataset_size = None
if randomize:
# want to shuffle the same way every epoch before split, we are assuming
# that the user will call set_seed
random_sampler = samplers.RandomSampler()
random_sampler.reshuffle_each_epoch = False
ds.add_sampler(random_sampler)
subset_sampler = samplers.SequentialSampler(current_split_start_index, size)
ds.add_sampler(subset_sampler)
# add sequential sampler, so that if user calls use_sampler, we will
# get rid of the sequential sampler instead of something we need
ds.add_sampler(samplers.SequentialSampler())
splits.append(ds)
current_split_start_index += size
return tuple(splits)
class BucketBatchByLengthDataset(Dataset):
"""
The result of applying BucketBatchByLength operator to the input dataset.
"""
def __init__(self, input_dataset, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function,
pad_info, pad_to_bucket_boundary, drop_remainder):
super().__init__(children=input_dataset)
self.column_names = to_list(column_names)
self.bucket_boundaries = replace_none(bucket_boundaries, [])
self.bucket_batch_sizes = replace_none(bucket_batch_sizes, [])
self.element_length_function = element_length_function
self.pad_info = replace_none(pad_info, {})
self.pad_to_bucket_boundary = replace_none(pad_to_bucket_boundary, False)
self.drop_remainder = replace_none(drop_remainder, False)
def parse(self, children=None):
return cde.BucketBatchByLengthNode(children[0], self.column_names, self.bucket_boundaries,
self.bucket_batch_sizes, self.element_length_function, self.pad_info,
self.pad_to_bucket_boundary, self.drop_remainder)
class BatchDataset(Dataset):
"""
The result of applying Batch operator to the input dataset.
Args:
input_dataset (Dataset): Input Dataset to be batched.
batch_size (Union[int, function]): The number of rows each batch is created with. An
int or callable which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last
possibly incomplete batch (default=False). If True, and if there are fewer
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers to process the dataset in parallel (default=None).
per_batch_map (callable, optional): Per batch map callable. A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch of
Tensors on a given column. The number of lists should match the number of entries in input_columns. The
last parameter of the callable must always be a BatchInfo object.
input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list must
match the signature of the per_batch_map callable.
output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_columns) != len(output_columns). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
will pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0.
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
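The sketch below only illustrates the per_batch_map contract described above; the column name and the
transform are hypothetical, and users normally reach this node through dataset.batch().
Examples:
>>> # per_batch_map receives one list per entry in input_columns, plus a trailing BatchInfo,
>>> # and returns a tuple with one list per output column
>>> def add_one(col1_batch, batch_info):
...     return ([x + 1 for x in col1_batch],)
>>> dataset = dataset.batch(batch_size=2, input_columns=["col1"], per_batch_map=add_one)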
"""
def __init__(self, input_dataset, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, output_columns=None, column_order=None, pad_info=None,
python_multiprocessing=False, max_rowsize=16):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)
if BatchDataset._is_ancestor_of_repeat(input_dataset):
logger.warning("Repeat is located before batch, data from two epochs can be batched together.")
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
# if batch_size is callable, set batch_size to 1 and batch_size_func to that callable function
self.batch_size = batch_size if not callable(batch_size) else 1
self.batch_size_func = None if not callable(batch_size) else batch_size
self.drop_remainder = replace_none(drop_remainder, False)
self.per_batch_map = per_batch_map
self.input_columns = to_list(input_columns)
self.output_columns = to_list(output_columns)
self.column_order = to_list(column_order)
self.pad = bool(pad_info is not None)
self.pad_info = replace_none(pad_info, dict())
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
self.hook = None
self.eot = None
self.watch_dog = None
self.workers = []
self.max_rowsize = max_rowsize
def parse(self, children=None):
return cde.BatchNode(children[0], self.batch_size, self.drop_remainder, self.pad, self.input_columns,
self.output_columns, self.column_order, self.batch_size_func, self.per_batch_map,
self.pad_info)
@staticmethod
def _is_ancestor_of_repeat(dataset):
"""
Utility function to find the case where repeat is used before batch.
Args:
dataset (Dataset): Dataset to be checked.
Returns:
bool, whether repeat is used before batch.
"""
if isinstance(dataset, RepeatDataset):
return True
flag = False
for input_dataset in dataset.children:
flag = flag | BatchDataset._is_ancestor_of_repeat(input_dataset)
return flag
@staticmethod
def _update_batch_size_for_syncwait(dataset, batch_size):
"""
Utility function to notify batch size to sync_wait.
Args:
dataset (Dataset): Dataset to be checked.
batch_size (int): batch size to notify.
"""
if isinstance(dataset, SyncWaitDataset):
dataset.update_sync_batch_size(batch_size)
for input_dataset in dataset.children:
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
def __deepcopy__(self, memodict):
return self.__safe_deepcopy__(memodict, exclude=("per_batch_map", "batch_size_func", "__transfer_dataset__"))
# Iterator bootstrap will be called on iterator construction.
# A deep copy of the Dataset object is created prior to iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
if self.per_batch_map is None:
logger.warning("per_batch_map is None so python_multiprocessing does not work.")
return
arg_q_list = []
res_q_list = []
# If user didn't specify num_parallel_workers, set it to default
if self.num_parallel_workers is not None:
num_parallel = self.num_parallel_workers
else:
num_parallel = get_num_parallel_workers()
if get_enable_shared_mem():
_check_shm_usage(num_parallel, 1, self.max_rowsize * self.batch_size, 2)
for _ in range(num_parallel):
arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize * self.batch_size))
res_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize * self.batch_size))
# Construct pool with the callable list
# The callable list and _pyfunc_worker_init are used to pass lambda function in to subprocesses
self.process_pool = multiprocessing.Pool(processes=num_parallel,
initializer=_pyfunc_worker_init,
initargs=([self.per_batch_map], arg_q_list, res_q_list))
idx = 0
global _OP_NAME, _OP_PROCESS, _LOCK
op_id = _OP_NAME[str(self)]
process_id = {op_id: [self.num_parallel_workers, set()]}
# obtain process id from multiprocessing.pool
for pool in self.process_pool._pool: # pylint: disable=W0212
process_id[op_id][1].add(pool.pid)
self.workers.append(pool)
with _LOCK:
_OP_PROCESS.update(process_id)
# Wrap per_batch_map into _PythonCallable
self.per_batch_map = _PythonCallable(self.per_batch_map, idx, self.process_pool, arg_q_list, res_q_list)
self.hook = _ExceptHookHandler()
# batch will launch a watch dog thread to monitor subprocesses
self._launch_watch_dog()
atexit.register(_mp_pool_exit_preprocess)
# If the Python version is 3.8 or greater, we need to close the pool in atexit to avoid an unclean pool teardown.
if sys.version_info >= (3, 8):
atexit.register(self.process_pool.close)
else:
if self.per_batch_map is not None:
self.per_batch_map = FuncWrapper(self.per_batch_map)
def _launch_watch_dog(self):
if platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.workers, self.process_pool))
self.watch_dog.daemon = True
self.watch_dog.start()
def _abort_watchdog(self):
if not self.eot.is_set():
self.eot.set()
def __del__(self):
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
class BatchInfo(cde.CBatchInfo):
"""
Provides the batch number and epoch number of the current batch. Only the batch size function and
per_batch_map of the batch operator can use it to dynamically adjust parameters based on the number
of batches and epochs during training.
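A minimal, hypothetical sketch of using BatchInfo through a callable batch size (the numbers are
illustrative):
Examples:
>>> # grow the batch size with the epoch number; batch_info is a BatchInfo object
>>> def batch_size_by_epoch(batch_info):
...     return 32 * (batch_info.get_epoch_num() + 1)
>>> dataset = dataset.batch(batch_size=batch_size_by_epoch)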
"""
def get_batch_num(self):
"""
Return the batch number of the current batch.
"""
return
def get_epoch_num(self):
"""
Return the epoch number of the current batch.
"""
return
class BlockReleasePair:
"""
The blocking condition class used by SyncWaitDataset.
Args:
init_release_rows (int): Number of lines to allow through the pipeline.
callback (function): The callback function that will be called when release is called (default=None).
"""
def __init__(self, init_release_rows, callback=None):
if isinstance(init_release_rows, int) and init_release_rows <= 0:
raise ValueError("release_rows need to be greater than 0.")
self.row_count = -init_release_rows
self.cv = threading.Condition()
self.callback = callback
self.default_rows = init_release_rows
self.disable = False
def __deepcopy__(self, memodict):
return self
def reset(self):
with self.cv:
self.row_count = -self.default_rows
self.cv.notify_all()
def update_batched_size(self, batch_size):
# sanity check
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("batch_size need to be greater than 0.")
# should only use before the pipeline creates
self.row_count *= batch_size
self.default_rows *= batch_size
def block_func(self):
"""
Function for handling the blocking condition.
Returns:
bool, True.
"""
with self.cv:
# if disable is true, this always evaluates to true
not_time_out = self.cv.wait_for(lambda: (self.row_count < 0 or self.disable),
timeout=get_callback_timeout())
# not_time_out will be False if a timeout occurs
if not not_time_out:
logger.warning("Timeout happened in sync_wait, maybe dataset.sync_update(condition=...) "
"is not added after dataset.create_dict_iterator(...), now disabling lock.")
self.disable = True
self.row_count += 1
return True
def release_func(self, pass_rows=None, data=None):
with self.cv:
if pass_rows is None:
pass_rows = self.default_rows
self.row_count -= pass_rows
if self.callback is not None:
self.callback(data)
self.cv.notify_all()
def disable_lock(self):
with self.cv:
self.disable = True
self.cv.notify_all()
class SyncWaitDataset(Dataset):
"""
The result of adding a blocking condition to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to apply flow control.
num_batch (int): Number of batches without blocking at the start of each epoch.
condition_name (str): Condition name that is used to toggle sending next row.
callback (function): Callback function that will be invoked when sync_update is called (default=None).
Raises:
RuntimeError: If condition name already exists.
"""
def __init__(self, input_dataset, condition_name, num_batch, callback=None):
super().__init__(children=input_dataset)
# set to the default value, waiting for the batch to update it
self._condition_name = condition_name
if isinstance(num_batch, int) and num_batch <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair = BlockReleasePair(num_batch, callback)
if self._condition_name in self.children[0].get_sync_notifiers():
raise RuntimeError("Condition name is already in use.")
logger.info("Please remember to add dataset.sync_update(condition=%s), otherwise hanging will result. "
"If dataset.sync_update(condition=%s) has already been added, you can ignore the info.",
condition_name, condition_name)
def parse(self, children=None):
return cde.SyncWaitNode(children[0], self._condition_name, self._pair.block_func)
def get_sync_notifiers(self):
return {**self.children[0].get_sync_notifiers(), **{self._condition_name: self._pair.release_func}}
def is_sync(self):
return True
def update_sync_batch_size(self, batch_size):
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair.update_batched_size(batch_size)
def disable_sync(self):
logger.info("Disabling Sync")
self._pair.disable_lock()
@staticmethod
def _is_ancestor_of_batch(dataset):
"""
Utility function to find the case where sync_wait is used before batch.
Args:
dataset (Dataset): Dataset to be checked.
Returns:
bool, whether sync_wait is used before batch.
"""
if isinstance(dataset, BatchDataset):
return True
flag = False
for input_dataset in dataset.children:
flag = flag | SyncWaitDataset._is_ancestor_of_batch(input_dataset)
return flag
def iterator_bootstrap(self):
self._pair.reset()
class ShuffleDataset(Dataset):
"""
The result of applying Shuffle operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be shuffled.
buffer_size (int): Size of the buffer.
Raises:
RuntimeError: If sync operators exist before shuffle.
"""
def __init__(self, input_dataset, buffer_size):
super().__init__(children=input_dataset)
self.buffer_size = buffer_size
self.reshuffle_each_epoch = True
if self.is_sync():
raise RuntimeError("No shuffle after sync operators.")
def parse(self, children=None):
return cde.ShuffleNode(children[0], self.buffer_size, self.reshuffle_each_epoch)
def is_shuffled(self):
return True
# This wait function is for cleaning zombie subprocesses
def wait_pid():
"""
This function is used by the main process to release subprocess resources.
"""
try:
while True:
child_pid, _ = os.waitpid(-1, os.WNOHANG)
if child_pid == 0:
break
except OSError:
# waitpid may fail for some reason, so we ignore this error
pass
# Terminate subprocess launched by multiprocessing.pool
def _terminate_process(workers):
for w in workers:
if w.exitcode is None:
w.terminate()
for w in workers:
if w._closed is False: # pylint: disable=W0212
w.join()
# Monitor the exit number of subprocesses
def _monitor_subprocess_exit(workers):
subprocess_exit_num = 0
for w in workers:
if w.exitcode is not None:
subprocess_exit_num += 1
return subprocess_exit_num
# Dataset needs a _watch_dog thread to monitor forked multiprocessing workers,
# and the thread target can't be a member function, otherwise Python won't collect and release resources.
def _watch_dog(eot, workers, pool=None):
"""
This thread is for monitoring subprocesses forked by GeneratorDataset/map/batch
"""
if not isinstance(workers, list):
raise TypeError("[Internal Error] The 2rd parameter of watch dog thread should be list of process, "\
"but got {}.".format(type(workers)))
if pool is not None and not isinstance(pool, multiprocessing.pool.Pool):
raise TypeError("[Internal Error] The 3rd parameter of watch dog thread should be multiprocessing.Pool, "\
"but got {}".format(type(pool)))
while not eot.is_set():
# Monitor and count how many subprocesses have already exited
subprocess_exit_num = _monitor_subprocess_exit(workers)
# If any subprocess has exited, we will wait for 30s and do some waitpid operations
if subprocess_exit_num > 0:
if pool is not None:
# Python multiprocessing.pool has a bug: if a subprocess of the pool is killed, the pool will launch
# a new subprocess, so we have to set _worker_handler._state to TERMINATE to stop relaunching.
if pool._state == RUN: # pylint: disable=W0212
pool._state = TERMINATE # pylint: disable=W0212
pool._worker_handler._state = TERMINATE # pylint: disable=W0212
start = time.time()
while time.time() - start < 30:
# We need to distinguish the case where get_dataset_size or training finished normally from the hang scenario.
# If get_dataset_size or training finished normally, _stop_subprocess can be executed and
# self.need_abort can be set to True. If the main process hangs in get(), self.need_abort
# will never be set to True, so we wait for 30s and then kill the main process.
if eot.is_set():
return
# Sometimes a subprocess may become a zombie, so within the 30s we can wait and do some useful work (waitpid).
wait_pid()
# multiprocessing.Queue may hang in .get() forever when the put() process was killed.
# We have to exit the main process, otherwise the main process will hang.
if pool is not None:
_terminate_process(pool._pool) # pylint: disable=W0212
else:
_terminate_process(workers)
logger.critical("The subprocess of dataset may exit unexpected or be killed, "
"main process will exit.")
os.kill(os.getpid(), signal.SIGTERM)
# Pyfunc collection for multiprocess pyfunc
# This global variable will only be used within subprocesses
_GLOBAL_PYFUNC_LIST = []
_ARGS_QUEUE = []
_RET_QUEUE = []
_OP_NAME = dict()
_OP_PROCESS = dict()
_LOCK = threading.Lock()
# Pyfunc worker init function
# The Python multiprocessing library forbids sending lambda functions through a pipe.
# This init function allows us to add all Python functions to a global collection and then fork afterwards.
def _pyfunc_worker_init(pyfunc_list, args_queue, ret_queue):
global _GLOBAL_PYFUNC_LIST
global _ARGS_QUEUE
global _RET_QUEUE
_GLOBAL_PYFUNC_LIST = pyfunc_list
_ARGS_QUEUE = args_queue
_RET_QUEUE = ret_queue
# Pyfunc worker execution function
# All exceptions will be raised to the main process
def _pyfunc_worker_exec(index, qid, *args):
"""
Internal function to call a certain pyfunc in a Python subprocess.
"""
# Some threads in multiprocessing.pool can't process the SIGINT signal
# and will hang, so Ctrl+C is passed to the parent process.
signal.signal(signal.SIGINT, signal.SIG_IGN)
if qid != -1:
# Pass arguments through the Queue instead of directly to remote process
args = _ARGS_QUEUE[qid].get()
try:
r = _GLOBAL_PYFUNC_LIST[index](*args)
except Exception:
return ExceptionHandler(where="in map(or batch) worker and execute python function")
if isinstance(r, tuple):
_RET_QUEUE[qid].put(r)
else:
_RET_QUEUE[qid].put((r,))
return [qid]
# not using shared memory for passing arguments, call function directly
result = None
try:
result = _GLOBAL_PYFUNC_LIST[index](*args)
except Exception:
result = ExceptionHandler(where="in map(or batch) worker and execute python function")
return result
# PythonCallable wrapper for multiprocess pyfunc
class _PythonCallable:
"""
Internal Python function wrapper for multiprocessing pyfunc.
"""
def __init__(self, py_callable, idx, pool=None, arg_q=None, res_q=None):
# Original Python callable from user.
self.py_callable = py_callable
# Process pool created for current iterator.
self.pool = pool
# Python callable index for subprocess _GLOBAL_PYFUNC_LIST
self.idx = idx
if pool is not None:
self.queuemap = {}
self.arg_q = arg_q
self.res_q = res_q
self.next_queue = 0
def __call__(self, *args):
if self._pool_is_running() and check_iterator_cleanup() is False:
result, qid, ret = self._send(*args)
if ret:
return result
# todo this check might be wrong
while check_iterator_cleanup() is False:
try:
return self._receive(result, qid)
except multiprocessing.TimeoutError:
continue
except KeyboardInterrupt:
_set_iterator_cleanup()
self.pool.close()
self.pool.join()
raise Exception("Multiprocess MapOp worker receives KeyboardInterrupt.")
return (None,)
# Invoke original Python callable in master process in case the pool is gone.
return self.py_callable(*args)
def to_json(self):
return self.py_callable.to_json()
def _send(self, *args):
"""
The map/batch operator will use the multiprocessing-pool apply_async interface to execute the Python function
in a subprocess; apply_async will release the GIL temporarily. For better performance, we use the shared memory
feature and pass a shared queue instead of multiprocessing args.
"""
ret = False
qid = None
if self.arg_q != []:
tid = threading.get_ident()
# Need to register each thread to use a different queue to send data to pool
if not tid in self.queuemap:
qid = self.next_queue
self.next_queue = self.next_queue + 1
self.queuemap[tid] = qid
else:
qid = self.queuemap[tid]
self.arg_q[qid].put(args)
# This call will send the tensors along with Python callable index to the process pool.
# Block, yield GIL. Current thread will reacquire GIL once result is returned.
if self._pool_is_running() and check_iterator_cleanup() is False:
result = self.pool.apply_async(_pyfunc_worker_exec, [self.idx, qid, []])
else:
ret = True
result = self.py_callable(*args)
else:
result = self.pool.apply_async(_pyfunc_worker_exec, [self.idx, -1, *args])
return result, qid, ret
def _receive(self, result, qid):
"""
The map/batch operator will use the multiprocessing-pool get interface to sync output data from a subprocess;
the get interface will reacquire the GIL. For better performance, we use the shared memory feature and get data
from the shared queue directly.
"""
if self.arg_q != []:
r = result.get(30)
if isinstance(r, ExceptionHandler):
r.reraise()
if r[0] != qid:
raise Exception("In PyCallable, got results from wrong thread")
r = self.res_q[qid].get()
return r
r = result.get(30)
if isinstance(r, ExceptionHandler):
r.reraise()
return r
def _pool_is_running(self):
# note here: the RUN state of python3.7 and python3.8 is different:
# python3.7: RUN = 0
# python3.8: RUN = "RUN"
# so we use self.pool._state == RUN instead and we can't use _state == 0 any more.
if self.pool is not None and self.pool._state == RUN: # pylint: disable=W0212
return True
return False
def _mp_pool_exit_preprocess():
if check_iterator_cleanup() is False:
# Set the iterator_cleanup flag to True before exiting, and wait 3s for all apply_async calls
# applied to the multiprocessing tasks, to prevent multiprocessing from hanging when exiting
_set_iterator_cleanup()
time.sleep(3)
class _ExceptHookHandler:
def __init__(self):
sys.excepthook = self.__handler_exception
def __handler_exception(self, ex_type, value, tb):
logger.critical("Uncaught exception: ", exc_info=(ex_type, value, tb))
_mp_pool_exit_preprocess()
class MapDataset(TextBaseDataset, Dataset):
"""
The result of applying the Map operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be mapped.
operations (TensorOp): A function mapping a nested structure of tensors
to another nested structure of tensors (default=None).
input_columns (Union[str, list[str]]): List of names of the input columns
(default=None, the operations will be applied on the first columns in the dataset).
The size of the list should match the number of inputs of the first operator.
output_columns (Union[str, list[str]], optional): List of names of the output columns.
The size of the list should match the number of outputs of the last operator
(default=None, output columns will be the input columns, i.e., the columns will
be replaced).
column_order (list[str], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_columns) != len(output_columns). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This
option could be beneficial if the Python operation is computationally heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None)
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
offload (bool, optional): Flag to indicate whether offload is used (Default=None).
Raises:
ValueError: If len(input_columns) != len(output_columns) and column_order is not specified.
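The sketch below only illustrates the column_order requirement described above; tokenize_op, the
column names and the one-to-two-column transform are hypothetical, and users normally reach this
node through dataset.map().
Examples:
>>> # one input column mapped to two output columns requires column_order
>>> dataset = dataset.map(operations=tokenize_op, input_columns=["text"],
...                       output_columns=["tokens", "length"],
...                       column_order=["tokens", "length"])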
"""
def __init__(self, input_dataset, operations=None, input_columns=None, output_columns=None, column_order=None,
num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None, max_rowsize=16,
offload=None):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers, cache=cache)
self.operations = to_list(operations)
self.operations = py_transforms.Compose.reduce(self.operations)
self.input_columns = to_list(input_columns)
self.output_columns = to_list(output_columns)
self.column_order = replace_none(column_order, [])
# If output_columns were not provided then use input_columns
self.output_columns = self.input_columns if not self.output_columns else self.output_columns
if self.input_columns and self.output_columns \
and len(self.input_columns) != len(self.output_columns) \
and not self.column_order:
raise ValueError("When length of input_columns and output_columns are not equal,"
" column_order must be specified.")
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
self.hook = None
self.eot = None
self.watch_dog = None
self.workers = []
self.callbacks = to_list(callbacks)
self.max_rowsize = max_rowsize
self.offload = offload
def parse(self, children=None):
operations = []
for op in self.operations:
if op and getattr(op, 'parse', None):
operations.append(op.parse())
else:
operations.append(op)
callbacks = [cb.create_runtime_obj() for cb in self.callbacks]
return cde.MapNode(children[0], operations, self.input_columns, self.output_columns, self.column_order,
callbacks, self.max_rowsize, OffloadToManualOffloadMode[self.offload])
def __deepcopy__(self, memodict):
return self.__safe_deepcopy__(memodict, exclude=("operations", "callbacks", "__transfer_dataset__"))
# Iterator bootstrap will be called on iterator construction.
# A deep copy of the Dataset object is created prior to iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
iter_specific_operations = []
callable_list = []
arg_q_list = []
res_q_list = []
# If user didn't specify num_parallel_workers, set it to default
num_parallel = get_num_parallel_workers()
if self.num_parallel_workers is not None:
num_parallel = self.num_parallel_workers
if get_enable_shared_mem():
_check_shm_usage(num_parallel, 1, self.max_rowsize, 2)
for _ in range(num_parallel):
arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize))
res_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize))
# Pass #1, look for Python callables and build list
for op in self.operations:
# our C transforms are now callable and should not be run in Python multiprocessing
if MapDataset.__operation_valid_for_multiprocessing(op):
callable_list.append(op)
if callable_list:
# Construct pool with the callable list
# The callable list and _pyfunc_worker_init are used to pass lambda function in to subprocesses
self.process_pool = multiprocessing.Pool(processes=num_parallel,
initializer=_pyfunc_worker_init,
initargs=(callable_list, arg_q_list, res_q_list))
# Pass #2
idx = 0
global _OP_NAME, _OP_PROCESS, _LOCK
op_id = _OP_NAME[str(self)]
# obtain process id from multiprocessing.pool
process_id = {op_id: [self.num_parallel_workers, set()]}
for pool in self.process_pool._pool: # pylint: disable=W0212
process_id[op_id][1].add(pool.pid)
self.workers.append(pool)
with _LOCK:
_OP_PROCESS.update(process_id)
for op in self.operations:
# our C transforms are now callable and should not be run in Python multiprocessing
if MapDataset.__operation_valid_for_multiprocessing(op):
# Wrap Python callable into _PythonCallable
iter_specific_operations.append(_PythonCallable(op, idx, self.process_pool,
arg_q_list, res_q_list))
idx += 1
else:
# CPP ops remain the same
iter_specific_operations.append(op)
self.operations = iter_specific_operations
self.hook = _ExceptHookHandler()
# Map multiprocessing will launch a watch dog thread for monitoring sub processes
self._launch_watch_dog()
atexit.register(_mp_pool_exit_preprocess)
# If the Python version is 3.8 or greater, we need to close the pool in atexit to avoid an unclean pool teardown.
if sys.version_info >= (3, 8):
atexit.register(self.process_pool.close)
@staticmethod
def __operation_valid_for_multiprocessing(op):
if callable(op) and str(op).find("c_transform") < 0:
return True
return False
def _launch_watch_dog(self):
if platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.workers, self.process_pool))
self.watch_dog.daemon = True
self.watch_dog.start()
def _abort_watchdog(self):
if not self.eot.is_set():
self.eot.set()
def __del__(self):
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
self.process_pool.join()
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
class FilterDataset(Dataset):
"""
The result of applying filter predicate to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be filtered.
predicate (callable): Python callable which returns a boolean value. If False, the element is filtered out.
input_columns (Union[str, list[str]], optional): List of names of the input columns
(default=None, the predicate will be applied to all columns in the dataset).
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
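A minimal, hypothetical sketch of the predicate contract (the column name and threshold are
illustrative); users normally reach this node through dataset.filter().
Examples:
>>> # keep only the rows whose "label" value is non-zero
>>> dataset = dataset.filter(predicate=lambda label: label != 0, input_columns=["label"])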
"""
def __init__(self, input_dataset, predicate, input_columns=None, num_parallel_workers=None):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)
self.predicate = lambda *args: bool(predicate(*args))
self.input_columns = to_list(input_columns)
def parse(self, children=None):
return cde.FilterNode(children[0], self.predicate, self.input_columns)
class RepeatDataset(Dataset):
"""
The result of applying Repeat operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be repeated.
count (int): Number of times the dataset will be repeated (default=-1, repeat indefinitely).
"""
def __init__(self, input_dataset, count):
super().__init__(children=input_dataset)
self.count = replace_none(count, -1)
def parse(self, children=None):
return cde.RepeatNode(children[0], self.count)
class SkipDataset(Dataset):
"""
The result of applying Skip operator to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to have elements skipped.
count (int): Number of elements to be skipped in the dataset.
"""
def __init__(self, input_dataset, count):
super().__init__(input_dataset)
self.count = count
def parse(self, children=None):
return cde.SkipNode(children[0], self.count)
class TakeDataset(Dataset):
"""
The result of applying Take operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to have elements taken from.
count (int): Number of elements to be taken from the dataset.
"""
def __init__(self, input_dataset, count):
super().__init__(children=input_dataset)
self.count = count
def parse(self, children=None):
return cde.TakeNode(children[0], self.count)
class ZipDataset(Dataset):
"""
The result of applying Zip operator to the input Dataset.
Args:
datasets (tuple): A tuple of datasets to be zipped together.
Raises:
TypeError: If dataset is not an instance of Dataset.
"""
def __init__(self, datasets):
super().__init__(children=datasets)
def parse(self, children=None):
return cde.ZipNode(children)
def is_sync(self):
return any([c.is_sync() for c in self.children])
class ConcatDataset(Dataset):
"""
The result of applying concat dataset operator to the input Dataset.
Args:
datasets (list): A list of datasets to be concatenated together.
Raises:
TypeError: If dataset is not an instance of Dataset.
ValueError: If there are no samples in one of the datasets.
"""
def __init__(self, datasets):
super().__init__(children=datasets)
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("Invalid dataset, expected Dataset object, but got %s!" % type(dataset))
self.datasets = datasets
self._sampler = samplers.SequentialSampler(num_samples=None)
self.children_sizes_ = [c.get_dataset_size() for c in self.children]
child_index = 0
for item in self.children_sizes_:
if item == 0:
raise ValueError("There are no samples in the dataset number %d. Please make sure there are "
"valid samples in the dataset." % child_index)
child_index += 1
# _children_flag_and_nums: A list of pairs <int, int>. The first element of each pair is a flag indicating
# whether the dataset is mappable. The second element is the length of the dataset.
self._children_flag_and_nums = []
# _children_start_end_index_: A list of pairs <int, int>. The elements of each pair mark the valid
# sampling positions of the dataset corresponding to that index.
self._children_start_end_index_ = []
for index, child in enumerate(self.children):
tem_list = [-1, -1]
self._children_start_end_index_.append(tem_list)
dataset_len = self.children_sizes_[index]
if isinstance(child, GeneratorDataset) and not hasattr(child.source, "__getitem__"):
dataset_len = 0
self.children_sizes_[index] = 0
if isinstance(child, MappableDataset):
self._children_flag_and_nums.append((0, dataset_len))
else:
self._children_flag_and_nums.append((1, dataset_len))
def parse(self, children=None):
return cde.ConcatNode(children, self._sampler, self._children_flag_and_nums, self._children_start_end_index_)
def use_sampler(self, sampler):
"""
Set the DistributedSampler for the concat dataset.
Args:
sampler (Sampler): The sampler to use for the current dataset.
Currently supported: DistributedSampler.
Raises:
TypeError: If the sampler is not an instance of DistributedSampler.
ValueError: If the parameter shuffle of the sampler is True.
ValueError: If the parameter num_samples of the sampler is not None.
ValueError: If num_shards <= 0.
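A minimal sketch of sharding a concatenated dataset; the shard counts are illustrative.
Examples:
>>> # dataset_1 and dataset_2 are instance objects of Dataset
>>> dataset = dataset_1 + dataset_2
>>> sampler = ds.DistributedSampler(num_shards=8, shard_id=0, shuffle=False)
>>> dataset.use_sampler(sampler)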
"""
if not isinstance(sampler, samplers.DistributedSampler):
raise TypeError("The parameter %s of concat must be DistributedSampler!" % sampler)
if sampler.is_shuffled():
raise ValueError("The parameter shuffle of DistributedSampler must be False!")
if sampler.num_shards <= 0:
raise ValueError("The parameter num_shards of DistributedSampler must be positive int!")
if sampler.get_num_samples() is not None:
raise ValueError("The parameter num_samples of DistributedSampler is not support to be set!")
self.dataset_size = None
self._sampler = sampler
cumulative_samples_nums = 0
for index, child in enumerate(self.children):
if hasattr(child, 'sampler') and child.sampler.get_num_samples() is not None:
raise ValueError("The parameter NumSamples of %s is not support to be set!" % child)
if isinstance(child, BatchDataset):
raise TypeError("The parameter %s of concat must not be BatchDataset!" % child)
# if child is mappable and the length is greater than 0
if not self._children_flag_and_nums[index][0] and self._children_flag_and_nums[index][1]:
tem_value = cumulative_samples_nums + self._children_flag_and_nums[index][1]
if not self._children_flag_and_nums[index][1] >= sampler.num_shards:
if tem_value < sampler.num_shards:
self._children_start_end_index_[index][0] = cumulative_samples_nums
self._children_start_end_index_[index][1] = tem_value
else:
self._children_start_end_index_[index][0] = cumulative_samples_nums
self._children_start_end_index_[index][1] = tem_value % sampler.num_shards
tem_sampler = copy.deepcopy(sampler)
tem_sampler.set_offset(cumulative_samples_nums)
child.use_sampler(tem_sampler)
cumulative_samples_nums += self.children_sizes_[index]
cumulative_samples_nums %= sampler.num_shards
class RenameDataset(Dataset):
"""
The result of applying Rename operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be Renamed.
input_columns (Union[str, list[str]]): List of names of the input columns.
output_columns (Union[str, list[str]]): List of names of the output columns.
"""
def __init__(self, input_dataset, input_columns, output_columns):
super().__init__(children=input_dataset)
self.input_column_names = to_list(input_columns)
self.output_column_names = to_list(output_columns)
def parse(self, children=None):
return cde.RenameNode(children[0], self.input_column_names, self.output_column_names)
def to_list(items):
if items is None:
return []
if isinstance(items, tuple):
return list(items)
if not isinstance(items, list):
return [items]
return items
class ProjectDataset(Dataset):
"""
The result of applying Project operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be Projected.
columns (Union[str, list[str]]): List of names of the columns to project.
"""
def __init__(self, input_dataset, columns):
super().__init__(children=input_dataset)
self.columns = to_list(columns)
def parse(self, children=None):
return cde.ProjectNode(children[0], self.columns)
class _ToDevice:
"""
Internal class to handle sending data to device.
"""
def __init__(self, dataset, num_epochs):
ir_tree, self.api_tree = dataset.create_ir_tree()
self._runtime_context = cde.PythonRuntimeContext()
self._runtime_context.Init()
self._to_device = cde.ToDevice(num_epochs)
self._to_device.Init(ir_tree)
self._runtime_context.AssignConsumer(self._to_device)
ITERATORS_LIST.append(weakref.ref(self))
_unset_iterator_cleanup()
def send(self):
self._to_device.Send()
def stop_send(self):
"""
Send a stop-send signal to the pipeline; it is used when the end-of-sequence flag is sent at the epoch end.
"""
self._to_device.StopSend()
def continue_send(self):
"""
Send a continue-send signal to the pipeline; it is used when the end-of-sequence flag is sent at the epoch end.
"""
self._to_device.ContinueSend()
def get_data_info(self):
"""
Get type and shape of current batch.
"""
return self._to_device.GetDataInfo()
def release(self):
"""
Manually terminate Device Queue instead of relying on out of scope destruction.
"""
if hasattr(self, '_runtime_context') and self._runtime_context:
if hasattr(self, '_to_device') and self._to_device:
self._runtime_context.Terminate()
del self._to_device
del self._runtime_context
def __deepcopy__(self, memodict):
return self
def get_offload_model(self):
"""
Get offload model containing removed offload ops from pipeline.
"""
offload_model = GetOffloadModel(self._to_device)
return offload_model
class TransferDataset(Dataset):
"""
The result of applying TDT operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be transferred.
send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create queue which stores
types and shapes of data or not (default=False).
Raises:
TypeError: If device_type is empty.
ValueError: If device_type is not 'Ascend', 'GPU' or 'CPU'.
RuntimeError: If dataset is unknown.
"""
def __init__(self, input_dataset, send_epoch_end=True, create_data_info_queue=False):
super().__init__(children=input_dataset)
self.queue_name = str(uuid.uuid1())
self.device_type = context.get_context("device_target") if context else "CPU"
self.device_id = context.get_context("device_id") if context else 0
self._send_epoch_end = replace_none(send_epoch_end, True)
self._create_data_info_queue = create_data_info_queue
self._to_device = None
def parse(self, children=None):
total_batch = 0
if hasattr(self.children[0], "__total_batch__"):
total_batch = self.children[0].__total_batch__
return cde.TransferNode(children[0], self.queue_name, self.device_type, self.device_id, self._send_epoch_end,
total_batch, self._create_data_info_queue)
def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
raise RuntimeError("TransferDataset is not iterable.")
def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
raise RuntimeError("TransferDataset is not iterable.")
def __iter__(self):
raise RuntimeError("TransferDataset is not iterable.")
def output_shapes(self):
raise RuntimeError("TransferDataset does not support obtaining output_shapes.")
def output_types(self):
raise RuntimeError("TransferDataset does not support obtaining output_types.")
@check_to_device_send
def send(self, num_epochs=-1):
"""
Send to device
"""
if Dataset._noop_mode():
return
if self._to_device is not None:
del self._to_device
self._to_device = _ToDevice(self, num_epochs)
self._to_device.send()
def stop_send(self):
if self._to_device is not None:
self._to_device.stop_send()
def continue_send(self):
if self._to_device is not None:
self._to_device.continue_send()
def get_data_info(self):
"""
Get type and shape of current batch
"""
if self._to_device is not None:
return self._to_device.get_data_info()
raise RuntimeError("Calling get_data_info with bad state.")
def get_offload_model(self):
if self._to_device is not None:
return self._to_device.get_offload_model()
raise RuntimeError("get_offload_model, _to_device is None")
def release(self):
"""
Manually terminate Device Queue instead of relying on out of scope destruction.
"""
if self._to_device is not None:
self._to_device.release()
class RangeDataset(MappableDataset):
"""
A source dataset that generates a sequence of numbers over the range defined by `start`, `stop` and `step`.
Args:
start (int): Starting index.
stop (int): Ending index.
step (int): Step size in the range specified by start and stop.
"""
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def is_shuffled(self):
return False
def is_sharded(self):
return False
def get_dataset_size(self):
if self.dataset_size is None:
self.dataset_size = math.ceil((self.stop - self.start) / self.step)
return self.dataset_size
class FashionMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the FASHION-MNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. `train` will read from 60,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> fashion_mnist_dataset_dir = "/path/to/fashion_mnist_dataset_directory"
>>>
>>> # Read 3 samples from FASHIONMNIST dataset
>>> dataset = ds.FashionMnistDataset(dataset_dir=fashion_mnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In FASHIONMNIST dataset, each dictionary has keys "image" and "label"
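>>> # A further illustrative sketch (the directory path above is a placeholder):
>>> # read the 10,000 test samples across 2 shards, with this process taking shard 0
>>> dataset = ds.FashionMnistDataset(dataset_dir=fashion_mnist_dataset_dir, usage="test",
...                                  num_shards=2, shard_id=0)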
About Fashion-MNIST dataset:
Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and
a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes.
We intend Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking
machine learning algorithms. It shares the same image size and structure of training and testing splits.
You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── fashionmnist_dataset_dir
├── t10k-images-idx3-ubyte
├── t10k-labels-idx1-ubyte
├── train-images-idx3-ubyte
└── train-labels-idx1-ubyte
Citation:
.. code-block::
@online{xiao2017/online,
author = {Han Xiao and Kashif Rasul and Roland Vollgraf},
title = {Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms},
date = {2017-08-28},
year = {2017},
eprintclass = {cs.LG},
eprinttype = {arXiv},
eprint = {cs.LG/1708.07747},
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.FashionMnistNode(self.dataset_dir, self.usage, self.sampler)
class ImageFolderDataset(MappableDataset):
"""
A source dataset that reads images from a tree of directories.
All images within one folder have the same label.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is of a scalar of uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
extensions (list[str], optional): List of file extensions to be
included in the dataset (default=None).
class_indexing (dict, optional): A str-to-int mapping from folder name to index
(default=None, the folder names will be sorted
alphabetically and each class will be given a
unique index starting from 0).
decode (bool, optional): Decode the images after reading (default=False).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If class_indexing is not a dictionary.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- The shape of the image column is [image_size] if decode flag is False, or [H,W,C] otherwise.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> image_folder_dataset_dir = "/path/to/image_folder_dataset_directory"
>>>
>>> # 1) Read all samples (image files) in image_folder_dataset_dir with 8 threads
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... num_parallel_workers=8)
>>>
>>> # 2) Read all samples (image files) from folder cat and folder dog with label 0 and 1
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... class_indexing={"cat":0, "dog":1})
>>>
>>> # 3) Read all samples (image files) in image_folder_dataset_dir with extensions .JPEG and .png (case sensitive)
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... extensions=[".JPEG", ".png"])
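>>> # 4) Illustrative sketch (directory path is a placeholder): decode images while reading
>>> #    and keep a deterministic, sequential order
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
...                                 decode=True, shuffle=False)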
About ImageFolderDataset:
You can construct the following directory structure from your dataset files and read them with MindSpore's API.
.. code-block::
.
└── image_folder_dataset_directory
├── class1
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── class2
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── class3
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── classN
├── ...
"""
@check_imagefolderdataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, sampler=None,
extensions=None, class_indexing=None, decode=False, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.extensions = replace_none(extensions, [])
self.class_indexing = replace_none(class_indexing, {})
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.ImageFolderNode(self.dataset_dir, self.decode, self.sampler, self.extensions, self.class_indexing)
class IMDBDataset(MappableDataset):
"""
A source dataset for reading and parsing Internet Movie Database (IMDb).
The generated dataset has two columns: :py:obj:`[text, label]`.
The tensor of column :py:obj:`text` is of the string type.
The tensor of column :py:obj:`label` is of a scalar of uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`
(default=None, will read all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- Each sample in the :py:obj:`text` column is a scalar of the string type.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> imdb_dataset_dir = "/path/to/imdb_dataset_directory"
>>>
>>> # 1) Read all samples (text files) in imdb_dataset_dir with 8 threads
>>> dataset = ds.IMDBDataset(dataset_dir=imdb_dataset_dir, num_parallel_workers=8)
>>>
>>> # 2) Read train samples (text files).
>>> dataset = ds.IMDBDataset(dataset_dir=imdb_dataset_dir, usage="train")
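>>> # 3) Illustrative sketch (directory path is a placeholder): read the test split across
>>> #    2 shards, with this process taking shard 0
>>> dataset = ds.IMDBDataset(dataset_dir=imdb_dataset_dir, usage="test",
...                          num_shards=2, shard_id=0)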
About IMDBDataset:
The IMDB dataset contains 50,000 highly polarized reviews from the Internet Movie Database (IMDb). The dataset
is divided into 25,000 reviews for training and 25,000 reviews for testing, with both the training set and the test
set containing 50% positive and 50% negative reviews. Train labels and test labels are lists of 0 and 1, where
0 stands for negative and 1 for positive.
You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── imdb_dataset_directory
├── train
│ ├── pos
│ │ ├── 0_9.txt
│ │ ├── 1_7.txt
│ │ ├── ...
│ ├── neg
│ │ ├── 0_3.txt
│ │ ├── 1_1.txt
│ │ ├── ...
├── test
│ ├── pos
│ │ ├── 0_10.txt
│ │ ├── 1_10.txt
│ │ ├── ...
│ ├── neg
│ │ ├── 0_2.txt
│ │ ├── 1_3.txt
│ │ ├── ...
Citation:
.. code-block::
@InProceedings{maas-EtAl:2011:ACL-HLT2011,
author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan
and Ng, Andrew Y. and Potts, Christopher},
title = {Learning Word Vectors for Sentiment Analysis},
booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics:
Human Language Technologies},
month = {June},
year = {2011},
address = {Portland, Oregon, USA},
publisher = {Association for Computational Linguistics},
pages = {142--150},
url = {http://www.aclweb.org/anthology/P11-1015}
}
"""
@check_imdb_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None, sampler=None,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.IMDBNode(self.dataset_dir, self.usage, self.sampler)
class IWSLT2016Dataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses IWSLT2016 datasets.
The generated dataset has two columns: :py:obj:`[text, translation]`.
The tensor of column :py:obj:`text` is of the string type.
The tensor of column :py:obj:`translation` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Acceptable usages include "train", "valid", "test" and "all" (default=None, all samples).
language_pair (sequence, optional): Sequence containing source and target language, supported values are
("en", "fr"), ("en", "de"), ("en", "cs"), ("en", "ar"), ("fr", "en"), ("de", "en"), ("cs", "en"),
("ar", "en") (default=("de", "en")).
valid_set (str, optional): A string to identify validation set, when usage is valid or all, the validation set
of valid_set type will be read, supported values are "dev2010", "tst2010", "tst2011", "tst2012", "tst2013"
and "tst2014" (default="tst2013").
test_set (str, optional): A string to identify test set, when usage is test or all, the test set of test_set
type will be read, supported values are "dev2010", "tst2010", "tst2011", "tst2012", "tst2013" and "tst2014"
(default="tst2014").
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> iwslt2016_dataset_dir = "/path/to/iwslt2016_dataset_dir"
>>> dataset = ds.IWSLT2016Dataset(dataset_dir=iwslt2016_dataset_dir, usage='all',
... language_pair=('de', 'en'), valid_set='tst2013', test_set='tst2014')
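>>> # Illustrative sketch (directory path is a placeholder): read only the training split
>>> # of the fr-en pair without shuffling
>>> dataset = ds.IWSLT2016Dataset(dataset_dir=iwslt2016_dataset_dir, usage='train',
...                               language_pair=('fr', 'en'), shuffle=False)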
About IWSLT2016 dataset:
IWSLT (International Workshop on Spoken Language Translation) is a major annual scientific conference dedicated to
all aspects of spoken language translation. The MT task of the IWSLT evaluation campaign provides a dataset, which is
publicly available through the WIT3 website wit3.fbk.eu. The IWSLT2016 dataset includes translations from English to
Arabic, Czech, French, and German, and translations from Arabic, Czech, French, and German to English.
You can unzip the original IWSLT2016 dataset files into the directory structure below and read them with MindSpore's
API. After decompression, you also need to decompress the archive of the language pair you want to read inside its
folder. For example, to read the de-en data, unzip the tgz file in the de/en directory; the dataset is in the
unzipped folder.
.. code-block::
.
└── iwslt2016_dataset_directory
├── subeval_files
└── texts
├── ar
│ └── en
│ └── ar-en
├── cs
│ └── en
│ └── cs-en
├── de
│ └── en
│ └── de-en
│ ├── IWSLT16.TED.dev2010.de-en.de.xml
│ ├── train.tags.de-en.de
│ ├── ...
├── en
│ ├── ar
│ │ └── en-ar
│ ├── cs
│ │ └── en-cs
│ ├── de
│ │ └── en-de
│ └── fr
│ └── en-fr
└── fr
└── en
└── fr-en
Citation:
.. code-block::
@inproceedings{cettoloEtAl:EAMT2012,
Address = {Trento, Italy},
Author = {Mauro Cettolo and Christian Girardi and Marcello Federico},
Booktitle = {Proceedings of the 16$^{th}$ Conference of the European Association for Machine Translation
(EAMT)},
Date = {28-30},
Month = {May},
Pages = {261--268},
Title = {WIT$^3$: Web Inventory of Transcribed and Translated Talks},
Year = {2012}}
"""
@check_iwslt2016_dataset
def __init__(self, dataset_dir, usage=None, language_pair=None, valid_set=None, test_set=None,
num_samples=None, shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, num_parallel_workers=None,
cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, 'all')
self.language_pair = replace_none(language_pair, ["de", "en"])
self.valid_set = replace_none(valid_set, 'tst2013')
self.test_set = replace_none(test_set, 'tst2014')
def parse(self, children=None):
return cde.IWSLT2016Node(self.dataset_dir, self.usage, self.language_pair, self.valid_set, self.test_set,
self.num_samples, self.shuffle_flag, self.num_shards, self.shard_id)
class IWSLT2017Dataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses IWSLT2017 datasets.
The generated dataset has two columns: :py:obj:`[text, translation]`.
The tensor of column :py:obj:`text` is of the string type.
The tensor of column :py:obj:`translation` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Acceptable usages include "train", "valid", "test" and "all" (default=None, all samples).
language_pair (list, optional): List containing src and tgt language, supported values are ("en", "nl"),
("en", "de"), ("en", "it"), ("en", "ro"), ("nl", "en"), ("nl", "de"), ("nl", "it"), ("nl", "ro"),
("de", "en"), ("de", "nl"), ("de", "it"), ("de", "ro"), ("it", "en"), ("it", "nl"), ("it", "de"),
("it", "ro"), (`ro`, `en`), (`ro`, `nl`), (`ro`, `de`), (`ro`, `it`) (default=(`de`, `en`)).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> iwslt2017_dataset_dir = "/path/to/iwslt2017_dataset_dir"
>>> dataset = ds.IWSLT2017Dataset(dataset_dir=iwslt2017_dataset_dir, usage='all', language_pair=('de', 'en'))
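>>> # Illustrative sketch (directory path is a placeholder): read only the training split
>>> # of the it-en pair without shuffling
>>> dataset = ds.IWSLT2017Dataset(dataset_dir=iwslt2017_dataset_dir, usage='train',
...                               language_pair=('it', 'en'), shuffle=False)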
About IWSLT2017 dataset:
IWSLT (International Workshop on Spoken Language Translation) is a major annual scientific conference dedicated to
all aspects of spoken language translation. The MT task of the IWSLT evaluation campaign provides a dataset, which is
publicly available through the WIT3 website wit3.fbk.eu. The IWSLT2017 dataset involves German, English, Italian,
Dutch, and Romanian, and includes translations between any two of these languages.
You can unzip the original IWSLT2017 dataset files into the directory structure below and read them with MindSpore's
API. You need to decompress the dataset package in the texts/DeEnItNlRo/DeEnItNlRo directory to get the
DeEnItNlRo-DeEnItNlRo subdirectory.
.. code-block::
.
└── iwslt2017_dataset_directory
└── DeEnItNlRo
└── DeEnItNlRo
└── DeEnItNlRo-DeEnItNlRo
├── IWSLT17.TED.dev2010.de-en.de.xml
├── train.tags.de-en.de
├── ...
Citation:
.. code-block::
@inproceedings{cettoloEtAl:EAMT2012,
Address = {Trento, Italy},
Author = {Mauro Cettolo and Christian Girardi and Marcello Federico},
Booktitle = {Proceedings of the 16$^{th}$ Conference of the European Association for Machine Translation
(EAMT)},
Date = {28-30},
Month = {May},
Pages = {261--268},
Title = {WIT$^3$: Web Inventory of Transcribed and Translated Talks},
Year = {2012}}
"""
@check_iwslt2017_dataset
def __init__(self, dataset_dir, usage=None, language_pair=None, num_samples=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, num_parallel_workers=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, 'all')
self.language_pair = replace_none(language_pair, ["de", "en"])
def parse(self, children=None):
return cde.IWSLT2017Node(self.dataset_dir, self.usage, self.language_pair, self.num_samples,
self.shuffle_flag, self.num_shards, self.shard_id)
class KMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the KMNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 60,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If `dataset_dir` does not contain data files.
RuntimeError: If `num_parallel_workers` exceeds the maximum number of threads.
RuntimeError: If `sampler` and `shuffle` are specified at the same time.
RuntimeError: If `sampler` and sharding are specified at the same time.
RuntimeError: If `num_shards` is specified but `shard_id` is None.
RuntimeError: If `shard_id` is specified but `num_shards` is None.
ValueError: If `shard_id` is invalid (< 0 or >= `num_shards`).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> kmnist_dataset_dir = "/path/to/kmnist_dataset_directory"
>>>
>>> # Read 3 samples from KMNIST dataset
>>> dataset = ds.KMnistDataset(dataset_dir=kmnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In kmnist_dataset dataset, each dictionary has keys "image" and "label"
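>>> # A further illustrative sketch (directory path is a placeholder): read the 10,000 test
>>> # samples across 2 shards, with this process taking shard 0
>>> dataset = ds.KMnistDataset(dataset_dir=kmnist_dataset_dir, usage="test",
...                            num_shards=2, shard_id=0)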
About KMNIST dataset:
KMNIST (Kuzushiji-MNIST) is a dataset adapted from the Kuzushiji Dataset as a drop-in replacement for MNIST,
which is the most famous dataset in the machine learning community.
Here is the original KMNIST dataset structure.
You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── kmnist_dataset_dir
├── t10k-images-idx3-ubyte
├── t10k-labels-idx1-ubyte
├── train-images-idx3-ubyte
└── train-labels-idx1-ubyte
Citation:
.. code-block::
@online{clanuwat2018deep,
author = {Tarin Clanuwat and Mikel Bober-Irizar and Asanobu Kitamoto and
Alex Lamb and Kazuaki Yamamoto and David Ha},
title = {Deep Learning for Classical Japanese Literature},
date = {2018-12-03},
year = {2018},
eprintclass = {cs.CV},
eprinttype = {arXiv},
eprint = {cs.CV/1812.01718},
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.KMnistNode(self.dataset_dir, self.usage, self.sampler)
class MnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the MNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 60,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> mnist_dataset_dir = "/path/to/mnist_dataset_directory"
>>>
>>> # Read 3 samples from MNIST dataset
>>> dataset = ds.MnistDataset(dataset_dir=mnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In mnist_dataset dataset, each dictionary has keys "image" and "label"
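>>> # A further illustrative sketch (directory path is a placeholder): read the 10,000 test
>>> # samples without shuffling
>>> dataset = ds.MnistDataset(dataset_dir=mnist_dataset_dir, usage="test", shuffle=False)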
About MNIST dataset:
The MNIST database of handwritten digits has a training set of 60,000 examples,
and a test set of 10,000 examples. It is a subset of a larger set available from
NIST. The digits have been size-normalized and centered in a fixed-size image.
Here is the original MNIST dataset structure.
You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── mnist_dataset_dir
├── t10k-images-idx3-ubyte
├── t10k-labels-idx1-ubyte
├── train-images-idx3-ubyte
└── train-labels-idx1-ubyte
Citation:
.. code-block::
@article{lecun2010mnist,
title = {MNIST handwritten digit database},
author = {LeCun, Yann and Cortes, Corinna and Burges, CJ},
journal = {ATT Labs [Online]},
volume = {2},
year = {2010},
howpublished = {http://yann.lecun.com/exdb/mnist}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.MnistNode(self.dataset_dir, self.usage, self.sampler)
class PennTreebankDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses PennTreebank datasets.
The generated dataset has one column :py:obj:`[text]`.
The tensor of column :py:obj:`text` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Acceptable usages include `train`, `test`, `valid` and `all`.
`train` will read from 42,068 train samples of string type,
`test` will read from 3,761 test samples of string type,
`valid` will read from 3,370 valid samples of string type,
`all` will read from all 49,199 samples of string type (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Examples:
>>> penn_treebank_dataset_dir = "path/to/penn_treebank_dataset_directory"
>>> dataset = ds.PennTreebankDataset(dataset_dir=penn_treebank_dataset_dir, usage='all')
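>>> # Illustrative sketch (directory path is a placeholder): read only the test split without shuffling
>>> dataset = ds.PennTreebankDataset(dataset_dir=penn_treebank_dataset_dir, usage='test',
...                                  shuffle=False)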
About PennTreebank dataset:
The Penn Treebank (PTB) dataset is widely used in machine learning for NLP (Natural Language Processing)
research. Word-level PTB does not contain capital letters, numbers, or punctuation, and its vocabulary
is capped at 10k unique words, which is relatively small in comparison to most modern datasets and
can result in a larger number of out-of-vocabulary tokens.
Here is the original PennTreebank dataset structure.
You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── PennTreebank_dataset_dir
├── ptb.test.txt
├── ptb.train.txt
└── ptb.valid.txt
Citation:
.. code-block::
@techreport{Santorini1990,
added-at = {2014-03-26T23:25:56.000+0100},
author = {Santorini, Beatrice},
biburl = {https://www.bibsonomy.org/bibtex/234cdf6ddadd89376090e7dada2fc18ec/butonic},
file = {:Santorini - Penn Treebank tag definitions.pdf:PDF},
institution = {Department of Computer and Information Science, University of Pennsylvania},
interhash = {818e72efd9e4b5fae3e51e88848100a0},
intrahash = {34cdf6ddadd89376090e7dada2fc18ec},
keywords = {dis pos tagging treebank},
number = {MS-CIS-90-47},
timestamp = {2014-03-26T23:25:56.000+0100},
title = {Part-of-speech tagging guidelines for the {P}enn {T}reebank {P}roject},
url = {ftp://ftp.cis.upenn.edu/pub/treebank/doc/tagguide.ps.gz},
year = 1990
}
"""
@check_penn_treebank_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.PennTreebankNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class PhotoTourDataset(MappableDataset):
"""
A source dataset for reading and parsing the PhotoTour dataset.
The generated dataset with different usage has different output columns.
If train, the generated dataset has one column :py:obj:`[image]`,
else three columns :py:obj:`[image1, image2, matches]`.
The tensor of column :py:obj:`image`, :py:obj:`image1` and :py:obj:`image2` is of the uint8 type.
The tensor of column :py:obj:`matches` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
name (str): Name of the dataset to load,
should be one of 'notredame', 'yosemite', 'liberty', 'notredame_harris',
'yosemite_harris' or 'liberty_harris'.
usage (str, optional): Usage of the dataset, can be `train` or `test` (Default=None, will be set to 'train').
When usage is `train`, number of samples for each `name` is
{'notredame': 468159, 'yosemite': 633587, 'liberty': 450092, 'liberty_harris': 379587,
'yosemite_harris': 450912, 'notredame_harris': 325295}.
When usage is `test`, will read 100,000 samples for testing.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If usage is not in ["train", "test"].
ValueError: If name is not in ["notredame", "yosemite", "liberty",
"notredame_harris", "yosemite_harris", "liberty_harris"].
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. `sampler` and `shuffle` are mutually exclusive. The table
below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> # Read 3 samples from PhotoTour dataset.
>>> dataset = ds.PhotoTourDataset(dataset_dir="/path/to/photo_tour_dataset_directory",
... name='liberty', usage='train', num_samples=3)
>>>
>>> # In PhotoTourDataset dataset, if usage is 'train', each dictionary has key "image",
>>> # else has keys "image1" "image2" and "matches".
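>>> # Illustrative sketch (directory path is a placeholder): read the 100,000 test samples
>>> # of the 'yosemite' subset without shuffling
>>> dataset = ds.PhotoTourDataset(dataset_dir="/path/to/photo_tour_dataset_directory",
...                               name='yosemite', usage='test', shuffle=False)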
About PhotoTour dataset:
The data is taken from Photo Tourism reconstructions from Trevi Fountain (Rome), Notre Dame (Paris) and Half
Dome (Yosemite). Each dataset consists of a series of corresponding patches, which are obtained by projecting
3D points from Photo Tourism reconstructions back into the original images.
The dataset consists of 1024 x 1024 bitmap (.bmp) images, each containing a 16 x 16 array of image patches.
Each patch is sampled as 64 x 64 grayscale, with a canonical scale and orientation. For details of how the scale
and orientation is established, please see the paper. An associated metadata file info.txt contains the match
information. Each row of info.txt corresponds to a separate patch, with the patches ordered from left to right and
top to bottom in each bitmap image. The first number on each row of info.txt is the 3D point ID from which that
patch was sampled -- patches with the same 3D point ID are projected from the same 3D point (into different images).
The second number in info.txt corresponds to the image from which the patch was sampled, and is not used at present.
You can unzip the original PhotoTour dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── photo_tour_dataset_directory
├── liberty/
│ ├── info.txt // two columns: 3D_point_ID, unused
│ ├── m50_100000_100000_0.txt // seven columns: patch_ID1, 3D_point_ID1, unused1,
│ │ // patch_ID2, 3D_point_ID2, unused2, unused3
│ ├── patches0000.bmp // 1024*1024 pixels, with 16 * 16 patches.
│ ├── patches0001.bmp
│ ├── ...
├── yosemite/
│ ├── ...
├── notredame/
│ ├── ...
├── liberty_harris/
│ ├── ...
├── yosemite_harris/
│ ├── ...
├── notredame_harris/
│ ├── ...
Citation:
.. code-block::
@INPROCEEDINGS{4269996,
author={Winder, Simon A. J. and Brown, Matthew},
booktitle={2007 IEEE Conference on Computer Vision and Pattern Recognition},
title={Learning Local Image Descriptors},
year={2007},
volume={},
number={},
pages={1-8},
doi={10.1109/CVPR.2007.382971}
}
"""
@check_photo_tour_dataset
def __init__(self, dataset_dir, name, usage=None, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.name = name
self.usage = replace_none(usage, "train")
def parse(self, children=None):
return cde.PhotoTourNode(self.dataset_dir, self.name, self.usage, self.sampler)
class Places365Dataset(MappableDataset):
"""
A source dataset for reading and parsing the Places365 dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train-standard`, `train-challenge` or `val`
(default=None, will be set to 'train-standard').
small (bool, optional): Use 256 * 256 images (True) or high resolution images (False) (default=True).
decode (bool, optional): Decode the images after reading (default=False).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
ValueError: If usage is not in ["train-standard", "train-challenge", "val"].
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> place365_dataset_dir = "/path/to/place365_dataset_directory"
>>>
>>> # Read 3 samples from Places365 dataset
>>> dataset = ds.Places365Dataset(dataset_dir=place365_dataset_dir, usage='train-standard',
... small=True, decode=True, num_samples=3)
>>>
>>> # In places365 dataset, each dictionary has keys "image" and "label".
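>>> # Illustrative sketch (directory path is a placeholder): read the validation split
>>> # using the high resolution images
>>> dataset = ds.Places365Dataset(dataset_dir=place365_dataset_dir, usage='val',
...                               small=False, decode=True)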
About Places365 dataset:
Convolutional neural networks (CNNs) trained on the Places2 Database can be used for scene recognition as well as
generic deep scene features for visual recognition.
The author releases the data of Places365-Standard and the data of Places365-Challenge to the public.
Places365-Standard is the core set of Places2 Database, which has been used to train the Places365-CNNs. The author
will add other kinds of annotation on the Places365-Standard in the future. Places365-Challenge is the competition
set of Places2 Database, which has 6.2 million extra images compared to the Places365-Standard.
The Places365-Challenge will be used for the Places Challenge 2016.
You can unzip the original Places365 dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
├── categories_places365.txt
├── places365_train-standard.txt
├── places365_train-challenge.txt
├── val_large/
│ ├── Places365_val_00000001.jpg
│ ├── Places365_val_00000002.jpg
│ ├── Places365_val_00000003.jpg
│ ├── ...
├── val_256/
│ ├── ...
├── data_large_standard/
│ ├── ...
├── data_256_standard/
│ ├── ...
├── data_large_challenge/
│ ├── ...
├── data_256_challenge/
│ ├── ...
Citation:
.. code-block::
@article{zhou2017places,
title={Places: A 10 million Image Database for Scene Recognition},
author={Zhou, Bolei and Lapedriza, Agata and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
year={2017},
publisher={IEEE}
}
"""
@check_places365_dataset
def __init__(self, dataset_dir, usage=None, small=True, decode=False, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = os.path.abspath(dataset_dir)
self.usage = replace_none(usage, "train-standard")
self.small = small
self.decode = decode
def parse(self, children=None):
return cde.Places365Node(self.dataset_dir, self.usage, self.small, self.decode, self.sampler)
class QMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the QMNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is of the uint32 type; it is a scalar when `compat` is True, otherwise a tensor containing the full QMNIST label information.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test`, `test10k`, `test50k`, `nist`
or `all` (default=None, will read all samples).
compat (bool, optional): Whether the label for each example is class number (compat=True) or the full QMNIST
information (compat=False) (default=True).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> qmnist_dataset_dir = "/path/to/qmnist_dataset_directory"
>>>
>>> # Read 3 samples from QMNIST train dataset
>>> dataset = ds.QMnistDataset(dataset_dir=qmnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In QMNIST dataset, each dictionary has keys "image" and "label"
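>>> # Illustrative sketch (directory path is a placeholder): read the extended 'nist' split
>>> # with the full QMNIST label information instead of the class number
>>> dataset = ds.QMnistDataset(dataset_dir=qmnist_dataset_dir, usage='nist', compat=False)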
About QMNIST dataset:
The QMNIST dataset was generated from the original data found in the NIST Special Database 19, with the goal of
matching the MNIST preprocessing as closely as possible.
Through an iterative process, researchers tried to generate an additional 50k images of MNIST-like data.
They started with a reconstruction process given in the paper and used the Hungarian algorithm to find the best
matches between the original MNIST samples and their reconstructed samples.
Here is the original QMNIST dataset structure.
You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── qmnist_dataset_dir
├── qmnist-train-images-idx3-ubyte
├── qmnist-train-labels-idx2-int
├── qmnist-test-images-idx3-ubyte
├── qmnist-test-labels-idx2-int
├── xnist-images-idx3-ubyte
└── xnist-labels-idx2-int
Citation:
.. code-block::
@incollection{qmnist-2019,
title = "Cold Case: The Lost MNIST Digits",
author = "Chhavi Yadav and L\'{e}on Bottou",\
booktitle = {Advances in Neural Information Processing Systems 32},
year = {2019},
publisher = {Curran Associates, Inc.},
}
"""
@check_qmnist_dataset
def __init__(self, dataset_dir, usage=None, compat=True, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
self.compat = compat
def parse(self, children=None):
return cde.QMnistNode(self.dataset_dir, self.usage, self.compat, self.sampler)
class MindDataset(MappableDataset, TextBaseDataset):
"""
A source dataset for reading and parsing MindRecord dataset.
The columns of generated dataset depend on the source MindRecord files.
Args:
dataset_files (Union[str, list[str]]): If dataset_files is a str, it represents the file name of one
component of a MindRecord source; other files with an identical source in the same path will be
found and loaded automatically. If dataset_files is a list, it represents a list of dataset files
to be read directly.
columns_list (list[str], optional): List of columns to be read (default=None).
num_parallel_workers (int, optional): The number of readers (default=None).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=None, performs global shuffle).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are three levels of shuffling:
- Shuffle.GLOBAL: Global shuffle of all rows of data in dataset.
- Shuffle.FILES: Shuffle the file sequence but keep the order of data within each file.
- Shuffle.INFILE: Keep the file sequence the same but shuffle the data within each file.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None; `sampler` is mutually exclusive with `shuffle`). Supported samplers:
SubsetRandomSampler, PKSampler, RandomSampler, SequentialSampler, DistributedSampler.
padded_sample (dict, optional): Samples will be appended to dataset, where
keys are the same as column_list.
num_padded (int, optional): Number of padding samples. Dataset size
plus num_padded should be divisible by num_shards.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all samples).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the maximum number of threads.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> mind_dataset_dir = ["/path/to/mind_dataset_file"] # contains 1 or multiple MindRecord files
>>> dataset = ds.MindDataset(dataset_files=mind_dataset_dir)
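>>> # Illustrative sketch: read only two columns (the column names below are placeholders and
>>> # must exist in the MindRecord files) and shuffle at file level only
>>> dataset = ds.MindDataset(dataset_files=mind_dataset_dir, columns_list=["image", "label"],
...                          shuffle=ds.Shuffle.FILES)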
"""
def parse(self, children=None):
return cde.MindDataNode(self.dataset_files, self.columns_list, self.sampler, self.new_padded_sample,
self.num_padded, shuffle_to_shuffle_mode(self.shuffle_option))
@check_minddataset
def __init__(self, dataset_files, columns_list=None, num_parallel_workers=None, shuffle=None, num_shards=None,
shard_id=None, sampler=None, padded_sample=None, num_padded=None, num_samples=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle_to_bool(shuffle), num_shards=num_shards, shard_id=shard_id, cache=cache)
if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or "
"'Shuffle.FILES' or 'Shuffle.INFILE'.")
if num_samples and shuffle in (Shuffle.FILES, Shuffle.INFILE):
raise ValueError("'Shuffle.FILES' or 'Shuffle.INFILE' and 'num_samples' "
"cannot be specified at the same time.")
self.shuffle_option = shuffle
if isinstance(dataset_files, list):
self.load_dataset = False
else:
self.load_dataset = True
self.dataset_files = dataset_files
self.columns_list = replace_none(columns_list, [])
if shuffle is False:
logger.warning("WARN: global shuffle is not used.")
if sampler is not None:
if isinstance(sampler, (
samplers.SubsetRandomSampler, samplers.SubsetSampler, samplers.PKSampler,
samplers.DistributedSampler,
samplers.RandomSampler, samplers.SequentialSampler)) is False:
raise ValueError("The sampler is not supported yet.")
self.padded_sample = padded_sample
self.num_padded = replace_none(num_padded, 0)
self.new_padded_sample = {}
if padded_sample:
for k, v in padded_sample.items():
if isinstance(v, np.ndarray):
self.new_padded_sample[k] = v.tobytes()
else:
self.new_padded_sample[k] = v
def _iter_fn(dataset, num_samples):
"""
Generator function wrapper for iterable dataset.
"""
if num_samples is not None and num_samples != 0:
ds_iter = iter(dataset)
for _ in range(num_samples):
try:
val = next(ds_iter)
except StopIteration:
return
# convert output tensors to ndarrays
yield _convert_row(val)
else:
for val in dataset:
# convert output tensors to ndarrays
yield _convert_row(val)
def _generator_fn(generator, num_samples):
"""
Generator function wrapper for generator function dataset.
"""
if num_samples is not None and num_samples != 0:
gen_iter = generator()
for _ in range(num_samples):
try:
val = next(gen_iter)
except StopIteration:
return
yield val
else:
gen_iter = generator()
for val in gen_iter:
yield val
def _cpp_sampler_fn(sample_ids, dataset):
"""
Generator function wrapper for mappable dataset with cpp sampler.
"""
if not isinstance(sample_ids, np.ndarray):
raise RuntimeError("Sample IDs are not in a numpy array.")
if sample_ids.size == 0:
raise RuntimeError("Sampler passed an empty sample IDs list.")
for i in sample_ids:
val = dataset[i]
# convert output tensors to ndarrays
yield _convert_row(val)
def _cpp_sampler_fn_mp(sample_ids, sample_fn):
"""
Multiprocessing generator function wrapper for mappable dataset with cpp sampler.
"""
if not isinstance(sample_ids, np.ndarray):
raise RuntimeError("Sample IDs are not in a numpy array.")
if sample_ids.size == 0:
raise RuntimeError("Sampler passed an empty sample IDs list.")
return sample_fn.process(sample_ids)
def _fill_worker_indices(workers, indices, idx):
"""
Fill the worker index queues in round-robin order.
"""
num_worker = len(workers)
while idx < len(indices):
try:
workers[idx % num_worker].put(indices[idx])
idx += 1
except queue.Full:
break
return idx
def _check_shm_usage(num_worker, queue_size, max_rowsize, num_queues=1):
"""
Check that sufficient shared memory is available for the shared memory queues
when training in parallel mode.
"""
threshold_ratio = 0.8
if platform.system().lower() not in {"windows", "darwin"}:
shm_estimate_usage = _get_device_num() * num_worker * num_queues * \
(queue_size + 2) * max_rowsize * 1024 * 1024
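# For example (hypothetical numbers for illustration): on a single device with 8 workers,
# one queue of size 8 and max_rowsize=6, the estimate is 1 * 8 * 1 * (8 + 2) * 6 MB = 480 MB of /dev/shm.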
try:
shm_available = psutil.disk_usage('/dev/shm').free
if shm_estimate_usage >= threshold_ratio * shm_available:
raise RuntimeError(
"Insufficient shared memory available. Required: {}, Available: {}. "
"The required memory can't exceed 80% of the available shared memory. "
"Recommend to set_enable_shared_mem to False, reduce max_rowsize or reduce num_parallel_workers."
.format(shm_estimate_usage, shm_available))
except FileNotFoundError:
raise RuntimeError("Expected /dev/shm to exist.")
def _convert_row(row):
"""
Convert a row returned by the user-defined source into a tuple of NumPy arrays.
"""
value = []
if isinstance(row, dict):
raise ValueError("Return value in user defined python function should be numpy array, but got dict.")
# convert each column in row into numpy array
for x in row:
if isinstance(x, bytes): # got image bytes from a file
value.append(np.frombuffer(x, np.uint8))
elif isinstance(x, Tensor): # got mindspore.Tensor
value.append(x.asnumpy())
elif isinstance(x, dict):
raise ValueError("Return value in user defined python function should be numpy array, but got dict.")
else:
value.append(np.array(x, copy=False))
return tuple(value)
class SamplerFn:
"""
Master-process wrapper that drives the multiprocessing or multithreading generator workers.
"""
def __init__(self, dataset, num_worker, multi_process, max_rowsize):
self.workers = []
self.num_worker = num_worker
self.multi_process = multi_process
self.need_join = False
self.ppid = os.getpid()
self.pids = []
self.check_interval = 300 # the interval of check queue's size
self._final_join = True
# Event for end of epoch
if multi_process is True:
try:
self.eof = multiprocessing.Event()
except Exception:
raise RuntimeError("Init multiprocessing.Event() failed, This might be caused by insufficient shm,"
+ " and the recommended shm size is at least 5 GB.")
else:
self.eof = threading.Event()
# Create workers
# Get the default queue size and shrink the per-worker queue size when there are many workers.
queue_size = get_prefetch_size()
queue_size = min(queue_size, queue_size * 4 // num_worker)
queue_size = max(2, queue_size)
if multi_process and get_enable_shared_mem():
_check_shm_usage(num_worker, queue_size, max_rowsize)
for _ in range(num_worker):
if multi_process is True:
try:
worker = _GeneratorWorkerMp(dataset, self.eof, max_rowsize, queue_size)
except Exception:
raise RuntimeError("Init multiprocessing.Queue() failed, This might be caused by insufficient shm,"
+ " and the recommended shm size is at least 5 GB.")
worker.daemon = True
# When multi processes fork a subprocess, the lock of the main process is copied to the subprocess,
# which may cause deadlock. Therefore, the subprocess startup is performed in the initialization phase.
# In this phase, the main process is not locked.
worker.start()
self.pids.append(worker.pid)
self.need_join = True
else:
worker = _GeneratorWorkerMt(dataset, self.eof)
worker.daemon = True
self.workers.append(worker)
if multi_process is True and platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.workers))
self.watch_dog.daemon = True
self.watch_dog.start()
if self._final_join is True:
self._jointhread = Finalize(
self.watch_dog, self._finalize_join,
args=(weakref.ref(self.watch_dog), self.eot),
exitpriority=-5
)
def process(self, indices):
"""
Main-process loop: start the child processes or threads, fill the index queues,
then fetch the results and yield them.
"""
for w in self.workers:
# Check whether the queue of the subprocess is empty.
if not w.queue_empty():
raise Exception("The queue of the subprocess is not empty.")
# Start all workers
if not w.is_alive():
w.start()
# Fill initial index queues
idx_cursor = 0
idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
# Fetch results
for i in range(len(indices)):
if self.eof.is_set():
self._stop_subprocess()
return
if self.multi_process is True and not psutil.pid_exists(self.workers[i % self.num_worker].pid):
self._stop_subprocess()
return
# Fetch result and put index
try:
# To avoid a get() timeout on the queue, poll res_queue until data is available.
start_time = int(time.time())
wait_count = 1
while self.workers[i % self.num_worker].res_queue.empty():
time.sleep(0.1)
cost_time = int(time.time()) - start_time
if cost_time / self.check_interval >= wait_count:
wait_count += 1
logger.warning("It has been waiting for " + str(cost_time) + "s because the multi "
"thread/process of the generator generates data had been hung by gil lock.")
result = self.workers[i % self.num_worker].get()
if isinstance(result, ExceptionHandler):
result.reraise()
except queue.Empty:
self._stop_subprocess()
raise Exception("Generator worker process timeout.")
except KeyboardInterrupt:
self._stop_subprocess()
raise Exception("Generator worker receives KeyboardInterrupt.")
if self.eof.is_set():
self._stop_subprocess()
return
if idx_cursor < len(indices):
idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
yield _convert_row(result)
def _stop_subprocess(self):
"""Only the main process can call join."""
if self.need_join is True and self.ppid == os.getpid():
self.eof.set()
self.need_join = False
for w in self.workers:
if self.multi_process is True and hasattr(w, '_closed') and w._closed is False: # pylint: disable=W0212
w.join()
self._abort_watchdog()
def _abort_watchdog(self):
if hasattr(self, 'eot') and self.eot is not None and not self.eot.is_set():
self.eot.set()
@classmethod
def _finalize_join(cls, twr, eot):
thread = twr()
if thread is not None:
if eot is not None and not eot.is_set():
eot.set()
thread.join()
def __del__(self):
self._stop_subprocess()
def _subprocess_handle(eof, signum, frame):
threading.Thread(target=eof.set).start()
def _generator_worker_loop(dataset, idx_queue, result_queue, eof, is_multiprocessing):
"""
Multithread or multiprocess generator worker process loop.
"""
if is_multiprocessing:
signal.signal(signal.SIGTERM, partial(_subprocess_handle, eof))
while True:
# Fetch index, block
try:
idx = idx_queue.get(timeout=1)
except KeyboardInterrupt:
if is_multiprocessing:
eof.set()
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
raise Exception("Generator worker receives KeyboardInterrupt.")
except queue.Empty:
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# If end-of-file (eof) is not set, continue to get data from idx_queue
continue
if idx is None:
# When the queue is out of scope from master process, a None item can be fetched from the queue.
# Upon receiving None, worker process should check if eof is set.
if not eof.is_set():
raise Exception("")
return
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# Fetch data, any exception from __getitem__ will terminate worker and timeout master process
try:
result = dataset[idx]
except Exception:
result = ExceptionHandler(where="in GeneratorDataset worker process")
# Send data, block
while True:
try:
result_queue.put(result, timeout=5)
except KeyboardInterrupt:
if is_multiprocessing:
eof.set()
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
raise Exception("Generator worker receives KeyboardInterrupt.")
except queue.Full:
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# If eof is not set, continue to put data to result_queue
continue
break
del result, idx
class _GeneratorWorkerMt(threading.Thread):
"""
Worker thread for multi-thread Generator.
"""
def __init__(self, dataset, eof):
self.idx_queue = queue.Queue(16)
self.res_queue = queue.Queue(16)
super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, False))
def put(self, item):
"""
Put function for worker index queue. Never block. Raise queue.Full on failure.
"""
self.idx_queue.put_nowait(item)
def get(self):
"""
Get function for worker result queue. Block with timeout.
"""
return self.res_queue.get(timeout=30)
def queue_empty(self):
if not self.idx_queue.empty():
logger.warning("idx_queue is not empty")
return False
if not self.res_queue.empty():
logger.warning("res_queue is not empty")
return False
return True
class _GeneratorWorkerMp(multiprocessing.Process):
"""
Worker process for multiprocess Generator.
"""
def __init__(self, dataset, eof, max_rowsize, queue_size):
self.idx_queue = multiprocessing.Queue(queue_size)
if get_enable_shared_mem():
self.res_queue = _SharedQueue(queue_size, max_rowsize=max_rowsize)
else:
self.res_queue = multiprocessing.Queue(queue_size)
self.idx_queue._joincancelled = True # pylint: disable=W0212
self.res_queue._joincancelled = True # pylint: disable=W0212
super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, True))
def put(self, item):
"""
Put function for worker index queue. Never block. Raise queue.Full on failure.
"""
self.idx_queue.put_nowait(item)
def get(self):
"""
Get function for worker result queue. Block with timeout.
"""
# Relaxed from 10s to 30s, since a shorter timeout sometimes causes "Generator worker process timeout"
# when running too many iterators with an infinite epoch (num_epoch=-1).
return self.res_queue.get(timeout=30)
def queue_empty(self):
if not self.idx_queue.empty():
logger.warning("idx_queue is not empty.")
return False
if not self.res_queue.empty():
logger.warning("res_queue is not empty.")
return False
return True
class GeneratorDataset(MappableDataset, TextBaseDataset):
"""
A source dataset that generates data from Python by invoking Python data source each epoch.
The column names and column types of generated dataset depend on Python data defined by users.
Args:
source (Union[Callable, Iterable, Random Accessible]):
A generator callable object, an iterable Python object or a random accessible Python object.
Callable source is required to return a tuple of NumPy arrays as a row of the dataset on source().next().
Iterable source is required to return a tuple of NumPy arrays as a row of the dataset on
iter(source).next().
Random accessible source is required to return a tuple of NumPy arrays as a row of the dataset on
source[idx].
column_names (Union[str, list[str]], optional): List of column names of the dataset (default=None). Users are
required to provide either column_names or schema.
column_types (list[mindspore.dtype], optional): List of column data types of the dataset (default=None).
If provided, sanity check will be performed on generator output.
schema (Union[Schema, str], optional): Path to the JSON schema file or schema object (default=None). Users are
required to provide either column_names or schema. If both are provided, schema will be used.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all samples).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, `num_samples` reflects the maximum
number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker process. This
option could be beneficial if the Python operation is computational heavy (default=True).
max_rowsize (int, optional): Maximum size of a row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=6 MB).
Raises:
RuntimeError: If source raises an exception during execution.
RuntimeError: If len of column_names does not match output len of source.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- Input `source` accepts user-defined Python functions (PyFuncs). Do not add network computing operators
from mindspore.nn, mindspore.ops or other such modules into this `source`.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> import numpy as np
>>>
>>> # 1) Multidimensional generator function as callable input.
>>> def generator_multidimensional():
... for i in range(64):
... yield (np.array([[i, i + 1], [i + 2, i + 3]]),)
>>>
>>> dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["multi_dimensional_data"])
>>>
>>> # 2) Multi-column generator function as callable input.
>>> def generator_multi_column():
... for i in range(64):
... yield np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]])
>>>
>>> dataset = ds.GeneratorDataset(source=generator_multi_column, column_names=["col1", "col2"])
>>>
>>> # 3) Iterable dataset as iterable input.
>>> class MyIterable:
... def __init__(self):
... self._index = 0
... self._data = np.random.sample((5, 2))
... self._label = np.random.sample((5, 1))
...
... def __next__(self):
... if self._index >= len(self._data):
... raise StopIteration
... else:
... item = (self._data[self._index], self._label[self._index])
... self._index += 1
... return item
...
... def __iter__(self):
... self._index = 0
... return self
...
... def __len__(self):
... return len(self._data)
>>>
>>> dataset = ds.GeneratorDataset(source=MyIterable(), column_names=["data", "label"])
>>>
>>> # 4) Random accessible dataset as random accessible input.
>>> class MyAccessible:
... def __init__(self):
... self._data = np.random.sample((5, 2))
... self._label = np.random.sample((5, 1))
...
... def __getitem__(self, index):
... return self._data[index], self._label[index]
...
... def __len__(self):
... return len(self._data)
>>>
>>> dataset = ds.GeneratorDataset(source=MyAccessible(), column_names=["data", "label"])
>>>
>>> # list, dict, tuple of Python is also random accessible
>>> dataset = ds.GeneratorDataset(source=[(np.array(0),), (np.array(1),), (np.array(2),)], column_names=["col"])
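>>>
>>> # 5) A hedged sketch of distributed sharding over a random accessible source.
>>> # The shard values below are placeholders for a 2-device setup.
>>> dataset = ds.GeneratorDataset(source=MyAccessible(), column_names=["data", "label"],
...                               num_shards=2, shard_id=0, shuffle=True)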
"""
@check_generatordataset
def __init__(self, source, column_names=None, column_types=None, schema=None, num_samples=None,
num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None,
python_multiprocessing=True, max_rowsize=6):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id)
if isinstance(source, builtins.zip):
# Although zip is iterable, it cannot be iterated more than once, so materialize it into a list.
self.source = [item for item in source]
else:
self.source = source
self.prepared_source = None # source to be sent to C++
if hasattr(self, 'operator_mixed') and getattr(self, 'operator_mixed') is True:
self.num_parallel_workers = 1
logger.warning(
"Input 'source' of 'GeneratorDataset' includes network computing operators like in mindspore.nn, "
"mindspore.ops, mindspore.numpy module and etc, which do not support multi-thread compiling, recommend"
" to replace it with python implemented operator like numpy etc. Here decrease 'num_parallel_workers' "
"into 1.")
self.python_multiprocessing = python_multiprocessing
self.column_names = to_list(column_names)
if column_types is not None:
self.column_types = mstypelist_to_detypelist(column_types)
else:
self.column_types = []
self.schema = schema
if schema is not None:
self.schema = schema
if not isinstance(schema, Schema):
self.schema = Schema(schema)
# Get the dataset size via len() here rather than in parse(), because self.source will
# lose its '__len__' attribute after deepcopy.
self.source_len = -1 # unknown
if hasattr(self.source, "__len__"):
self.source_len = len(self.source)
self.max_rowsize = max_rowsize
self.sample_fn = None
def __deepcopy__(self, memodict):
if id(self) in memodict:
return memodict[id(self)]
new_op = self.__safe_deepcopy__(memodict, exclude=("source", "__transfer_dataset__"))
sample_fn = None
if new_op.sampler is not None and hasattr(self.source, "__getitem__"):
# The reason why there is a try catch here is because when the new op is being constructed with shared
# memory enabled, there will be an exception thrown if there is not enough shared memory available
if self.source_len == -1:
raise RuntimeError("Attempt to construct a random access dataset, '__len__' method is required!")
try:
if new_op.num_parallel_workers > 1:
self.__validate_memory_usage()
sample_fn = SamplerFn(self.source, new_op.num_parallel_workers, self.python_multiprocessing,
self.max_rowsize)
new_op.prepared_source = (lambda sample_ids: _cpp_sampler_fn_mp(sample_ids, sample_fn))
else:
new_op.prepared_source = (lambda sample_ids: _cpp_sampler_fn(sample_ids, self.source))
new_op.sample_fn = sample_fn
except RuntimeError as e:
raise Exception(str(e))
else:
try:
new_op.sampler = None
new_op.sample_fn = sample_fn
new_op.source_len = min(new_op.source_len,
new_op.num_samples) if new_op.num_samples != 0 else new_op.source_len
iter(self.source)
except TypeError:
# Use generator function if input callable
new_op.prepared_source = (lambda: _generator_fn(self.source, new_op.num_samples))
else:
# Use iterator function if input is iterable
# Random accessible input is also iterable
new_op.prepared_source = (lambda: _iter_fn(self.source, new_op.num_samples))
return new_op
def is_shuffled(self):
return self.sampler.is_shuffled()
def is_sharded(self):
return self.sampler.is_sharded()
def parse(self, children=None):
if self.schema is None:
return cde.GeneratorNode(self.prepared_source, self.column_names, self.column_types, self.source_len,
self.sampler, self.num_parallel_workers)
schema = self.schema
if isinstance(schema, Schema):
schema = self.schema.cpp_schema
return cde.GeneratorNode(self.prepared_source, schema, self.source_len, self.sampler,
self.num_parallel_workers)
def __validate_memory_usage(self):
"""
Check memory usage in multiprocessing mode and prompt a warning when the estimated usage exceeds 85% of free memory.
"""
if self.python_multiprocessing:
# If num_parallel_workers is too large when python_multiprocessing=True, it may cause an
# OOM error. First determine the effective number of shards.
valid_num_shards = 1
if isinstance(self.sampler, samplers.DistributedSampler):
valid_num_shards = self.sampler.num_shards
elif self.num_shards is not None:
valid_num_shards = self.num_shards
# get process memory usage
process = psutil.Process(os.getpid())
process_memory = process.memory_info().rss
sys_memory_free = psutil.virtual_memory().free
total_memory_maybe_used = process_memory * self.num_parallel_workers * valid_num_shards
if total_memory_maybe_used / sys_memory_free > 0.85:
valid_num_worker = math.floor(sys_memory_free * 0.85 / valid_num_shards / process_memory)
valid_num_worker = 1 if valid_num_worker <= 0 else valid_num_worker
info = "GeneratorDataset num_parallel_workers: " + str(self.num_parallel_workers) + \
" is too large which maybe cause a lot of memory occupation (>85%) or out of memory(OOM) " \
"during multi process running. Therefore, it is recommended to reduce num_parallel_workers to " \
+ str(valid_num_worker) + " or smaller."
logger.warning(info)
class TFRecordDataset(SourceDataset, TextBaseDataset):
"""
A source dataset for reading and parsing datasets stored on disk in TFData format.
The columns of generated dataset depend on the source TFRecord files.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a
pattern of files. The list will be sorted in a lexicographical order.
schema (Union[str, Schema], optional): Path to the JSON schema file or schema object (default=None).
If the schema is not provided, the meta data from the TFData file is considered the schema.
columns_list (list[str], optional): List of columns to be read (default=None, read all columns).
num_samples (int, optional): The number of samples (rows) to be included in the dataset (default=None).
If num_samples is None and numRows(parsed from schema) does not exist, read the full dataset;
If num_samples is None and numRows(parsed from schema) is greater than 0, read numRows rows;
If both num_samples and numRows(parsed from schema) are greater than 0, read num_samples rows.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
shard_equal_rows (bool, optional): Get an equal number of rows for all shards (default=False). If
shard_equal_rows is False, the number of rows in each shard may not be equal, which may lead to a failure
in distributed training. When the number of samples per TFRecord file is not equal, it is suggested to set this to True.
This argument should only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> from mindspore import dtype as mstype
>>>
>>> tfrecord_dataset_dir = ["/path/to/tfrecord_dataset_file"] # contains 1 or multiple TFRecord files
>>> tfrecord_schema_file = "/path/to/tfrecord_schema_file"
>>>
>>> # 1) Get all rows from tfrecord_dataset_dir with no explicit schema.
>>> # The meta-data in the first row will be used as a schema.
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir)
>>>
>>> # 2) Get all rows from tfrecord_dataset_dir with user-defined schema.
>>> schema = ds.Schema()
>>> schema.add_column(name='col_1d', de_type=mstype.int64, shape=[2])
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, schema=schema)
>>>
>>> # 3) Get all rows from tfrecord_dataset_dir with schema file.
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, schema=tfrecord_schema_file)
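>>>
>>> # 4) A hedged sketch: 2-way sharding with an equal number of rows per shard (shard values are placeholders).
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, num_shards=2, shard_id=0,
...                              shard_equal_rows=True)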
"""
@check_tfrecorddataset
def __init__(self, dataset_files, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
self.schema = schema
self.columns_list = replace_none(columns_list, [])
self.shard_equal_rows = replace_none(shard_equal_rows, False)
if self.schema is not None and (self.num_samples is None or self.num_samples == 0):
self.num_samples = Schema.get_num_rows(self.schema)
def parse(self, children=None):
schema = self.schema.cpp_schema if isinstance(self.schema, Schema) else self.schema
return cde.TFRecordNode(self.dataset_files, schema, self.columns_list, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id, self.shard_equal_rows)
class ManifestDataset(MappableDataset):
"""
A source dataset for reading images from a Manifest file.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint64 type.
Args:
dataset_file (str): File to be read.
usage (str, optional): Acceptable usages include `train`, `eval` and `inference` (default= `train`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, will include all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
class_indexing (dict, optional): A str-to-int mapping from label name to index
(default=None, the folder names will be sorted alphabetically and each
class will be given a unique index starting from 0).
decode (bool, optional): decode the images after reading (default=False).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the max number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If class_indexing is not a dictionary.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- The shape of the image column is [image_size] if decode flag is False, or [H,W,C] otherwise.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> manifest_dataset_dir = "/path/to/manifest_dataset_file"
>>>
>>> # 1) Read all samples specified in manifest_dataset_dir dataset with 8 threads for training
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, usage="train", num_parallel_workers=8)
>>>
>>> # 2) Read samples (specified in manifest_file.manifest) for shard 0 in a 2-way distributed training setup
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, num_shards=2, shard_id=0)
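>>>
>>> # 3) A hedged sketch: decode images and supply a user-defined label mapping.
>>> # The label names "cat" and "dog" are placeholders for labels present in your manifest file.
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, decode=True,
...                              class_indexing={"cat": 0, "dog": 1})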
"""
@check_manifestdataset
def __init__(self, dataset_file, usage="train", num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, class_indexing=None, decode=False, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_file = dataset_file
self.decode = replace_none(decode, False)
self.usage = replace_none(usage, "train")
self.class_indexing = replace_none(class_indexing, {})
def parse(self, children=None):
return cde.ManifestNode(self.dataset_file, self.usage, self.sampler, self.class_indexing, self.decode)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
Examples:
>>> manifest_dataset_dir = "/path/to/manifest_dataset_file"
>>>
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir)
>>> class_indexing = dataset.get_class_indexing()
"""
if self.class_indexing is None or not self.class_indexing:
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = runtime_getter[0].GetClassIndexing()
self.class_indexing = {}
for pair in self._class_indexing:
self.class_indexing[pair[0]] = pair[1][0]
return self.class_indexing
class AGNewsDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses AG News datasets.
The generated dataset has three columns: :py:obj:`[index, title, description]`.
The tensor of column :py:obj:`index` is of the string type.
The tensor of column :py:obj:`title` is of the string type.
The tensor of column :py:obj:`description` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Acceptable usages include `train`, `test` and `all` (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, 'num_samples' reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Examples:
>>> ag_news_dataset_dir = "/path/to/ag_news_dataset_file"
>>> dataset = ds.AGNewsDataset(dataset_dir=ag_news_dataset_dir, usage='all')
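>>>
>>> # A hedged sketch: read a few rows from the train split without shuffling.
>>> dataset = ds.AGNewsDataset(dataset_dir=ag_news_dataset_dir, usage='train', num_samples=5, shuffle=False)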
About AGNews dataset:
AG is a collection of over 1 million news articles. The news articles were collected
by ComeToMyHead from over 2,000 news sources in over 1 year of activity. ComeToMyHead
is an academic news search engine that has been in operation since July 2004.
The dataset is provided by academics for research purposes such as data mining
(clustering, classification, etc.), information retrieval (ranking, searching, etc.),
xml, data compression, data streaming, and any other non-commercial activities.
AG's news topic classification dataset was constructed by selecting the four largest
classes from the original corpus. Each class contains 30,000 training samples and
1,900 test samples. The total number of training samples in train.csv is 120,000
and the number of test samples in test.csv is 7,600.
You can unzip the dataset files into the following structure and read by MindSpore's API:
.. code-block::
.
└── ag_news_dataset_dir
├── classes.txt
├── train.csv
├── test.csv
└── readme.txt
Citation:
.. code-block::
@misc{zhang2015characterlevel,
title={Character-level Convolutional Networks for Text Classification},
author={Xiang Zhang and Junbo Zhao and Yann LeCun},
year={2015},
eprint={1509.01626},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
@check_ag_news_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None,
num_parallel_workers=None, shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.AGNewsNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class AmazonReviewDataset(SourceDataset):
"""
A source dataset that reads and parses Amazon Review Polarity and Amazon Review Full datasets.
The generated dataset has three columns: :py:obj:`[label, title, content]`.
The tensor of column :py:obj:`label` is of the string type.
The tensor of column :py:obj:`title` is of the string type.
The tensor of column :py:obj:`content` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the Amazon Review Polarity dataset
or the Amazon Review Full dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`.
For Polarity dataset, `train` will read from 3,600,000 train samples,
`test` will read from 400,000 test samples,
`all` will read from all 4,000,000 samples.
For Full dataset, `train` will read from 3,000,000 train samples,
`test` will read from 650,000 test samples,
`all` will read from all 3,650,000 samples (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to be read (default=None, reads the full dataset).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the mindspore.dataset.config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> amazon_review_dataset_dir = "/path/to/amazon_review_dataset_dir"
>>> dataset = ds.AmazonReviewDataset(dataset_dir=amazon_review_dataset_dir, usage='all')
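>>>
>>> # A hedged sketch: read the test split only, sharded across two workers (shard values are placeholders).
>>> dataset = ds.AmazonReviewDataset(dataset_dir=amazon_review_dataset_dir, usage='test',
...                                  num_shards=2, shard_id=0)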
About AmazonReview Dataset:
The Amazon reviews full dataset consists of reviews from Amazon. The data span a period of 18 years, including ~35
million reviews up to March 2013. Reviews include product and user information, ratings, and a plaintext review.
The dataset is mainly used for text classification: given the content and title, predict the correct star rating.
The Amazon reviews polarity dataset is constructed by taking review scores 1 and 2 as negative, and 4 and 5 as positive.
Samples with score 3 are ignored. In the dataset, class 1 is the negative class and class 2 is the positive class.
The Amazon Reviews Polarity and Amazon Reviews Full datasets have the same directory structures.
You can unzip the dataset files into the following structure and read by MindSpore's API:
.. code-block::
.
└── amazon_review_dir
├── train.csv
├── test.csv
└── readme.txt
Citation:
.. code-block::
@article{zhang2015character,
title={Character-level convolutional networks for text classification},
author={Zhang, Xiang and Zhao, Junbo and LeCun, Yann},
journal={Advances in neural information processing systems},
volume={28},
pages={649--657},
year={2015}
}
"""
@check_amazon_review_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, 'all')
def parse(self, children=None):
return cde.AmazonReviewNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class Cifar10Dataset(MappableDataset):
"""
A source dataset for reading and parsing Cifar10 dataset.
Currently, this API only supports parsing the binary version of the CIFAR-10 file.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 50,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 60,000 samples
(default=None, all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cifar10_dataset_dir = "/path/to/cifar10_dataset_directory"
>>>
>>> # 1) Get all samples from CIFAR10 dataset in sequence
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from CIFAR10 dataset
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from CIFAR10 dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, num_shards=2, shard_id=0)
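>>>
>>> # 4) A hedged sketch: pick samples with an explicit sampler (mutually exclusive with shuffle)
>>> sampler = ds.SequentialSampler(num_samples=64)
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, sampler=sampler)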
>>>
>>> # In CIFAR10 dataset, each dictionary has keys "image" and "label"
About CIFAR-10 dataset:
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,
with 6000 images per class. There are 50000 training images and 10000 test images.
The 10 different classes represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
Here is the original CIFAR-10 dataset structure.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── cifar-10-batches-bin
├── data_batch_1.bin
├── data_batch_2.bin
├── data_batch_3.bin
├── data_batch_4.bin
├── data_batch_5.bin
├── test_batch.bin
├── readme.html
└── batches.meta.txt
Citation:
.. code-block::
@techreport{Krizhevsky09,
author = {Alex Krizhevsky},
title = {Learning multiple layers of features from tiny images},
institution = {},
year = {2009},
howpublished = {http://www.cs.toronto.edu/~kriz/cifar.html}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.Cifar10Node(self.dataset_dir, self.usage, self.sampler)
class Cifar100Dataset(MappableDataset):
"""
A source dataset for reading and parsing Cifar100 dataset.
The generated dataset has three columns :py:obj:`[image, coarse_label, fine_label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensors of columns :py:obj:`coarse_label` and :py:obj:`fine_label` are each a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 50,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 60,000 samples
(default=None, all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, 'num_samples' reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cifar100_dataset_dir = "/path/to/cifar100_dataset_directory"
>>>
>>> # 1) Get all samples from CIFAR100 dataset in sequence
>>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from CIFAR100 dataset
>>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, num_samples=350, shuffle=True)
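>>>
>>> # 3) A hedged sketch: get samples for shard 0 in a 2-way distributed setup (shard values are placeholders)
>>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, num_shards=2, shard_id=0)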
>>>
>>> # In CIFAR100 dataset, each dictionary has 3 keys: "image", "fine_label" and "coarse_label"
About CIFAR-100 dataset:
This dataset is just like the CIFAR-10, except it has 100 classes containing 600 images
each. There are 500 training images and 100 testing images per class. The 100 classes in
the CIFAR-100 are grouped into 20 superclasses. Each image comes with a "fine" label (the
class to which it belongs) and a "coarse" label (the superclass to which it belongs).
Here is the original CIFAR-100 dataset structure.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── cifar-100-binary
├── train.bin
├── test.bin
├── fine_label_names.txt
└── coarse_label_names.txt
Citation:
.. code-block::
@techreport{Krizhevsky09,
author = {Alex Krizhevsky},
title = {Learning multiple layers of features from tiny images},
institution = {},
year = {2009},
howpublished = {http://www.cs.toronto.edu/~kriz/cifar.html}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.Cifar100Node(self.dataset_dir, self.usage, self.sampler)
class RandomDataset(SourceDataset):
"""
A source dataset that generates random data.
Args:
total_rows (int, optional): Number of samples for the dataset to generate
(default=None, number of samples is random).
schema (Union[str, Schema], optional): Path to the JSON schema file or schema object (default=None).
If the schema is not provided, the random dataset generates a random schema.
columns_list (list[str], optional): List of columns to be read (default=None, read all columns)
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, 'num_samples' reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
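Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # A hedged sketch: generate 10 random rows that follow a user-defined schema.
>>> schema = ds.Schema()
>>> schema.add_column(name='image', de_type=mstype.uint8, shape=[2, 2])
>>> dataset = ds.RandomDataset(schema=schema, total_rows=10)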
"""
@check_random_dataset
def __init__(self, total_rows=None, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,
cache=None, shuffle=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.total_rows = total_rows
if schema is not None:
self.total_rows = replace_none(total_rows, Schema.get_num_rows(schema))
self.schema = schema
self.columns_list = replace_none(columns_list, [])
def parse(self, children=None):
schema = self.schema.cpp_schema if isinstance(self.schema, Schema) else self.schema
return cde.RandomNode(self.total_rows, schema, self.columns_list)
class Schema:
"""
Class to represent a schema of a dataset.
Args:
schema_file(str): Path of the schema file (default=None).
Returns:
Schema object, schema info about dataset.
Raises:
RuntimeError: If schema file failed to load.
Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # Create schema; specify column name, mindspore.dtype and shape of the column
>>> schema = ds.Schema()
>>> schema.add_column(name='col1', de_type=mstype.int64, shape=[2])
"""
@check_schema
def __init__(self, schema_file=None):
self.schema_file = replace_none(schema_file, "")
self.cpp_schema = cde.SchemaObj(self.schema_file)
@check_add_column
def add_column(self, name, de_type, shape=None):
"""
Add new column to the schema.
Args:
name (str): The name of the new column.
de_type (str): Data type of the column.
shape (list[int], optional): Shape of the column
(default=None, [-1] which is an unknown shape of rank 1).
Raises:
ValueError: If column type is unknown.
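Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # A minimal sketch: add columns with and without an explicit shape.
>>> schema = ds.Schema()
>>> schema.add_column(name='image', de_type=mstype.uint8, shape=[-1])
>>> schema.add_column(name='label', de_type=mstype.int32)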
"""
if isinstance(de_type, typing.Type):
de_type = mstype_to_detype(de_type)
col_type = str(de_type)
else:
col_type = str(cde.DataType(de_type))
if shape is None:
self.cpp_schema.add_column(name, col_type)
else:
self.cpp_schema.add_column(name, col_type, shape)
def parse_columns(self, columns):
"""
Parse the columns and add them to self.
Args:
columns (Union[dict, list[dict], tuple[dict]]): Dataset attribute information, decoded from schema file.
- list[dict], 'name' and 'type' must be in keys, 'shape' optional.
- dict, columns.keys() as name, columns.values() is dict, and 'type' inside, 'shape' optional.
Raises:
RuntimeError: If failed to parse columns.
RuntimeError: If column's name field is missing.
RuntimeError: If column's type field is missing.
Examples:
>>> from mindspore.dataset import Schema
>>> schema = Schema()
>>> columns1 = [{'name': 'image', 'type': 'int8', 'shape': [3, 3]},
... {'name': 'label', 'type': 'int8', 'shape': [1]}]
>>> schema.parse_columns(columns1)
>>> columns2 = {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}}
>>> schema.parse_columns(columns2)
"""
self.cpp_schema.parse_columns(json.dumps(columns, indent=2))
def to_json(self):
"""
Get a JSON string of the schema.
Returns:
str, JSON string of the schema.
"""
return self.cpp_schema.to_json()
def from_json(self, json_obj):
"""
Load the schema from a JSON object.
Args:
json_obj (dict): Parsed JSON object.
Raises:
RuntimeError: If there is an unknown item in the object.
RuntimeError: If the dataset type is missing in the object.
RuntimeError: If columns are missing in the object.
"""
self.cpp_schema.from_string(json.dumps(json_obj, indent=2))
def __str__(self):
return self.to_json()
@staticmethod
def get_num_rows(schema):
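# `schema` may be a Schema object or anything Schema() can be constructed from
# (e.g. the path to a JSON schema file); return the number of rows recorded in it.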
schema_obj = schema
if not isinstance(schema_obj, Schema):
schema_obj = Schema(schema_obj)
return schema_obj.cpp_schema.get_num_rows()
class UDPOSDataset(SourceDataset):
"""
A source dataset that reads and parses UDPOS dataset.
The generated dataset has three columns: :py:obj:`[word, universal, stanford]`.
The tensor of column :py:obj:`word` is of the string type.
The tensor of column :py:obj:`universal` is of the string type.
The tensor of column :py:obj:`stanford` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test`, `valid` or `all`. `train` will read from
12,543 train samples, `test` will read from 2,077 test samples, `valid` will read from 2,002 validation samples,
`all` will read from all 16,622 samples (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> udpos_dataset_dir = "/path/to/udpos_dataset_dir"
>>> dataset = ds.UDPOSDataset(dataset_dir=udpos_dataset_dir, usage='all')
"""
@check_udpos_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, shuffle=Shuffle.GLOBAL, num_shards=None,
shard_id=None, num_parallel_workers=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, 'all')
def parse(self, children=None):
return cde.UDPOSNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class USPSDataset(SourceDataset):
"""
A source dataset for reading and parsing the USPS dataset.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is of a scalar of uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be "train", "test" or "all". "train" will read from 7,291
train samples, "test" will read from 2,007 test samples, "all" will read from all 9,298 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is not valid or does not exist or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If usage is invalid.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> usps_dataset_dir = "/path/to/usps_dataset_directory"
>>>
>>> # Read 3 samples from USPS dataset
>>> dataset = ds.USPSDataset(dataset_dir=usps_dataset_dir, num_samples=3)
>>>
>>> # Note: In USPS dataset, each dictionary has keys "image" and "label"
About USPS dataset:
USPS is a digit dataset automatically scanned from envelopes by the U.S. Postal Service
containing a total of 9,298 16×16 pixel grayscale samples.
The images are centered, normalized and show a broad range of font styles.
Here is the original USPS dataset structure.
You can download and unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── usps_dataset_dir
├── usps
├── usps.t
Citation:
.. code-block::
@article{hull1994database,
title={A database for handwritten text recognition research},
author={Hull, Jonathan J.},
journal={IEEE Transactions on pattern analysis and machine intelligence},
volume={16},
number={5},
pages={550--554},
year={1994},
publisher={IEEE}
}
"""
@check_usps_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.USPSNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class WikiTextDataset(SourceDataset):
"""
A source dataset that reads and parses WikiText2 and WikiText103 datasets.
The generated dataset has one column :py:obj:`[text]`.
The tensor of column :py:obj:`text` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Acceptable usages include `train`, `test`, `valid` and `all` (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Examples:
>>> wiki_text_dataset_dir = "/path/to/wiki_text_dataset_directory"
>>> dataset = ds.WikiTextDataset(dataset_dir=wiki_text_dataset_dir, usage='all')
About WikiTextDataset dataset:
The WikiText Long Term Dependency Language Modeling Dataset is an English corpus containing 100 million words.
The words are drawn from Wikipedia's verified Good and Featured articles, and the dataset is released in two
versions, WikiText2 and WikiText103. WikiText2 has 36,718 lines in wiki.train.tokens, 4,358 lines in
wiki.test.tokens and 3,760 lines in wiki.valid.tokens. WikiText103 has 1,801,350 lines in wiki.train.tokens,
4,358 lines in wiki.test.tokens and 3,760 lines in wiki.valid.tokens.
Here is the original WikiText dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── WikiText2/WikiText103
├── wiki.train.tokens
├── wiki.test.tokens
├── wiki.valid.tokens
Citation:
.. code-block::
@article{merity2016pointer,
title={Pointer sentinel mixture models},
author={Merity, Stephen and Xiong, Caiming and Bradbury, James and Socher, Richard},
journal={arXiv preprint arXiv:1609.07843},
year={2016}
}
"""
@check_wiki_text_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.WikiTextNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class VOCDataset(MappableDataset):
"""
A source dataset for reading and parsing VOC dataset.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`Detection`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[label, dtype=uint32]`, :py:obj:`[difficult, dtype=uint32]`, :py:obj:`[truncate, dtype=uint32]`.
- task = :py:obj:`Segmentation`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[target,dtype=uint8]`.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str, optional): Set the task type of reading voc data, now only support `Segmentation` or `Detection`
(default= `Segmentation`).
usage (str, optional): Set the usage of the task (default= `train`). If task is `Segmentation`, image and
annotation list will be loaded in ./ImageSets/Segmentation/usage + ".txt"; If task is `Detection`, image and
annotation list will be loaded in ./ImageSets/Main/usage + ".txt"; if task and usage are not set, image and
annotation list will be loaded in ./ImageSets/Segmentation/train.txt as default.
class_indexing (dict, optional): A str-to-int mapping from label name to index, only valid in
`Detection` task (default=None, the folder names will be sorted alphabetically and each
class will be given a unique index starting from 0).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
extra_metadata (bool, optional): Flag to add extra metadata to a row. If True, an additional column named
:py:obj:`[_meta-filename, dtype=string]` will be output at the end (default=False).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If the XML in Annotations has an invalid format.
RuntimeError: If the XML in Annotations is missing the `object` attribute.
RuntimeError: If the XML in Annotations is missing the `bndbox` attribute.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If task is neither 'Segmentation' nor 'Detection'.
ValueError: If task is 'Segmentation' but class_indexing is not None.
ValueError: If the txt file related to the usage does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- Column '[_meta-filename, dtype=string]' won't be output unless an explicit rename dataset op
is added to remove the prefix('_meta-').
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> voc_dataset_dir = "/path/to/voc_dataset_directory"
>>>
>>> # 1) Read VOC data for segmentation training
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Segmentation", usage="train")
>>>
>>> # 2) Read VOC data for detection training
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train")
>>>
>>> # 3) Read all VOC dataset samples in voc_dataset_dir with 8 threads in random order
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train",
... num_parallel_workers=8)
>>>
>>> # 4) Read then decode all VOC dataset samples in voc_dataset_dir in sequence
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train",
... decode=True, shuffle=False)
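>>>
>>> # 5) Read VOC detection data with a user-defined class mapping
>>> #    (the mapping below is only an illustrative sketch)
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train",
...                         class_indexing={"car": 0, "person": 1})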
>>>
>>> # In VOC dataset, if task='Segmentation', each dictionary has keys "image" and "target"
>>> # In VOC dataset, if task='Detection', each dictionary has keys "image" and "annotation"
About VOC dataset.
The PASCAL Visual Object Classes (VOC) challenge is a benchmark in visual
object category recognition and detection, providing the vision and machine
learning communities with a standard dataset of images and annotation, and
standard evaluation procedures.
You can unzip the original VOC-2012 dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── voc2012_dataset_dir
├── Annotations
│ ├── 2007_000027.xml
│ ├── 2007_000032.xml
│ ├── ...
├── ImageSets
│ ├── Action
│ ├── Layout
│ ├── Main
│ └── Segmentation
├── JPEGImages
│ ├── 2007_000027.jpg
│ ├── 2007_000032.jpg
│ ├── ...
├── SegmentationClass
│ ├── 2007_000032.png
│ ├── 2007_000033.png
│ ├── ...
└── SegmentationObject
├── 2007_000032.png
├── 2007_000033.png
├── ...
Citation:
.. code-block::
@article{Everingham10,
author = {Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.},
title = {The Pascal Visual Object Classes (VOC) Challenge},
journal = {International Journal of Computer Vision},
volume = {88},
year = {2012},
number = {2},
month = {jun},
pages = {303--338},
biburl = {http://host.robots.ox.ac.uk/pascal/VOC/pubs/everingham10.html#bibtex},
howpublished = {http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html}
}
"""
@check_vocdataset
def __init__(self, dataset_dir, task="Segmentation", usage="train", class_indexing=None, num_samples=None,
num_parallel_workers=None, shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None,
cache=None, extra_metadata=False):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.task = replace_none(task, "Segmentation")
self.usage = replace_none(usage, "train")
self.class_indexing = replace_none(class_indexing, {})
self.decode = replace_none(decode, False)
self.extra_metadata = extra_metadata
def parse(self, children=None):
return cde.VOCNode(self.dataset_dir, self.task, self.usage, self.class_indexing, self.decode, self.sampler,
self.extra_metadata)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
Examples:
>>> voc_dataset_dir = "/path/to/voc_dataset_directory"
>>>
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection")
>>> class_indexing = dataset.get_class_indexing()
"""
if self.task != "Detection":
raise NotImplementedError("Only 'Detection' support get_class_indexing.")
if self.class_indexing is None or not self.class_indexing:
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = runtime_getter[0].GetClassIndexing()
self.class_indexing = {}
for pair in self._class_indexing:
self.class_indexing[pair[0]] = pair[1][0]
return self.class_indexing
class _Caltech101Dataset:
"""
Mainly for loading the Caltech101 dataset; returns two or three columns per sample, depending on `target_type`.
"""
def __init__(self, dataset_dir, target_type="category", decode=False):
self.dataset_dir = os.path.realpath(dataset_dir)
self.image_dir = os.path.join(self.dataset_dir, "101_ObjectCategories")
self.annotation_dir = os.path.join(self.dataset_dir, "Annotations")
self.target_type = target_type
if self.target_type == "category":
self.column_names = ["image", "category"]
elif self.target_type == "annotation":
self.column_names = ["image", "annotation"]
else:
self.column_names = ["image", "category", "annotation"]
self.decode = decode
self.classes = sorted(os.listdir(self.image_dir))
if "BACKGROUND_Google" in self.classes:
self.classes.remove("BACKGROUND_Google")
name_map = {"Faces": "Faces_2",
"Faces_easy": "Faces_3",
"Motorbikes": "Motorbikes_16",
"airplanes": "Airplanes_Side_2"}
self.annotation_classes = [name_map[class_name] if class_name in name_map else class_name
for class_name in self.classes]
self.image_index = []
self.image_label = []
for i, image_class in enumerate(self.classes):
sub_dir = os.path.join(self.image_dir, image_class)
if not os.path.isdir(sub_dir) or not os.access(sub_dir, os.R_OK):
continue
num_images = len(os.listdir(sub_dir))
self.image_index.extend(range(1, num_images + 1))
self.image_label.extend(num_images * [i])
def __getitem__(self, index):
image_file = os.path.join(self.image_dir, self.classes[self.image_label[index]],
"image_{:04d}.jpg".format(self.image_index[index]))
if not os.path.exists(image_file):
raise ValueError("The image file {} does not exist or permission denied!".format(image_file))
if self.decode:
image = np.asarray(Image.open(image_file).convert("RGB"))
else:
image = np.fromfile(image_file, dtype=np.uint8)
if self.target_type == "category":
return image, self.image_label[index]
annotation_file = os.path.join(self.annotation_dir, self.annotation_classes[self.image_label[index]],
"annotation_{:04d}.mat".format(self.image_index[index]))
if not os.path.exists(annotation_file):
raise ValueError("The annotation file {} does not exist or permission denied!".format(annotation_file))
annotation = loadmat(annotation_file)["obj_contour"]
if self.target_type == "annotation":
return image, annotation
return image, self.image_label[index], annotation
def __len__(self):
return len(self.image_index)
class Caltech101Dataset(GeneratorDataset):
"""
A source dataset that reads and parses Caltech101 dataset.
The columns of the generated dataset depend on the value of `target_type`.
When `target_type` is `category`, the columns are :py:obj:`[image, category]`.
When `target_type` is `annotation`, the columns are :py:obj:`[image, annotation]`.
When `target_type` is `all`, the columns are :py:obj:`[image, category, annotation]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`category` is of the uint32 type.
The tensor of column :py:obj:`annotation` is a 2-dimensional ndarray that stores the contour of the image
and consists of a series of points.
Args:
dataset_dir (str): Path to the root directory that contains the dataset. This root directory contains two
subdirectories, one is called 101_ObjectCategories, which stores images,
and the other is called Annotations, which stores annotations.
target_type (str, optional): Target of the image. If target_type is "category", the returned target is the
category of the image. If target_type is "annotation", the returned target is the annotation.
If target_type is "all", both category and annotation are returned (default=None, means "category").
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
decode (bool, optional): Whether or not to decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If target_type is not set correctly.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> caltech101_dataset_directory = "/path/to/caltech101_dataset_directory"
>>>
>>> # 1) Read all samples (image files) in caltech101_dataset_directory with 8 threads
>>> dataset = ds.Caltech101Dataset(dataset_dir=caltech101_dataset_directory, num_parallel_workers=8)
>>>
>>> # 2) Read all samples (image files) with the target_type "annotation"
>>> dataset = ds.Caltech101Dataset(dataset_dir=caltech101_dataset_directory, target_type="annotation")
About Caltech101Dataset:
Pictures of objects belonging to 101 categories. About 40 to 800 images per category.
Most categories have about 50 images. Collected in September 2003 by Fei-Fei Li, Marco Andreetto,
and Marc'Aurelio Ranzato. The size of each image is roughly 300 x 200 pixels.
The dataset officially provides contour data of each object in each picture as the annotation.
.. code-block::
.
└── caltech101_dataset_directory
├── 101_ObjectCategories
│ ├── Faces
│ │ ├── image_0001.jpg
│ │ ├── image_0002.jpg
│ │ ...
│ ├── Faces_easy
│ │ ├── image_0001.jpg
│ │ ├── image_0002.jpg
│ │ ...
│ ├── ...
└── Annotations
├── Airplanes_Side_2
│ ├── annotation_0001.mat
│ ├── annotation_0002.mat
│ ...
├── Faces_2
│ ├── annotation_0001.mat
│ ├── annotation_0002.mat
│ ...
├── ...
Citation:
.. code-block::
@article{FeiFei2004LearningGV,
author = {Li Fei-Fei and Rob Fergus and Pietro Perona},
title = {Learning Generative Visual Models from Few Training Examples:
An Incremental Bayesian Approach Tested on 101 Object Categories},
journal = {Computer Vision and Pattern Recognition Workshop},
year = {2004},
url = {http://www.vision.caltech.edu/Image_Datasets/Caltech101/},
}
"""
@check_caltech101_dataset
def __init__(self, dataset_dir, target_type=None, num_samples=None, num_parallel_workers=1,
shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None):
self.dataset_dir = dataset_dir
self.target_type = replace_none(target_type, "category")
self.decode = replace_none(decode, False)
dataset = _Caltech101Dataset(self.dataset_dir, self.target_type, self.decode)
super().__init__(dataset, column_names=dataset.column_names, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
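Examples:
>>> # Sketch: retrieve the fixed category-to-index mapping; the dataset path is illustrative.
>>> caltech101_dataset_directory = "/path/to/caltech101_dataset_directory"
>>> dataset = ds.Caltech101Dataset(dataset_dir=caltech101_dataset_directory)
>>> class_indexing = dataset.get_class_indexing()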
"""
class_dict = {'Faces': 0, 'Faces_easy': 1, 'Leopards': 2, 'Motorbikes': 3, 'accordion': 4, 'airplanes': 5,
'anchor': 6, 'ant': 7, 'barrel': 8, 'bass': 9, 'beaver': 10, 'binocular': 11, 'bonsai': 12,
'brain': 13, 'brontosaurus': 14, 'buddha': 15, 'butterfly': 16, 'camera': 17, 'cannon': 18,
'car_side': 19, 'ceiling_fan': 20, 'cellphone': 21, 'chair': 22, 'chandelier': 23,
'cougar_body': 24, 'cougar_face': 25, 'crab': 26, 'crayfish': 27, 'crocodile': 28,
'crocodile_head': 29, 'cup': 30, 'dalmatian': 31, 'dollar_bill': 32, 'dolphin': 33,
'dragonfly': 34, 'electric_guitar': 35, 'elephant': 36, 'emu': 37, 'euphonium': 38, 'ewer': 39,
'ferry': 40, 'flamingo': 41, 'flamingo_head': 42, 'garfield': 43, 'gerenuk': 44, 'gramophone': 45,
'grand_piano': 46, 'hawksbill': 47, 'headphone': 48, 'hedgehog': 49, 'helicopter': 50, 'ibis': 51,
'inline_skate': 52, 'joshua_tree': 53, 'kangaroo': 54, 'ketch': 55, 'lamp': 56, 'laptop': 57,
'llama': 58, 'lobster': 59, 'lotus': 60, 'mandolin': 61, 'mayfly': 62, 'menorah': 63,
'metronome': 64, 'minaret': 65, 'nautilus': 66, 'octopus': 67, 'okapi': 68, 'pagoda': 69,
'panda': 70, 'pigeon': 71, 'pizza': 72, 'platypus': 73, 'pyramid': 74, 'revolver': 75,
'rhino': 76, 'rooster': 77, 'saxophone': 78, 'schooner': 79, 'scissors': 80, 'scorpion': 81,
'sea_horse': 82, 'snoopy': 83, 'soccer_ball': 84, 'stapler': 85, 'starfish': 86,
'stegosaurus': 87, 'stop_sign': 88, 'strawberry': 89, 'sunflower': 90, 'tick': 91,
'trilobite': 92, 'umbrella': 93, 'watch': 94, 'water_lilly': 95, 'wheelchair': 96, 'wild_cat': 97,
'windsor_chair': 98, 'wrench': 99, 'yin_yang': 100}
return class_dict
class Caltech256Dataset(MappableDataset):
"""
A source dataset that reads and parses Caltech256 dataset.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
decode (bool, optional): Whether or not to decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> caltech256_dataset_dir = "/path/to/caltech256_dataset_directory"
>>>
>>> # 1) Read all samples (image files) in caltech256_dataset_dir with 8 threads
>>> dataset = ds.Caltech256Dataset(dataset_dir=caltech256_dataset_dir, num_parallel_workers=8)
About Caltech256Dataset:
Caltech-256 is an object recognition dataset containing 30,607 real-world images, of different sizes,
spanning 257 classes (256 object classes and an additional clutter class).
Each class is represented by at least 80 images. The dataset is a superset of the Caltech-101 dataset.
.. code-block::
.
└── caltech256_dataset_directory
├── 001.ak47
│ ├── 001_0001.jpg
│ ├── 001_0002.jpg
│ ...
├── 002.american-flag
│ ├── 002_0001.jpg
│ ├── 002_0002.jpg
│ ...
├── 003.backpack
│ ├── 003_0001.jpg
│ ├── 003_0002.jpg
│ ...
├── ...
Citation:
.. code-block::
@article{griffin2007caltech,
title = {Caltech-256 object category dataset},
added-at = {2021-01-21T02:54:42.000+0100},
author = {Griffin, Gregory and Holub, Alex and Perona, Pietro},
biburl = {https://www.bibsonomy.org/bibtex/21f746f23ff0307826cca3e3be45f8de7/s364315},
interhash = {bfe1e648c1778c04baa60f23d1223375},
intrahash = {1f746f23ff0307826cca3e3be45f8de7},
publisher = {California Institute of Technology},
timestamp = {2021-01-21T02:54:42.000+0100},
year = {2007}
}
"""
@check_caltech256_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, decode=False,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.Caltech256Node(self.dataset_dir, self.decode, self.sampler)
class CocoDataset(MappableDataset):
"""
A source dataset for reading and parsing COCO dataset.
CocoDataset supports four kinds of tasks, which are Object Detection, Keypoint Detection, Stuff Segmentation and
Panoptic Segmentation of 2017 Train/Val/Test dataset.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`Detection`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[category_id, dtype=uint32]`, :py:obj:`[iscrowd, dtype=uint32]`.
- task = :py:obj:`Stuff`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[segmentation,dtype=float32]`, \
:py:obj:`[iscrowd,dtype=uint32]`.
- task = :py:obj:`Keypoint`, output columns: :py:obj:`[image, dtype=uint8]`, \
:py:obj:`[keypoints, dtype=float32]`, :py:obj:`[num_keypoints, dtype=uint32]`.
- task = :py:obj:`Panoptic`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[category_id, dtype=uint32]`, :py:obj:`[iscrowd, dtype=uint32]`, :py:obj:`[area, dtype=uint32]`.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
annotation_file (str): Path to the annotation JSON file.
task (str, optional): Set the task type for reading COCO data. Supported task types:
`Detection`, `Stuff`, `Panoptic` and `Keypoint` (default= `Detection`).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the configuration file).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
extra_metadata (bool, optional): Flag to add extra metadata to a row. If True, an additional column named
:py:obj:`[_meta-filename, dtype=string]` will be output at the end (default=False).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If parse JSON file failed.
ValueError: If task is not in [`Detection`, `Stuff`, `Panoptic`, `Keypoint`].
ValueError: If annotation_file does not exist.
ValueError: If dataset_dir does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- Column '[_meta-filename, dtype=string]' won't be output unless an explicit rename dataset op is added
to remove the prefix('_meta-').
- CocoDataset doesn't support PKSampler.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> coco_dataset_dir = "/path/to/coco_dataset_directory/images"
>>> coco_annotation_file = "/path/to/coco_dataset_directory/annotation_file"
>>>
>>> # 1) Read COCO data for Detection task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Detection')
>>>
>>> # 2) Read COCO data for Stuff task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Stuff')
>>>
>>> # 3) Read COCO data for Panoptic task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Panoptic')
>>>
>>> # 4) Read COCO data for Keypoint task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Keypoint')
>>>
>>> # In COCO dataset, each dictionary has keys "image" and "annotation"
About COCO dataset:
COCO(Microsoft Common Objects in Context) is a large-scale object detection, segmentation, and captioning dataset
with several features: Object segmentation, Recognition in context, Superpixel stuff segmentation,
330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories,
5 captions per image, 250,000 people with keypoints. In contrast to the popular ImageNet dataset, COCO has fewer
categories but more instances per category.
You can unzip the original COCO-2017 dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── coco_dataset_directory
├── train2017
│ ├── 000000000009.jpg
│ ├── 000000000025.jpg
│ ├── ...
├── test2017
│ ├── 000000000001.jpg
│ ├── 000000058136.jpg
│ ├── ...
├── val2017
│ ├── 000000000139.jpg
│ ├── 000000057027.jpg
│ ├── ...
└── annotations
├── captions_train2017.json
├── captions_val2017.json
├── instances_train2017.json
├── instances_val2017.json
├── person_keypoints_train2017.json
└── person_keypoints_val2017.json
Citation:
.. code-block::
@article{DBLP:journals/corr/LinMBHPRDZ14,
author = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and
Lubomir D. Bourdev and Ross B. Girshick and James Hays and
Pietro Perona and Deva Ramanan and Piotr Doll{\'{a}}r and C. Lawrence Zitnick},
title = {Microsoft {COCO:} Common Objects in Context},
journal = {CoRR},
volume = {abs/1405.0312},
year = {2014},
url = {http://arxiv.org/abs/1405.0312},
archivePrefix = {arXiv},
eprint = {1405.0312},
timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
biburl = {https://dblp.org/rec/journals/corr/LinMBHPRDZ14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
@check_cocodataset
def __init__(self, dataset_dir, annotation_file, task="Detection", num_samples=None, num_parallel_workers=None,
shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None, cache=None,
extra_metadata=False):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.annotation_file = annotation_file
self.task = replace_none(task, "Detection")
self.decode = replace_none(decode, False)
self.extra_metadata = extra_metadata
def parse(self, children=None):
return cde.CocoNode(self.dataset_dir, self.annotation_file, self.task, self.decode, self.sampler,
self.extra_metadata)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-list<int> mapping from label name to index.
Examples:
>>> coco_dataset_dir = "/path/to/coco_dataset_directory/images"
>>> coco_annotation_file = "/path/to/coco_dataset_directory/annotation_file"
>>>
>>> # Read COCO data for Detection task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Detection')
>>>
>>> class_indexing = dataset.get_class_indexing()
"""
if self.task not in {"Detection", "Panoptic"}:
raise NotImplementedError("Only 'Detection' and 'Panoptic' support get_class_indexing.")
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = dict(runtime_getter[0].GetClassIndexing())
return self._class_indexing
class CoNLL2000Dataset(SourceDataset):
"""
A source dataset that reads and parses CoNLL2000 dataset.
The generated dataset has three columns: :py:obj:`[word, pos_tag, chunk_tag]`.
The tensor of column :py:obj:`word` is of the string type.
The tensor of column :py:obj:`pos_tag` is of the string type.
The tensor of column :py:obj:`chunk_tag` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test`, or `all`. `train` will read from
8,936 train samples, `test` will read from 2,012 test samples,
`all` will read from all 10,948 samples (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> conll2000_dataset_dir = "/path/to/conll2000_dataset_dir"
>>> dataset = ds.CoNLL2000Dataset(dataset_dir=conll2000_dataset_dir, usage='all')
"""
@check_conll2000_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, shuffle=Shuffle.GLOBAL, num_shards=None,
shard_id=None, num_parallel_workers=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, 'all')
def parse(self, children=None):
return cde.CoNLL2000Node(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class CelebADataset(MappableDataset):
"""
A source dataset for reading and parsing CelebA dataset.
Currently it only supports reading `list_attr_celeba.txt`, which contains the attribute annotations of the dataset.
The generated dataset has two columns: :py:obj:`[image, attr]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`attr` is of the uint32 type and one hot encoded.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_parallel_workers (int, optional): Number of workers to read the data (default=None, will use value set in
the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None).
usage (str, optional): Specify the `train`, `valid`, `test` part or `all` parts of dataset
(default= `all`, will read all samples).
sampler (Sampler, optional): Object used to choose samples from the dataset (default=None).
decode (bool, optional): Decode the images after reading (default=False).
extensions (list[str], optional): List of file extensions to be included in the dataset (default=None).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will include all images).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> celeba_dataset_dir = "/path/to/celeba_dataset_directory"
>>>
>>> # Read 5 samples from CelebA dataset
>>> dataset = ds.CelebADataset(dataset_dir=celeba_dataset_dir, usage='train', num_samples=5)
>>>
>>> # Note: In celeba dataset, each data dictionary owns keys "image" and "attr"
About CelebA dataset:
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset
with more than 200K celebrity images, each with 40 attribute annotations.
The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations, including
* 10,177 identities,
* 202,599 face images,
* 5 landmark locations and 40 binary attribute annotations per image.
The dataset can be employed as the training and test sets for the following computer
vision tasks: face attribute recognition, face detection, landmark (or facial part)
localization, and face editing & synthesis.
Original CelebA dataset structure:
.. code-block::
.
└── CelebA
├── README.md
├── Img
│ ├── img_celeba.7z
│ ├── img_align_celeba_png.7z
│ └── img_align_celeba.zip
├── Eval
│ └── list_eval_partition.txt
└── Anno
├── list_landmarks_celeba.txt
├── list_landmarks_align_celeba.txt
├── list_bbox_celeba.txt
├── list_attr_celeba.txt
└── identity_CelebA.txt
You can unzip the dataset files into the following structure and read by MindSpore's API.
.. code-block::
.
└── celeba_dataset_directory
├── list_attr_celeba.txt
├── 000001.jpg
├── 000002.jpg
├── 000003.jpg
├── ...
Citation:
.. code-block::
@article{DBLP:journals/corr/LiuLWT14,
author = {Ziwei Liu and Ping Luo and Xiaogang Wang and Xiaoou Tang},
title = {Deep Learning Face Attributes in the Wild},
journal = {CoRR},
volume = {abs/1411.7766},
year = {2014},
url = {http://arxiv.org/abs/1411.7766},
archivePrefix = {arXiv},
eprint = {1411.7766},
timestamp = {Tue, 10 Dec 2019 15:37:26 +0100},
biburl = {https://dblp.org/rec/journals/corr/LiuLWT14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org},
howpublished = {http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html}
}
"""
@check_celebadataset
def __init__(self, dataset_dir, num_parallel_workers=None, shuffle=None, usage='all', sampler=None, decode=False,
extensions=None, num_samples=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.decode = replace_none(decode, False)
self.extensions = replace_none(extensions, [])
self.usage = replace_none(usage, "all")
def parse(self, children=None):
if self.usage != "all":
dataset_dir = os.path.realpath(self.dataset_dir)
partition_file = os.path.join(dataset_dir, "list_eval_partition.txt")
if not os.path.exists(partition_file):
raise RuntimeError("Partition file can not be found when usage is not 'all'.")
return cde.CelebANode(self.dataset_dir, self.usage, self.sampler, self.decode, self.extensions)
class CLUEDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses CLUE datasets.
Supported CLUE classification tasks: `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`AFQMC`
- usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`TNEWS`
- usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- task = :py:obj:`IFLYTEK`
- usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=string]`, \
:py:obj:`[sentence, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.
- task = :py:obj:`CMNLI`
- usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`WSC`
- usage = :py:obj:`train`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, \
:py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, :py:obj:`[text, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, \
:py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`CSL`
- usage = :py:obj:`train`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for
a pattern of files. The list will be sorted in a lexicographical order.
task (str, optional): The kind of task, one of `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`
(default=`AFQMC`).
usage (str, optional): Specify the `train`, `test` or `eval` part of dataset (default="train").
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> clue_dataset_dir = ["/path/to/clue_dataset_file"] # contains 1 or multiple clue files
>>> dataset = ds.CLUEDataset(dataset_files=clue_dataset_dir, task='AFQMC', usage='train')
About CLUE dataset:
CLUE, a Chinese Language Understanding Evaluation benchmark. It contains multiple
tasks, including single-sentence classification, sentence pair classification, and machine
reading comprehension.
You can unzip the dataset files into the following structure and read them with MindSpore's API,
taking the afqmc dataset as an example:
.. code-block::
.
└── afqmc_public
├── train.json
├── test.json
└── dev.json
Citation:
.. code-block::
@article{CLUEbenchmark,
title = {CLUE: A Chinese Language Understanding Evaluation Benchmark},
author = {Liang Xu, Xuanwei Zhang, Lu Li, Hai Hu, Chenjie Cao, Weitang Liu, Junyi Li, Yudong Li,
Kai Sun, Yechen Xu, Yiming Cui, Cong Yu, Qianqian Dong, Yin Tian, Dian Yu, Bo Shi, Jun Zeng,
Rongzhao Wang, Weijian Xie, Yanting Li, Yina Patterson, Zuoyu Tian, Yiwen Zhang, He Zhou,
Shaoweihua Liu, Qipeng Zhao, Cong Yue, Xinrui Zhang, Zhengliang Yang, Zhenzhong Lan},
journal = {arXiv preprint arXiv:2004.05986},
year = {2020},
howpublished = {https://github.com/CLUEbenchmark/CLUE}
}
"""
@check_cluedataset
def __init__(self, dataset_files, task='AFQMC', usage='train', num_samples=None, num_parallel_workers=None,
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.usage = replace_none(usage, 'train')
self.task = replace_none(task, 'AFQMC')
def parse(self, children=None):
return cde.CLUENode(self.dataset_files, self.task, self.usage, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id)
class CSVDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses comma-separated values (CSV) datasets.
The columns of generated dataset depend on the source CSV files.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search
for a pattern of files. The list will be sorted in a lexicographical order.
field_delim (str, optional): A string that indicates the char delimiter to separate fields (default=',').
column_defaults (list, optional): List of default values for the CSV field (default=None). Each item
in the list must be of a valid type (float, int, or string). If this is not provided, all
columns are treated as the string type.
column_names (list[str], optional): List of column names of the dataset (default=None). If this
is not provided, the column names are inferred from the first row of the CSV file.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> csv_dataset_dir = ["/path/to/csv_dataset_file"] # contains 1 or multiple csv files
>>> dataset = ds.CSVDataset(dataset_files=csv_dataset_dir, column_names=['col1', 'col2', 'col3', 'col4'])
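>>>
>>> # Sketch: supply per-column defaults so the columns are parsed as int, float and string
>>> # (the default values below are illustrative).
>>> dataset = ds.CSVDataset(dataset_files=csv_dataset_dir,
...                         column_names=['col1', 'col2', 'col3', 'col4'],
...                         column_defaults=[0, 0.0, '', ''])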
"""
@check_csvdataset
def __init__(self, dataset_files, field_delim=',', column_defaults=None, column_names=None, num_samples=None,
num_parallel_workers=None, shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
self.field_delim = replace_none(field_delim, ',')
self.column_defaults = replace_none(column_defaults, [])
self.column_names = replace_none(column_names, [])
def parse(self, children=None):
return cde.CSVNode(self.dataset_files, self.field_delim, self.column_defaults, self.column_names,
self.num_samples, self.shuffle_flag, self.num_shards, self.shard_id)
class SBUDataset(MappableDataset):
"""
A source dataset for reading and parsing the SBU dataset.
The generated dataset has two columns :py:obj:`[image, caption]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`caption` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
decode (bool, optional): Decode the images after reading (default=False).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> sbu_dataset_dir = "/path/to/sbu_dataset_directory"
>>> # Read 3 samples from SBU dataset
>>> dataset = ds.SBUDataset(dataset_dir=sbu_dataset_dir, num_samples=3)
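>>>
>>> # Decode the images while reading instead of returning the raw, undecoded bytes
>>> dataset = ds.SBUDataset(dataset_dir=sbu_dataset_dir, decode=True)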
About SBU dataset:
SBU dataset is a large captioned photo collection.
It contains one million images with associated visually relevant captions.
You should manually download the images using the official download.m by replacing 'urls{i}(24, end)' with
'urls{i}(24:1:end)' and keep the directory structure as below.
.. code-block::
.
└─ dataset_dir
├── SBU_captioned_photo_dataset_captions.txt
├── SBU_captioned_photo_dataset_urls.txt
└── sbu_images
├── m_3326_3596303505_3ce4c20529.jpg
├── ......
└── m_2522_4182181099_c3c23ab1cc.jpg
Citation:
.. code-block::
@inproceedings{Ordonez:2011:im2text,
Author = {Vicente Ordonez and Girish Kulkarni and Tamara L. Berg},
Title = {Im2Text: Describing Images Using 1 Million Captioned Photographs},
Booktitle = {Neural Information Processing Systems ({NIPS})},
Year = {2011},
}
"""
@check_sbu_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, decode=False,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.SBUNode(self.dataset_dir, self.decode, self.sampler)
class SogouNewsDataset(SourceDataset):
"""
A source dataset that reads and parses Sogou News dataset.
The generated dataset has three columns: :py:obj:`[index, title, content]`.
The tensor of column :py:obj:`index` is of the string type.
The tensor of column :py:obj:`title` is of the string type.
The tensor of column :py:obj:`content` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`.
`train` will read from 450,000 train samples, `test` will read from 60,000 test samples,
`all` will read from all 510,000 samples (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, read all samples).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL;
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> sogou_news_dataset_dir = "/path/to/sogou_news_dataset_dir"
>>> dataset = ds.SogouNewsDataset(dataset_dir=sogou_news_dataset_dir, usage='all')
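>>>
>>> # Read 5 rows from the test split in file order (shuffle disabled)
>>> dataset = ds.SogouNewsDataset(dataset_dir=sogou_news_dataset_dir, usage='test',
...                               shuffle=False, num_samples=5)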
About SogouNews Dataset:
SogouNews dataset includes 3 columns, corresponding to class index (1 to 5), title and content. The title and
content are escaped using double quotes ("), and any internal double quote is escaped by 2 double quotes ("").
New lines are escaped by a backslash followed by an "n" character, that is "\n".
You can unzip the dataset files into the following structure and read by MindSpore's API:
.. code-block::
.
└── sogou_news_dir
├── classes.txt
├── readme.txt
├── test.csv
└── train.csv
Citation:
.. code-block::
@misc{zhang2015characterlevel,
title={Character-level Convolutional Networks for Text Classification},
author={Xiang Zhang and Junbo Zhao and Yann LeCun},
year={2015},
eprint={1509.01626},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
@check_sogou_news_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, shuffle=Shuffle.GLOBAL, num_shards=None,
shard_id=None, num_parallel_workers=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, 'all')
def parse(self, children=None):
return cde.SogouNewsNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id)
class _Flowers102Dataset:
"""
Mainly for loading the Flowers102 dataset, returning one row at a time.
"""
def __init__(self, dataset_dir, task, usage, decode):
self.dataset_dir = os.path.realpath(dataset_dir)
self.task = task
self.usage = usage
self.decode = decode
if self.task == "Classification":
self.column_names = ["image", "label"]
else:
self.column_names = ["image", "segmentation", "label"]
labels_path = os.path.join(self.dataset_dir, "imagelabels.mat")
setid_path = os.path.join(self.dataset_dir, "setid.mat")
# minus one to transform 1~102 to 0 ~ 101
self.labels = (loadmat(labels_path)["labels"][0] - 1).astype(np.uint32)
self.setid = loadmat(setid_path)
if self.usage == 'train':
self.indices = self.setid["trnid"][0].tolist()
elif self.usage == 'test':
self.indices = self.setid["tstid"][0].tolist()
elif self.usage == 'valid':
self.indices = self.setid["valid"][0].tolist()
elif self.usage == 'all':
self.indices = self.setid["trnid"][0].tolist()
self.indices += self.setid["tstid"][0].tolist()
self.indices += self.setid["valid"][0].tolist()
else:
raise ValueError("Input usage is not within the valid set of ['train', 'valid', 'test', 'all'].")
def __getitem__(self, index):
# range: 1 ~ 8189
image_path = os.path.join(self.dataset_dir, "jpg", "image_" + str(self.indices[index]).zfill(5) + ".jpg")
if not os.path.exists(image_path):
raise RuntimeError("Can not find image file: " + image_path)
if self.decode is True:
image = np.asarray(Image.open(image_path).convert("RGB"))
else:
image = np.fromfile(image_path, dtype=np.uint8)
label = self.labels[self.indices[index] - 1]
if self.task == "Segmentation":
segmentation_path = \
os.path.join(self.dataset_dir, "segmim", "segmim_" + str(self.indices[index]).zfill(5) + ".jpg")
if not os.path.exists(segmentation_path):
raise RuntimeError("Can not find segmentation file: " + segmentation_path)
if self.decode is True:
segmentation = np.asarray(Image.open(segmentation_path).convert("RGB"))
else:
segmentation = np.fromfile(segmentation_path, dtype=np.uint8)
return image, segmentation, label
return image, label
def __len__(self):
return len(self.indices)
class Flowers102Dataset(GeneratorDataset):
"""
A source dataset for reading and parsing Flowers102 dataset.
The generated dataset has two columns :py:obj:`[image, label]` or three :py:obj:`[image, segmentation, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`segmentation` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar or a tensor of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str): Specify the 'Classification' or 'Segmentation' task (default='Classification').
usage (str): Specify the 'train', 'valid', 'test' part or 'all' parts of dataset
(default='all', will read all samples).
num_samples (int, optional): The number of samples to be included in the dataset (default=None, all images).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
decode (bool, optional): Whether or not to decode the images and segmentations after reading (default=False).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, 'num_samples' reflects the
maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> flowers102_dataset_dir = "/path/to/flowers102_dataset_directory"
>>> dataset = ds.Flowers102Dataset(dataset_dir=flowers102_dataset_dir,
... task="Classification",
... usage="all",
... decode=True)
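>>>
>>> # Map predicted label indices back to flower names via the class index of the dataset
>>> name_to_index = dataset.get_class_indexing()  # e.g. {"pink primrose": 0, ...}
>>> index_to_name = {v: k for k, v in name_to_index.items()}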
About Flowers102 dataset:
Flowers102 dataset consists of 102 flower categories.
The flowers commonly occur in the United Kingdom.
Each class consists of between 40 and 258 images.
Here is the original Flowers102 dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── flowers102_dataset_dir
├── imagelabels.mat
├── setid.mat
├── jpg
├── image_00001.jpg
├── image_00002.jpg
├── ...
├── segmim
├── segmim_00001.jpg
├── segmim_00002.jpg
├── ...
Citation:
.. code-block::
@InProceedings{Nilsback08,
author = "Maria-Elena Nilsback and Andrew Zisserman",
title = "Automated Flower Classification over a Large Number of Classes",
booktitle = "Indian Conference on Computer Vision, Graphics and Image Processing",
month = "Dec",
year = "2008",
}
"""
@check_flowers102dataset
def __init__(self, dataset_dir, task="Classification", usage="all", num_samples=None, num_parallel_workers=1,
shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None):
self.dataset_dir = os.path.realpath(dataset_dir)
self.task = replace_none(task, "Classification")
self.usage = replace_none(usage, "all")
self.decode = replace_none(decode, False)
dataset = _Flowers102Dataset(self.dataset_dir, self.task, self.usage, self.decode)
super().__init__(dataset, column_names=dataset.column_names, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
"""
class_names = [
"pink primrose", "hard-leaved pocket orchid", "canterbury bells",
"sweet pea", "english marigold", "tiger lily", "moon orchid",
"bird of paradise", "monkshood", "globe thistle", "snapdragon",
"colt's foot", "king protea", "spear thistle", "yellow iris",
"globe-flower", "purple coneflower", "peruvian lily", "balloon flower",
"giant white arum lily", "fire lily", "pincushion flower", "fritillary",
"red ginger", "grape hyacinth", "corn poppy", "prince of wales feathers",
"stemless gentian", "artichoke", "sweet william", "carnation",
"garden phlox", "love in the mist", "mexican aster", "alpine sea holly",
"ruby-lipped cattleya", "cape flower", "great masterwort", "siam tulip",
"lenten rose", "barbeton daisy", "daffodil", "sword lily", "poinsettia",
"bolero deep blue", "wallflower", "marigold", "buttercup", "oxeye daisy",
"common dandelion", "petunia", "wild pansy", "primula", "sunflower",
"pelargonium", "bishop of llandaff", "gaura", "geranium", "orange dahlia",
"pink-yellow dahlia?", "cautleya spicata", "japanese anemone",
"black-eyed susan", "silverbush", "californian poppy", "osteospermum",
"spring crocus", "bearded iris", "windflower", "tree poppy", "gazania",
"azalea", "water lily", "rose", "thorn apple", "morning glory",
"passion flower", "lotus", "toad lily", "anthurium", "frangipani",
"clematis", "hibiscus", "columbine", "desert-rose", "tree mallow",
"magnolia", "cyclamen", "watercress", "canna lily", "hippeastrum",
"bee balm", "ball moss", "foxglove", "bougainvillea", "camellia", "mallow",
"mexican petunia", "bromelia", "blanket flower", "trumpet creeper",
"blackberry lily"
]
class_dict = {}
for i, class_name in enumerate(class_names):
class_dict[class_name] = i
return class_dict
class LJSpeechDataset(MappableDataset):
"""
A source dataset for reading and parsing LJSpeech dataset.
The generated dataset has four columns :py:obj:`[waveform, sample_rate, transcription, normalized_transcript]`.
The tensor of column :py:obj:`waveform` is a tensor of the float32 type.
The tensor of column :py:obj:`sample_rate` is a scalar of the int32 type.
The tensor of column :py:obj:`transcription` is a scalar of the string type.
The tensor of column :py:obj:`normalized_transcript` is a scalar of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of audios to be included in the dataset
(default=None, all audios).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> lj_speech_dataset_dir = "/path/to/lj_speech_dataset_directory"
>>>
>>> # 1) Get all samples from LJSPEECH dataset in sequence
>>> dataset = ds.LJSpeechDataset(dataset_dir=lj_speech_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from LJSPEECH dataset
>>> dataset = ds.LJSpeechDataset(dataset_dir=lj_speech_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from LJSPEECH dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.LJSpeechDataset(dataset_dir=lj_speech_dataset_dir, num_shards=2, shard_id=0)
>>>
>>> # In LJSPEECH dataset, each dictionary has keys "waveform", "sample_rate", "transcription"
>>> # and "normalized_transcript"
About LJSPEECH dataset:
This is a public domain speech dataset consisting of 13,100 short audio clips of a single speaker
reading passages from 7 non-fiction books. A transcription is provided for each clip.
Clips vary in length from 1 to 10 seconds and have a total length of approximately 24 hours.
The texts were published between 1884 and 1964, and are in the public domain.
The audio was recorded in 2016-17 by the LibriVox project and is also in the public domain.
Here is the original LJSPEECH dataset structure.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── LJSpeech-1.1
├── README
├── metadata.csv
└── wavs
├── LJ001-0001.wav
├── LJ001-0002.wav
├── LJ001-0003.wav
├── LJ001-0004.wav
├── LJ001-0005.wav
├── LJ001-0006.wav
├── LJ001-0007.wav
├── LJ001-0008.wav
...
├── LJ050-0277.wav
└── LJ050-0278.wav
Citation:
.. code-block::
@misc{lj_speech17,
author = {Keith Ito and Linda Johnson},
title = {The LJ Speech Dataset},
howpublished = {url{https://keithito.com/LJ-Speech-Dataset}},
year = 2017
}
"""
@check_lj_speech_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
def parse(self, children=None):
return cde.LJSpeechNode(self.dataset_dir, self.sampler)
class TextFileDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses datasets stored on disk in text format.
The generated dataset has one column :py:obj:`[text]` with type string.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a
pattern of files. The list will be sorted in a lexicographical order.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL;
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> text_file_dataset_dir = ["/path/to/text_file_dataset_file"] # contains 1 or multiple text files
>>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir)
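>>>
>>> # Shuffle only the file order, keeping the rows inside each file sequential
>>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir, shuffle=ds.Shuffle.FILES)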
"""
@check_textfiledataset
def __init__(self, dataset_files, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
def parse(self, children=None):
return cde.TextFileNode(self.dataset_files, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class _NumpySlicesDataset:
"""
Mainly for dealing with several kinds of formats of Python data, returning one row at a time.
"""
def __init__(self, data, column_list=None):
self.column_list = None
# Convert dict data into tuple
if isinstance(data, dict):
data = self.process_dict(data)
if isinstance(data, tuple):
self.data = ()
data_len = len(data)
for i in range(data_len):
self.data = self.data + (np.array(data[i]),)
else:
self.data = (np.array(data),)
# check whether the data length in each column is equal
data_len = [len(data_item) for data_item in self.data]
if data_len[1:] != data_len[:-1]:
raise ValueError("Data length in each column is not equal.")
# Init column_name
if column_list is not None:
self.column_list = column_list
elif self.column_list is None:
self.column_list = []
column_num = len(self.data)
for i in range(column_num):
self.column_list.append("column_" + str(i))
def __getitem__(self, index):
data_row = [d[index, ...] for d in self.data]
data_res = tuple(data_row)
return data_res
def __len__(self):
return len(self.data[0])
def process_dict(self, input_data):
"""
Convert dict-like data into tuple format; pandas-style dicts (whose values expose a `values` attribute) are converted into plain dicts first.
"""
# Convert pandas-like dict (values that expose a "values" attribute) into a general dict
data_keys = list(input_data.keys())
data_col = input_data[data_keys[0]]
if hasattr(data_col, "values"):
new_dict = {}
for key in data_keys:
item1 = input_data.pop(key)
new_dict[key] = item1.values
input_data = new_dict
# Convert the data in dict into tuple
data = ()
keys = list(input_data.keys())
self.column_list = keys
for key in keys:
value = input_data[key]
data = data + (list(value),)
return data
class NumpySlicesDataset(GeneratorDataset):
"""
Creates a dataset with given data slices, mainly for loading Python data into dataset.
The column names and column types of generated dataset depend on Python data defined by users.
Args:
data (Union[list, tuple, dict]): Input data. Supported data types include: list, tuple, dict and other
NumPy formats. Input data will be sliced along the first dimension to generate additional rows. If the
input is a list, there will be one column in each row; otherwise there will be multiple columns. Loading
large data this way is not recommended, as the data is loaded into memory.
column_names (list[str], optional): List of column names of the dataset (default=None). If column_names is not
provided, the output column names will be named as the keys of dict when the input data is a dict,
otherwise they will be named like column_0, column_1 ...
num_samples (int, optional): The number of samples to be included in the dataset (default=None, all samples).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, `num_samples` reflects the
maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Raises:
RuntimeError: If the length of column_names does not match the number of output columns of data.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> # 1) Input data can be a list
>>> data = [1, 2, 3]
>>> dataset = ds.NumpySlicesDataset(data=data, column_names=["column_1"])
>>>
>>> # 2) Input data can be a dictionary, and column_names will be its keys
>>> data = {"a": [1, 2], "b": [3, 4]}
>>> dataset = ds.NumpySlicesDataset(data=data)
>>>
>>> # 3) Input data can be a tuple of lists (or NumPy arrays), each tuple element refers to data in each column
>>> data = ([1, 2], [3, 4], [5, 6])
>>> dataset = ds.NumpySlicesDataset(data=data, column_names=["column_1", "column_2", "column_3"])
>>>
>>> # 4) Load data from CSV file
>>> import pandas as pd
>>> df = pd.read_csv(filepath_or_buffer=csv_dataset_dir[0])
>>> dataset = ds.NumpySlicesDataset(data=dict(df), shuffle=False)
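>>>
>>> # 5) A 2-D NumPy array is sliced along its first dimension, so each row of the array
>>> # becomes one dataset row with a single column
>>> import numpy as np
>>> features = np.random.random((4, 3))
>>> dataset = ds.NumpySlicesDataset(data=features, column_names=["features"], shuffle=False)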
"""
@check_numpyslicesdataset
def __init__(self, data, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None,
num_shards=None, shard_id=None):
dataset = _NumpySlicesDataset(data, column_names)
super().__init__(dataset, column_names=dataset.column_list, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
class _PaddedDataset:
"""
Mainly for combining user-provided padding (filler) samples into a dataset.
Args:
padded_samples (list(dict)): Data provided by user to be added to the initial Dataset.
"""
def __init__(self, padded_samples):
self.column_names = list(padded_samples[0].keys())
self.padded_samples = padded_samples
def __getitem__(self, item):
return (self.padded_samples[item][key] for key in self.column_names)
def __len__(self):
return len(self.padded_samples)
class PaddedDataset(GeneratorDataset):
"""
Creates a dataset with filler data provided by the user. Mainly used to append filler samples to the
original dataset so that they can be assigned to the corresponding shard.
Args:
padded_samples (list(dict)): Samples provided by user.
Raises:
TypeError: If padded_samples is not an instance of list.
TypeError: If the element of padded_samples is not an instance of dict.
ValueError: If the padded_samples is empty.
Examples:
>>> import numpy as np
>>> data = [{'image': np.zeros(1, np.uint8)}, {'image': np.zeros(2, np.uint8)}]
>>> dataset = ds.PaddedDataset(padded_samples=data)
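>>>
>>> # Padded samples must share the same keys; the keys become the dataset's column names
>>> data = [{'image': np.zeros(1, np.uint8), 'label': np.array(0, np.int32)},
...         {'image': np.zeros(2, np.uint8), 'label': np.array(1, np.int32)}]
>>> dataset = ds.PaddedDataset(padded_samples=data)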
"""
@check_paddeddataset
def __init__(self, padded_samples):
dataset = _PaddedDataset(padded_samples)
super().__init__(dataset, column_names=dataset.column_names, num_shards=None, shard_id=None, shuffle=False)
self._dataset_size = len(dataset.padded_samples)
self.padded_samples = padded_samples
class EMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the EMNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
name (str): Name of splits for this dataset, can be "byclass", "bymerge", "balanced", "letters", "digits"
or "mnist".
usage (str, optional): Usage of this dataset, can be "train", "test" or "all".
(default=None, will read all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> emnist_dataset_dir = "/path/to/emnist_dataset_directory"
>>>
>>> # Read 3 samples from EMNIST dataset
>>> dataset = ds.EMnistDataset(dataset_dir=emnist_dataset_dir, name="mnist", num_samples=3)
>>>
>>> # Note: In emnist_dataset dataset, each dictionary has keys "image" and "label"
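>>>
>>> # Read the training portion of the 26-class "letters" split
>>> dataset = ds.EMnistDataset(dataset_dir=emnist_dataset_dir, name="letters", usage="train")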
About EMNIST dataset:
The EMNIST dataset is a set of handwritten character digits derived from the NIST Special
Database 19 and converted to a 28x28 pixel image format and dataset structure that directly
matches the MNIST dataset. Further information on the dataset contents and conversion process
can be found in the paper available at https://arxiv.org/abs/1702.05373v1.
The numbers of characters and classes of each split of EMNIST are as follows:
By Class: 814,255 characters and 62 unbalanced classes.
By Merge: 814,255 characters and 47 unbalanced classes.
Balanced: 131,600 characters and 47 balanced classes.
Letters: 145,600 characters and 26 balanced classes.
Digits: 280,000 characters and 10 balanced classes.
MNIST: 70,000 characters and 10 balanced classes.
Here is the original EMNIST dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── mnist_dataset_dir
├── emnist-mnist-train-images-idx3-ubyte
├── emnist-mnist-train-labels-idx1-ubyte
├── emnist-mnist-test-images-idx3-ubyte
├── emnist-mnist-test-labels-idx1-ubyte
├── ...
Citation:
.. code-block::
@article{cohen_afshar_tapson_schaik_2017,
title = {EMNIST: Extending MNIST to handwritten letters},
DOI = {10.1109/ijcnn.2017.7966217},
journal = {2017 International Joint Conference on Neural Networks (IJCNN)},
author = {Cohen, Gregory and Afshar, Saeed and Tapson, Jonathan and Schaik, Andre Van},
year = {2017},
howpublished = {https://www.westernsydney.edu.au/icns/reproducible_research/
publication_support_materials/emnist}
}
"""
@check_emnist_dataset
def __init__(self, dataset_dir, name, usage=None, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.name = name
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.EMnistNode(self.dataset_dir, self.name, self.usage, self.sampler)
class FakeImageDataset(MappableDataset):
"""
A source dataset for generating fake images.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
num_images (int, optional): Number of images to generate in the dataset (default=1000).
image_size (tuple, optional): Size of the fake image (default=(224, 224, 3)).
num_classes (int, optional): Number of classes in the dataset (default=10).
base_seed (int, optional): Offsets the index-based random seed used to generate each image (default=0).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> # Read 3 samples from FakeImage dataset
>>> dataset = ds.FakeImageDataset(num_images=1000, image_size=(224,224,3),
... num_classes=10, base_seed=0, num_samples=3)
>>>
>>> # Note: In FakeImage dataset, each dictionary has keys "image" and "label"
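>>>
>>> # Generate 16 small fake RGB images spread across 2 classes for a quick smoke test
>>> dataset = ds.FakeImageDataset(num_images=16, image_size=(32, 32, 3), num_classes=2)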
"""
@check_fake_image_dataset
def __init__(self, num_images=1000, image_size=(224, 224, 3), num_classes=10, base_seed=0, num_samples=None,
num_parallel_workers=None, shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.num_images = num_images
self.image_size = image_size
self.num_classes = num_classes
self.base_seed = base_seed
def parse(self, children=None):
return cde.FakeImageNode(self.num_images, self.image_size, self.num_classes, self.base_seed, self.sampler)
class FlickrDataset(MappableDataset):
"""
A source dataset for reading and parsing Flickr8k and Flickr30k dataset.
The generated dataset has two columns :py:obj:`[image, annotation]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`annotation` is a tensor which contains 5 annotation strings,
such as ["a", "b", "c", "d", "e"].
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
annotation_file (str): Path to the root directory that contains the annotation.
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is not valid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If annotation_file does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> flickr_dataset_dir = "/path/to/flickr_dataset_directory"
>>> annotation_file = "/path/to/flickr_annotation_file"
>>>
>>> # 1) Get all samples from FLICKR dataset in sequence
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from FLICKR dataset
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... num_samples=350,
... shuffle=True)
>>>
>>> # 3) Get samples from FLICKR dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... num_shards=2,
... shard_id=0)
>>>
>>> # In FLICKR dataset, each dictionary has keys "image" and "annotation"
About Flickr8k dataset:
The Flickr8k dataset consists of 8092 colour images. There are 40460 annotations in the Flickr8k.token.txt;
each image has 5 annotations.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── Flickr8k
├── Flickr8k_Dataset
│ ├── 1000268201_693b08cb0e.jpg
│ ├── 1001773457_577c3a7d70.jpg
│ ├── ...
└── Flickr8k.token.txt
Citation:
.. code-block::
@article{DBLP:journals/jair/HodoshYH13,
author = {Micah Hodosh and Peter Young and Julia Hockenmaier},
title = {Framing Image Description as a Ranking Task: Data, Models and Evaluation Metrics},
journal = {J. Artif. Intell. Res.},
volume = {47},
pages = {853--899},
year = {2013},
url = {https://doi.org/10.1613/jair.3994},
doi = {10.1613/jair.3994},
timestamp = {Mon, 21 Jan 2019 15:01:17 +0100},
biburl = {https://dblp.org/rec/journals/jair/HodoshYH13.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
About Flickr30k dataset:
The Flickr30k dataset consists of 31783 colour images. There are 158915 annotations in
the results_20130124.token; each image has 5 annotations.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
Citation:
.. code-block::
.
└── Flickr30k
├── flickr30k-images
│ ├── 1000092795.jpg
│ ├── 10002456.jpg
│ ├── ...
└── results_20130124.token
.. code-block::
@article{DBLP:journals/tacl/YoungLHH14,
author = {Peter Young and Alice Lai and Micah Hodosh and Julia Hockenmaier},
title = {From image descriptions to visual denotations: New similarity metrics
for semantic inference over event descriptions},
journal = {Trans. Assoc. Comput. Linguistics},
volume = {2},
pages = {67--78},
year = {2014},
url = {https://tacl2013.cs.columbia.edu/ojs/index.php/tacl/article/view/229},
timestamp = {Wed, 17 Feb 2021 21:55:25 +0100},
biburl = {https://dblp.org/rec/journals/tacl/YoungLHH14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
@check_flickr_dataset
def __init__(self, dataset_dir, annotation_file, num_samples=None, num_parallel_workers=None, shuffle=None,
decode=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.annotation_file = annotation_file
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.FlickrNode(self.dataset_dir, self.annotation_file, self.decode, self.sampler)
class SBDataset(GeneratorDataset):
"""
A source dataset for reading and parsing Semantic Boundaries Dataset.
The generated dataset has two columns: :py:obj:`[image, task]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`task` contains 20 images of the uint8 type if `task` is `Boundaries`; otherwise,
it contains 1 image of the uint8 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str, optional): Acceptable tasks include `Boundaries` or `Segmentation` (default= `Boundaries`).
usage (str, optional): Acceptable usages include `train`, `val`, `train_noval` and `all` (default= `all`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
Raises:
RuntimeError: If dataset_dir is not valid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If task is not in [`Boundaries`, `Segmentation`].
ValueError: If usage is not in [`train`, `val`, `train_noval`, `all`].
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> sb_dataset_dir = "/path/to/sb_dataset_directory"
>>>
>>> # 1) Get all samples from Semantic Boundaries Dataset in sequence
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from Semantic Boundaries Dataset
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from Semantic Boundaries Dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, num_shards=2, shard_id=0)
>>>
>>> # In Semantic Boundaries Dataset, each dictionary has keys "image" and "task"
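>>>
>>> # Read the validation split with the Segmentation task, so the "task" column holds
>>> # a single segmentation image per sample
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, task='Segmentation', usage='val')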
About Semantic Boundaries Dataset:
The Semantic Boundaries Dataset consists of 11355 colour images. There are 8498 image names in train.txt,
2857 image names in val.txt and 5623 image names in train_noval.txt. The cls/ directory contains
category-level Segmentation and Boundaries results, and the inst/ directory contains instance-level
Segmentation and Boundaries results.
You can unzip the dataset files into the following structure and read by MindSpore's API:
.. code-block::
.
└── benchmark_RELEASE
├── dataset
├── img
│ ├── 2008_000002.jpg
│ ├── 2008_000003.jpg
│ ├── ...
├── cls
│ ├── 2008_000002.mat
│ ├── 2008_000003.mat
│ ├── ...
├── inst
│ ├── 2008_000002.mat
│ ├── 2008_000003.mat
│ ├── ...
├── train.txt
└── val.txt
.. code-block::
@InProceedings{BharathICCV2011,
author = "Bharath Hariharan and Pablo Arbelaez and Lubomir Bourdev and
Subhransu Maji and Jitendra Malik",
title = "Semantic Contours from Inverse Detectors",
booktitle = "International Conference on Computer Vision (ICCV)",
year = "2011",
"""
@check_sb_dataset
def __init__(self, dataset_dir, task='Boundaries', usage='all', num_samples=None, num_parallel_workers=1,
shuffle=None, decode=None, sampler=None, num_shards=None, shard_id=None):
dataset = _SBDataset(dataset_dir, task, usage, decode)
super().__init__(dataset, column_names=dataset.column_list, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
class _SBDataset:
"""
Deals with data files with the .mat extension, returning one row as a tuple (image, task) at a time.
"""
def __init__(self, dataset_dir, task, usage, decode):
self.column_list = ['image', 'task']
self.task = task
self.images_path = os.path.join(dataset_dir, 'img')
self.cls_path = os.path.join(dataset_dir, 'cls')
self._loadmat = loadmat
self.categories = 20
self.decode = replace_none(decode, False)
if usage == "all":
image_names = []
for item in ["train", "val"]:
usage_path = os.path.join(dataset_dir, item + '.txt')
if not os.path.exists(usage_path):
raise FileNotFoundError("SBDataset: {0} not found".format(usage_path))
with open(usage_path, 'r') as f:
image_names += [x.strip() for x in f.readlines()]
else:
usage_path = os.path.join(dataset_dir, usage + '.txt')
if not os.path.exists(usage_path):
raise FileNotFoundError("SBDataset: {0} not found".format(usage_path))
with open(usage_path, 'r') as f:
image_names = [x.strip() for x in f.readlines()]
self.images = [os.path.join(self.images_path, i + ".jpg") for i in image_names]
self.clss = [os.path.join(self.cls_path, i + ".mat") for i in image_names]
if len(self.images) != len(self.clss):
raise ValueError("SBDataset: images count not equal to cls count")
self._get_data = self._get_boundaries_data if self.task == "Boundaries" else self._get_segmentation_data
self._get_item = self._get_decode_item if self.decode else self._get_undecode_item
def _get_boundaries_data(self, mat_path):
mat_data = self._loadmat(mat_path)
return np.concatenate([np.expand_dims(mat_data['GTcls'][0][self.task][0][i][0].toarray(), axis=0)
for i in range(self.categories)], axis=0)
def _get_segmentation_data(self, mat_path):
mat_data = self._loadmat(mat_path)
return Image.fromarray(mat_data['GTcls'][0][self.task][0])
def _get_decode_item(self, idx):
return Image.open(self.images[idx]).convert('RGB'), self._get_data(self.clss[idx])
def _get_undecode_item(self, idx):
return np.fromfile(self.images[idx], dtype=np.uint8), self._get_data(self.clss[idx])
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
return self._get_item(idx)
class SpeechCommandsDataset(MappableDataset):
"""
A source dataset for reading and parsing the SpeechCommands dataset.
The generated dataset has five columns :py:obj:`[waveform, sample_rate, label, speaker_id, utterance_number]`.
The tensor of column :py:obj:`waveform` is a vector of the float32 type.
The tensor of column :py:obj:`sample_rate` is a scalar of the int32 type.
The tensor of column :py:obj:`label` is a scalar of the string type.
The tensor of column :py:obj:`speaker_id` is a scalar of the string type.
The tensor of column :py:obj:`utterance_number` is a scalar of the int32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test`, `valid` or `all`. `train`
will read from 84,843 samples, `test` will read from 11,005 samples, `valid` will read from 9,981
validation samples and `all` will read from all 105,829 samples (default=None, will read all samples).
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will read all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This argument can only be specified
when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> speech_commands_dataset_dir = "/path/to/speech_commands_dataset_directory"
>>>
>>> # Read 3 samples from SpeechCommands dataset
>>> dataset = ds.SpeechCommandsDataset(dataset_dir=speech_commands_dataset_dir, num_samples=3)
>>>
>>> # Note: In SpeechCommands dataset, each dictionary has keys "waveform", "sample_rate", "label",
>>> # "speaker_id" and "utterance_number".
About SpeechCommands dataset:
SpeechCommands is a database for limited-vocabulary speech recognition, containing 105,829 audio samples in
'.wav' format.
Here is the original SpeechCommands dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── speech_commands_dataset_dir
├── cat
├── b433eff_nohash_0.wav
├── 5a33edf_nohash_1.wav
└──....
├── dog
├── b433w2w_nohash_0.wav
└──....
├── four
└── ....
Citation:
.. code-block::
@article{2018Speech,
title={Speech Commands: A Dataset for Limited-Vocabulary Speech Recognition},
author={Warden, P.},
year={2018}
}
"""
@check_speech_commands_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.SpeechCommandsNode(self.dataset_dir, self.usage, self.sampler)
class DeserializedDataset(Dataset):
def __init__(self, input_obj):
super().__init__()
self.input_obj = input_obj
def parse(self, children=None):
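# The serialized input is either an in-memory JSON object (dict) or a path to a JSON file on disk.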
if isinstance(self.input_obj, dict):
json_str = json.dumps(self.input_obj)
return cde.Dataset.from_json_string(json_str)
return cde.Dataset.from_json_file(self.input_obj)
class CityscapesDataset(MappableDataset):
"""
A source dataset for reading and parsing Cityscapes dataset.
The generated dataset has two columns :py:obj:`[image, task]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`task` is of the uint8 type if task is not 'polygon'; otherwise it is
a string tensor containing serialized JSON.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str): Acceptable usages include `train`, `test`, `val` or `all` if quality_mode is `fine`
otherwise `train`, `train_extra`, `val` or `all` (default= `train`).
quality_mode (str): Acceptable quality_modes include `fine` or `coarse` (default= `fine`).
task (str): Acceptable tasks include `instance`, `semantic`, `polygon` or `color` (default= `instance`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the max sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is invalid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If task is invalid.
ValueError: If quality_mode is invalid.
ValueError: If usage is invalid.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cityscapes_dataset_dir = "/path/to/cityscapes_dataset_directory"
>>>
>>> # 1) Get all samples from Cityscapes dataset in sequence
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, task="instance", quality_mode="fine",
... usage="train", shuffle=False, num_parallel_workers=1)
>>>
>>> # 2) Randomly select 350 samples from Cityscapes dataset
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, num_samples=350, shuffle=True,
... num_parallel_workers=1)
>>>
>>> # 3) Get samples from Cityscapes dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, num_shards=2, shard_id=0,
... num_parallel_workers=1)
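>>>
>>> # 4) Read the `polygon` task (a usage sketch); for this task the `task` column is a serialized JSON string
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, task="polygon", quality_mode="fine",
...                                usage="train")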
>>>
>>> # In Cityscapes dataset, each dictionary has keys "image" and "task"
About Cityscapes dataset:
The Cityscapes dataset consists of 5000 colour images with high quality dense pixel annotations and
19998 colour images with coarser polygonal annotations in 50 cities. There are 30 classes in this
dataset and the polygonal annotations include dense semantic segmentation and instance segmentation
for vehicle and people.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
Taking the quality_mode of `fine` as an example.
.. code-block::
.
└── Cityscapes
├── leftImg8bit
| ├── train
| | ├── aachen
| | | ├── aachen_000000_000019_leftImg8bit.png
| | | ├── aachen_000001_000019_leftImg8bit.png
| | | ├── ...
| | ├── bochum
| | | ├── ...
| | ├── ...
| ├── test
| | ├── ...
| ├── val
| | ├── ...
└── gtFine
├── train
| ├── aachen
| | ├── aachen_000000_000019_gtFine_color.png
| | ├── aachen_000000_000019_gtFine_instanceIds.png
| | ├── aachen_000000_000019_gtFine_labelIds.png
| | ├── aachen_000000_000019_gtFine_polygons.json
| | ├── aachen_000001_000019_gtFine_color.png
| | ├── aachen_000001_000019_gtFine_instanceIds.png
| | ├── aachen_000001_000019_gtFine_labelIds.png
| | ├── aachen_000001_000019_gtFine_polygons.json
| | ├── ...
| ├── bochum
| | ├── ...
| ├── ...
├── test
| ├── ...
└── val
├── ...
Citation:
.. code-block::
@inproceedings{Cordts2016Cityscapes,
title = {The Cityscapes Dataset for Semantic Urban Scene Understanding},
author = {Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler,
Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
booktitle = {Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2016}
}
"""
@check_cityscapes_dataset
def __init__(self, dataset_dir, usage="train", quality_mode="fine", task="instance", num_samples=None,
num_parallel_workers=None, shuffle=None, decode=None, sampler=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.task = task
self.quality_mode = quality_mode
self.usage = usage
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.CityscapesNode(self.dataset_dir, self.usage, self.quality_mode, self.task, self.decode, self.sampler)
class DBpediaDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses the DBpedia dataset.
The generated dataset has three columns :py:obj:`[class, title, content]`.
The tensor of column :py:obj:`class` is of the string type.
The tensor of column :py:obj:`title` is of the string type.
The tensor of column :py:obj:`content` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`.
`train` will read from 560,000 train samples,
`test` will read from 70,000 test samples,
`all` will read from all 630,000 samples (default=None, all samples).
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all text).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL;
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> dbpedia_dataset_dir = "/path/to/dbpedia_dataset_directory"
>>>
>>> # 1) Read 3 samples from DBpedia dataset
>>> dataset = ds.DBpediaDataset(dataset_dir=dbpedia_dataset_dir, num_samples=3)
>>>
>>> # 2) Read train samples from DBpedia dataset
>>> dataset = ds.DBpediaDataset(dataset_dir=dbpedia_dataset_dir, usage="train")
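>>>
>>> # 3) Read train samples with file-level shuffling only (a sketch; `Shuffle` is `mindspore.dataset.Shuffle`)
>>> dataset = ds.DBpediaDataset(dataset_dir=dbpedia_dataset_dir, usage="train", shuffle=ds.Shuffle.FILES)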
About DBpedia dataset:
The DBpedia dataset consists of 630,000 text samples in 14 classes. There are 560,000 samples in the train.csv
and 70,000 samples in the test.csv.
The 14 different classes represent Company, EducationalInstitution, Artist, Athlete, OfficeHolder,
MeanOfTransportation, Building, NaturalPlace, Village, Animal, Plant, Album, Film, WrittenWork.
Here is the original DBpedia dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── dbpedia_dataset_dir
├── train.csv
├── test.csv
├── classes.txt
└── readme.txt
.. code-block::
@article{DBpedia,
title = {DBPedia Ontology Classification Dataset},
author = {Jens Lehmann, Robert Isele, Max Jakob, Anja Jentzsch, Dimitris Kontokostas,
Pablo N. Mendes, Sebastian Hellmann, Mohamed Morsey, Patrick van Kleef,
Sören Auer, Christian Bizer},
year = {2015},
howpublished = {http://dbpedia.org}
}
"""
@check_dbpedia_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.DBpediaNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class DIV2KDataset(MappableDataset):
"""
A source dataset for reading and parsing DIV2K dataset.
The generated dataset has two columns :py:obj:`[hr_image, lr_image]`.
The tensor of column :py:obj:`hr_image` is of the uint8 type.
The tensor of column :py:obj:`lr_image` is of the uint8 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str): Acceptable usages include `train`, `valid` or `all` (default= `train`).
downgrade (str): Acceptable downgrades include `bicubic`, `unknown`, `mild`, `difficult` or
`wild` (default= `bicubic`).
scale (int): Acceptable scales include 2, 3, 4 or 8 (default=2).
When `downgrade` is `bicubic`, scale can be 2, 3, 4, 8.
When `downgrade` is `unknown`, scale can only be 2, 3, 4.
When `downgrade` is `mild`, `difficult` or `wild`, scale can only be 4.
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the max sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is invalid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If usage is invalid.
ValueError: If downgrade is invalid.
ValueError: If scale is invalid.
ValueError: If scale is 8 and downgrade is not `bicubic`.
ValueError: If downgrade is `mild`, `difficult` or `wild` and scale is not 4.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> div2k_dataset_dir = "/path/to/div2k_dataset_directory"
>>>
>>> # 1) Get all samples from DIV2K dataset in sequence
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from DIV2K dataset
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from DIV2K dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... num_shards=2, shard_id=0)
>>>
>>> # In DIV2K dataset, each dictionary has keys "hr_image" and "lr_image"
About DIV2K dataset:
The DIV2K dataset consists of 1000 2K resolution images, among which 800 images are for training, 100 images
are for validation and 100 images are for testing. NTIRE 2017 and NTIRE 2018 include only training dataset
and validation dataset.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
Take the training set as an example.
.. code-block::
.
└── DIV2K
├── DIV2K_train_HR
| ├── 0001.png
| ├── 0002.png
| ├── ...
├── DIV2K_train_LR_bicubic
| ├── X2
| | ├── 0001x2.png
| | ├── 0002x2.png
| | ├── ...
| ├── X3
| | ├── 0001x3.png
| | ├── 0002x3.png
| | ├── ...
| └── X4
| ├── 0001x4.png
| ├── 0002x4.png
| ├── ...
├── DIV2K_train_LR_unknown
| ├── X2
| | ├── 0001x2.png
| | ├── 0002x2.png
| | ├── ...
| ├── X3
| | ├── 0001x3.png
| | ├── 0002x3.png
| | ├── ...
| └── X4
| ├── 0001x4.png
| ├── 0002x4.png
| ├── ...
├── DIV2K_train_LR_mild
| ├── 0001x4m.png
| ├── 0002x4m.png
| ├── ...
├── DIV2K_train_LR_difficult
| ├── 0001x4d.png
| ├── 0002x4d.png
| ├── ...
├── DIV2K_train_LR_wild
| ├── 0001x4w.png
| ├── 0002x4w.png
| ├── ...
└── DIV2K_train_LR_x8
├── 0001x8.png
├── 0002x8.png
├── ...
Citation:
.. code-block::
@InProceedings{Agustsson_2017_CVPR_Workshops,
author = {Agustsson, Eirikur and Timofte, Radu},
title = {NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
url = "http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf",
month = {July},
year = {2017}
}
"""
@check_div2k_dataset
def __init__(self, dataset_dir, usage="train", downgrade="bicubic", scale=2, num_samples=None,
num_parallel_workers=None, shuffle=None, decode=None, sampler=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = usage
self.scale = scale
self.downgrade = downgrade
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.DIV2KNode(self.dataset_dir, self.usage, self.downgrade, self.scale, self.decode, self.sampler)
class WIDERFaceDataset(MappableDataset):
"""
A source dataset for reading and parsing WIDERFace dataset.
When usage is "train", "valid" or "all", the generated dataset has eight columns ["image", "bbox", "blur",
"expression", "illumination", "occlusion", "pose", "invalid"]. When usage is "test", it only has one column
["image"].
The tensor of column :py:obj:`image` is a vector of the uint8 type.
The tensor of column :py:obj:`bbox` is a scalar of the uint32 type.
The tensor of column :py:obj:`blur` is a scalar of the uint32 type.
The tensor of column :py:obj:`expression` is a scalar of the uint32 type.
The tensor of column :py:obj:`illumination` is a scalar of the uint32 type.
The tensor of column :py:obj:`occlusion` is a scalar of the uint32 type.
The tensor of column :py:obj:`pose` is a scalar of the uint32 type.
The tensor of column :py:obj:`invalid` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test`, `valid` or `all`. `train` will read
from 12,880 samples, `test` will read from 16,097 samples, `valid` will read from 3,226 validation samples
and `all` will read all `train` and `valid` samples (default=None, will be set to `all`).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This argument can only be specified
when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If usage is not in [`train`, `test`, `valid`, `all`].
ValueError: If annotation_file does not exist.
ValueError: If dataset_dir does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> wider_face_dir = "/path/to/wider_face_dataset"
>>>
>>> # Read 3 samples from WIDERFace dataset
>>> dataset = ds.WIDERFaceDataset(dataset_dir=wider_face_dir, num_samples=3)
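>>>
>>> # Read the validation split with decoded images (a usage sketch based on the documented parameters)
>>> dataset = ds.WIDERFaceDataset(dataset_dir=wider_face_dir, usage="valid", decode=True)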
About WIDERFace dataset:
The WIDERFace database of faces has a training set of 12,880 samples, a testing set of 16,097 samples
and a validation set of 3,226 samples. It is a face detection benchmark whose images are selected
from the publicly available WIDER dataset.
The following is the original WIDERFace dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── wider_face_dir
├── WIDER_test
│ └── images
│ ├── 0--Parade
│ │ ├── 0_Parade_marchingband_1_9.jpg
│ │ ├── ...
│ ├──1--Handshaking
│ ├──...
├── WIDER_train
│ └── images
│ ├── 0--Parade
│ │ ├── 0_Parade_marchingband_1_11.jpg
│ │ ├── ...
│ ├──1--Handshaking
│ ├──...
├── WIDER_val
│ └── images
│ ├── 0--Parade
│ │ ├── 0_Parade_marchingband_1_102.jpg
│ │ ├── ...
│ ├──1--Handshaking
│ ├──...
└── wider_face_split
├── wider_face_test_filelist.txt
├── wider_face_train_bbx_gt.txt
└── wider_face_val_bbx_gt.txt
Citation:
.. code-block::
@inproceedings{2016WIDER,
title={WIDER FACE: A Face Detection Benchmark},
author={Yang, S. and Luo, P. and Loy, C. C. and Tang, X.},
booktitle={IEEE},
pages={5525-5533},
year={2016},
}
"""
@check_wider_face_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
decode=False, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.WIDERFaceNode(self.dataset_dir, self.usage, self.decode, self.sampler)
class YelpReviewDataset(SourceDataset, TextBaseDataset):
"""
A source dataset that reads and parses Yelp Review Polarity and Yelp Review Full dataset.
The generated dataset has two columns: :py:obj:`[label, text]`.
The tensor of column :py:obj:`label` is of the string type.
The tensor of column :py:obj:`text` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`.
For Polarity, `train` will read from 560,000 train samples, `test` will read from 38,000 test samples,
`all` will read from all 598,000 samples.
For Full, `train` will read from 650,000 train samples, `test` will read from 50,000 test samples,
`all` will read from all 700,000 samples (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads all samples).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> yelp_review_dataset_dir = "/path/to/yelp_review_dataset_dir"
>>> dataset = ds.YelpReviewDataset(dataset_dir=yelp_review_dataset_dir, usage='all')
About YelpReview Dataset:
The Yelp Review Full dataset consists of reviews from Yelp. It is extracted from the Yelp Dataset Challenge 2015
data, and it is mainly used for text classification.
The Yelp Review Polarity dataset is constructed from the above dataset, by considering stars 1 and 2 negative, and 3
and 4 positive.
The directory structures of these two datasets are the same.
You can unzip the dataset files into the following structure and read by MindSpore's API:
.. code-block::
.
└── yelp_review_dir
├── train.csv
├── test.csv
└── readme.txt
Citation:
For Yelp Review Polarity:
.. code-block::
@article{zhangCharacterlevelConvolutionalNetworks2015,
archivePrefix = {arXiv},
eprinttype = {arxiv},
eprint = {1509.01626},
primaryClass = {cs},
title = {Character-Level {{Convolutional Networks}} for {{Text Classification}}},
abstract = {This article offers an empirical exploration on the use of character-level convolutional networks
(ConvNets) for text classification. We constructed several large-scale datasets to show that
character-level convolutional networks could achieve state-of-the-art or competitive results.
Comparisons are offered against traditional models such as bag of words, n-grams and their TFIDF
variants, and deep learning models such as word-based ConvNets and recurrent neural networks.},
journal = {arXiv:1509.01626 [cs]},
author = {Zhang, Xiang and Zhao, Junbo and LeCun, Yann},
month = sep,
year = {2015},
}
Citation:
For Yelp Review Full:
.. code-block::
@article{zhangCharacterlevelConvolutionalNetworks2015,
archivePrefix = {arXiv},
eprinttype = {arxiv},
eprint = {1509.01626},
primaryClass = {cs},
title = {Character-Level {{Convolutional Networks}} for {{Text Classification}}},
abstract = {This article offers an empirical exploration on the use of character-level convolutional networks
(ConvNets) for text classification. We constructed several large-scale datasets to show that
character-level convolutional networks could achieve state-of-the-art or competitive results.
Comparisons are offered against traditional models such as bag of words, n-grams and their TFIDF
variants, and deep learning models such as word-based ConvNets and recurrent neural networks.},
journal = {arXiv:1509.01626 [cs]},
author = {Zhang, Xiang and Zhao, Junbo and LeCun, Yann},
month = sep,
year = {2015},
}
"""
@check_yelp_review_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, shuffle=Shuffle.GLOBAL, num_shards=None,
shard_id=None, num_parallel_workers=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, 'all')
def parse(self, children=None):
return cde.YelpReviewNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id)
class YesNoDataset(MappableDataset):
"""
A source dataset for reading and parsing the YesNo dataset.
The generated dataset has three columns :py:obj:`[waveform, sample_rate, labels]`.
The tensor of column :py:obj:`waveform` is a vector of the float32 type.
The tensor of column :py:obj:`sample_rate` is a scalar of the int32 type.
The tensor of column :py:obj:`labels` is a scalar of the int32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This argument can only
be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> yes_no_dataset_dir = "/path/to/yes_no_dataset_directory"
>>>
>>> # Read 3 samples from YesNo dataset
>>> dataset = ds.YesNoDataset(dataset_dir=yes_no_dataset_dir, num_samples=3)
>>>
>>> # Note: In YesNo dataset, each dictionary has keys "waveform", "sample_rate", "label"
About YesNo dataset:
Yesno is an audio dataset consisting of 60 recordings of one individual saying yes or no in Hebrew; each
recording is eight words long. It was created for the Kaldi audio project by an author who wishes to
remain anonymous.
Here is the original YesNo dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── yes_no_dataset_dir
├── 1_1_0_0_1_1_0_0.wav
├── 1_0_0_0_1_1_0_0.wav
└──....
Citation:
.. code-block::
@NetworkResource{Kaldi_audio_project,
author = {anonymous},
url = "http://wwww.openslr.org/1/"
}
"""
@check_yes_no_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
def parse(self, children=None):
return cde.YesNoNode(self.dataset_dir, self.sampler)
class SemeionDataset(MappableDataset):
"""
A source dataset for reading and parsing Semeion dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> semeion_dataset_dir = "/path/to/semeion_dataset_directory"
>>>
>>> # 1) Get all samples from SEMEION dataset in sequence
>>> dataset = ds.SemeionDataset(dataset_dir=semeion_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 10 samples from SEMEION dataset
>>> dataset = ds.SemeionDataset(dataset_dir=semeion_dataset_dir, num_samples=10, shuffle=True)
>>>
>>> # 3) Get samples from SEMEION dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.SemeionDataset(dataset_dir=semeion_dataset_dir, num_shards=2, shard_id=0)
>>>
>>> # In SEMEION dataset, each dictionary has keys: image, label.
About SEMEION dataset:
The dataset was created by Tactile Srl, Brescia, Italy (http://www.tattile.it) and donated in 1994
to Semeion Research Center of Sciences of Communication, Rome, Italy (http://www.semeion.it),
for machine learning research.
This dataset consists of 1593 records (rows) and 256 attributes (columns). Each record represents
a handwritten digit, originally scanned with 256 grey levels. Each pixel of each
original scanned image was first stretched and then scaled between 0 and 1
(setting to 0 every pixel whose value was under the value 127 of the grey scale (127 included)
and setting to 1 each pixel whose original value in the grey scale was over 127). Finally, each binary image
was scaled again into a 16x16 square box (the final 256 binary attributes).
.. code-block::
.
└── semeion_dataset_dir
└──semeion.data
└──semeion.names
Citation:
.. code-block::
@article{
title={The Theory of Independent Judges, in Substance Use & Misuse 33(2)1998, pp 439-461},
author={M Buscema, MetaNet},
}
"""
@check_semeion_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
def parse(self, children=None):
return cde.SemeionNode(self.dataset_dir, self.sampler)
class TedliumDataset(MappableDataset):
"""
A source dataset for reading and parsing Tedlium dataset.
The columns of the generated dataset depend on the source SPH files and the corresponding STM files.
The generated dataset has six columns :py:obj:`[waveform, sample_rate, transcript, talk_id, speaker_id,
identifier]`.
The tensor of column :py:obj:`waveform` is of the float32 type.
The tensor of column :py:obj:`sample_rate` is a scalar of the int32 type.
The tensor of column :py:obj:`transcript` is a scalar of the string type.
The tensor of column :py:obj:`talk_id` is a scalar of the string type.
The tensor of column :py:obj:`speaker_id` is a scalar of the string type.
The tensor of column :py:obj:`identifier` is a scalar of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
release (str): Release of the dataset, can be "release1", "release2", "release3".
usage (str, optional): Usage of this dataset.
For release1 or release2, can be `train`, `test`, `dev` or `all`.
`train` will read from train samples,
`test` will read from test samples,
`dev` will read from dev samples,
`all` will read from all samples.
For release3, can only be "all", it will read from data samples (default=None, all samples).
extensions (str): Extensions of the SPH files, only '.sph' is valid.
(default=None, ".sph").
num_samples (int, optional): The number of audio samples to be included in the dataset
(default=None, all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain stm files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> # 1) Get all train samples from TEDLIUM_release1 dataset in sequence.
>>> dataset = ds.TedliumDataset(dataset_dir="/path/to/tedlium1_dataset_directory",
... release="release1", shuffle=False)
>>>
>>> # 2) Randomly select 10 samples from TEDLIUM_release2 dataset.
>>> dataset = ds.TedliumDataset(dataset_dir="/path/to/tedlium2_dataset_directory",
... release="release2", num_samples=10, shuffle=True)
>>>
>>> # 3) Get samples from TEDLIUM_release-3 dataset for shard 0 in a 2-way distributed training.
>>> dataset = ds.TedliumDataset(dataset_dir="/path/to/tedlium3_dataset_directory",
... release="release3", num_shards=2, shard_id=0)
>>>
>>> # In TEDLIUM dataset, each dictionary has keys : waveform, sample_rate, transcript, talk_id,
>>> # speaker_id and identifier.
About TEDLIUM_release1 dataset:
The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz.
It contains about 118 hours of speech.
About TEDLIUM_release2 dataset:
This is the TED-LIUM corpus release 2, licensed under Creative Commons BY-NC-ND 3.0. All talks and text are
property of TED Conferences LLC. The TED-LIUM corpus was made from audio talks and their transcriptions available
on the TED website. We have prepared and filtered these data in order to train acoustic models to participate to
the International Workshop on Spoken Language Translation 2011 (the LIUM English/French SLT system reached the
first rank in the SLT task).
About TEDLIUM_release-3 dataset:
This is the TED-LIUM corpus release 3, licensed under Creative Commons BY-NC-ND 3.0. All talks and text are
property of TED Conferences LLC. This new TED-LIUM release was made through a collaboration between the Ubiqus
company and the LIUM (University of Le Mans, France).
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
The structure of TEDLIUM release2 is the same as TEDLIUM release1, only the data is different.
.. code-block::
.
└──TEDLIUM_release1
└── dev
├── sph
├── AlGore_2009.sph
├── BarrySchwartz_2005G.sph
├── stm
├── AlGore_2009.stm
├── BarrySchwartz_2005G.stm
└── test
├── sph
├── AimeeMullins_2009P.sph
├── BillGates_2010.sph
├── stm
├── AimeeMullins_2009P.stm
├── BillGates_2010.stm
└── train
├── sph
├── AaronHuey_2010X.sph
├── AdamGrosser_2007.sph
├── stm
├── AaronHuey_2010X.stm
├── AdamGrosser_2007.stm
└── readme
└── TEDLIUM.150k.dic
.. code-block::
.
└──TEDLIUM_release-3
└── data
├── ctl
├── sph
├── 911Mothers_2010W.sph
├── AalaElKhani.sph
├── stm
├── 911Mothers_2010W.stm
├── AalaElKhani.stm
└── doc
└── legacy
└── LM
└── speaker-adaptation
└── readme
└── TEDLIUM.150k.dic
Citation:
.. code-block::
@article{
title={TED-LIUM: an automatic speech recognition dedicated corpus},
author={A. Rousseau, P. Deléglise, Y. Estève},
journal={Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12)},
year={May 2012},
biburl={https://www.openslr.org/7/}
}
@article{
title={Enhancing the TED-LIUM Corpus with Selected Data for Language Modeling and More TED Talks},
author={A. Rousseau, P. Deléglise, and Y. Estève},
journal={Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)},
year={May 2014},
biburl={https://www.openslr.org/19/}
}
@article{
title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
author={François Hernandez, Vincent Nguyen, Sahar Ghannay, Natalia Tomashenko, and Yannick Estève},
journal={the 20th International Conference on Speech and Computer (SPECOM 2018)},
year={September 2018},
biburl={https://www.openslr.org/51/}
}
"""
@check_tedlium_dataset
def __init__(self, dataset_dir, release, usage=None, extensions=None, num_samples=None,
num_parallel_workers=None, shuffle=None, sampler=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.extensions = replace_none(extensions, ".sph")
self.release = release
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.TedliumNode(self.dataset_dir, self.release, self.usage, self.extensions, self.sampler)
class _SVHNDataset:
"""
Helper class for loading the SVHN dataset; returns the image and label columns for each sample.
"""
def __init__(self, dataset_dir, usage):
self.dataset_dir = os.path.realpath(dataset_dir)
self.usage = usage
self.column_names = ["image", "label"]
self.usage_all = ["train", "test", "extra"]
self.data = np.array([], dtype=np.uint8)
self.labels = np.array([], dtype=np.uint32)
if self.usage == "all":
for _usage in self.usage_all:
data, label = self._load_mat(_usage)
self.data = np.concatenate((self.data, data)) if self.data.size else data
self.labels = np.concatenate((self.labels, label)) if self.labels.size else label
else:
self.data, self.labels = self._load_mat(self.usage)
def _load_mat(self, mode):
filename = mode + "_32x32.mat"
mat_data = loadmat(os.path.join(self.dataset_dir, filename))
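# SVHN stores images in 'X' as H x W x C x N; transpose to N x H x W x C.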
data = np.transpose(mat_data['X'], [3, 0, 1, 2])
label = mat_data['y'].astype(np.uint32).squeeze()
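# In the original SVHN labels the digit '0' is stored as 10; remap it to 0.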
np.place(label, label == 10, 0)
return data, label
def __getitem__(self, index):
return self.data[index], self.labels[index]
def __len__(self):
return len(self.data)
class SVHNDataset(GeneratorDataset):
"""
A source dataset for reading and parsing SVHN dataset.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Specify the 'train', 'test', 'extra' or 'all' parts of dataset
(default=None, will read all samples).
num_samples (int, optional): The number of samples to be included in the dataset (default=None, all images).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, 'num_samples' reflects the max
sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
Raises:
RuntimeError: If dataset_dir is not valid or does not exist or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If usage is invalid.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> svhn_dataset_dir = "/path/to/svhn_dataset_directory"
>>> dataset = ds.SVHNDataset(dataset_dir=svhn_dataset_dir, usage="train")
About SVHN dataset:
SVHN dataset consists of 10 digit classes.
SVHN is obtained from house numbers in Google Street View images.
73,257 digits for training, 26,032 digits for testing, and 531,131 additional samples for extra training data.
Here is the original SVHN dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── svhn_dataset_dir
├── train_32x32.mat
├── test_32x32.mat
└── extra_32x32.mat
Citation:
.. code-block::
@article{
title={Reading Digits in Natural Images with Unsupervised Feature Learning},
author={Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, Andrew Y. Ng},
conference={NIPS Workshop on Deep Learning and Unsupervised Feature Learning 2011.},
year={2011},
publisher={NIPS},
url={http://ufldl.stanford.edu/housenumbers}
}
"""
@check_svhn_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=1, shuffle=None,
sampler=None, num_shards=None, shard_id=None):
self.dataset_dir = os.path.realpath(dataset_dir)
self.usage = replace_none(usage, "all")
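# Build the random-access loader and hand it to GeneratorDataset so sampling and sharding apply to it.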
dataset = _SVHNDataset(self.dataset_dir, self.usage)
super().__init__(dataset, column_names=dataset.column_names, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
class STL10Dataset(MappableDataset):
"""
A source dataset for reading and parsing STL10 dataset.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the int32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be "train", "test",
"unlabeled", "train+unlabeled" or "all" . "train" will read from 5,000
train samples, "test" will read from 8,000 test samples,
"unlabeled" will read from all 100,000 samples, and "train+unlabeled"
will read from 105,000 samples, and "all" will read all the samples
(default=None, all samples).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, 'num_samples' reflects
the max sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is not valid or does not exist or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If usage is invalid.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> stl10_dataset_dir = "/path/to/stl10_dataset_directory"
>>>
>>> # 1) Get all samples from STL10 dataset in sequence
>>> dataset = ds.STL10Dataset(dataset_dir=stl10_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from STL10 dataset
>>> dataset = ds.STL10Dataset(dataset_dir=stl10_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from STL10 dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.STL10Dataset(dataset_dir=stl10_dataset_dir, num_shards=2, shard_id=0)
About STL10 dataset:
STL10 dataset consists of 10 classes: airplane, bird, car, cat, deer, dog, horse, monkey, ship, truck.
STL10 is inspired by the CIFAR-10 dataset.
Images are 96x96 pixels, in color.
There are 500 training images and 800 test images per class, plus 100,000 unlabeled images.
Labels are 0-indexed, and unlabeled images have -1 as their labels.
Here is the original STL10 dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── stl10_dataset_dir
├── train_X.bin
├── train_y.bin
├── test_X.bin
├── test_y.bin
└── unlabeled_X.bin
Citation:
.. code-block::
@techreport{Coates10,
author = {Adam Coates},
title = {Learning multiple layers of features from tiny images},
year = {2010},
howpublished = {https://cs.stanford.edu/~acoates/stl10/},
description = {The STL-10 dataset consists of 96x96 RGB images in 10 classes,
with 500 training images and 800 testing images per class.
There are 5000 training images and 8000 test images.
It also has 100000 unlabeled images for unsupervised learning.
These examples are extracted from a similar but broader distribution of images.
}
}
"""
@check_stl10_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.STL10Node(self.dataset_dir, self.usage, self.sampler)
class YahooAnswersDataset(SourceDataset):
"""
A source dataset that reads and parses the YahooAnswers dataset.
The generated dataset has four columns :py:obj:`[class, title, content, answer]`.
The tensor of column :py:obj:`class` is of the string type.
The tensor of column :py:obj:`title` is of the string type.
The tensor of column :py:obj:`content` is of the string type.
The tensor of column :py:obj:`answer` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. `train` will read
from 1,400,000 train samples, `test` will read from 60,000 test samples, `all` will read from
all 1,460,000 samples (default=None, all samples).
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all text).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum sample number of per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> yahoo_answers_dataset_dir = "/path/to/yahoo_answers_dataset_directory"
>>>
>>> # 1) Read 3 samples from YahooAnswers dataset
>>> dataset = ds.YahooAnswersDataset(dataset_dir=yahoo_answers_dataset_dir, num_samples=3)
>>>
>>> # 2) Read train samples from YahooAnswers dataset
>>> dataset = ds.YahooAnswersDataset(dataset_dir=yahoo_answers_dataset_dir, usage="train")
About YahooAnswers dataset:
The YahooAnswers dataset consists of 1,460,000 text samples in 10 classes.
There are 1,400,000 samples in the train.csv and 60,000 samples in the test.csv.
The 10 different classes represent Society & Culture, Science & Mathematics, Health, Education & Reference,
Computers & Internet, Sports, Business & Finance, Entertainment & Music, Family & Relationships,
Politics & Government.
Here is the original YahooAnswers dataset structure.
    You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── yahoo_answers_dataset_dir
├── train.csv
├── test.csv
├── classes.txt
└── readme.txt
.. code-block::
@article{YahooAnswers,
title = {Yahoo! Answers Topic Classification Dataset},
author = {Xiang Zhang},
year = {2015},
howpublished = {}
}
"""
@check_yahoo_answers_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.YahooAnswersNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id)
|
md.py
|
print("Threading, (c) Verloka Vadim 2018\n\n\n")
import threading
import time
tLock = threading.Lock()
def Timer(name, delaySec, repeat, lock = False):
print("Timer {} started.".format(name))
if lock:
tLock.acquire()
while repeat > 0:
time.sleep(delaySec)
print("{}: {}".format(name, time.ctime(time.time())))
repeat -= 1
if lock:
tLock.release()
print("Timer {} completed.".format(name))
def Main():
    # Five timer threads run concurrently; the first four share tLock, so their
    # loops execute one after another, while Timer 5 runs without the lock.
    t1 = threading.Thread(target=Timer, args=("Timer 1", 1, 5, True))
    t2 = threading.Thread(target=Timer, args=("Timer 2", 1, 5, True))
    t3 = threading.Thread(target=Timer, args=("Timer 3", 1, 5, True))
    t4 = threading.Thread(target=Timer, args=("Timer 4", 1, 5, True))
    t5 = threading.Thread(target=Timer, args=("Timer 5", 1, 5, False))
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
print("Main is completed.")
if __name__ == "__main__":
Main()
|
util.py
|
# -*- coding: utf-8 -*-
import random
import re
import string
import threading
import traceback
from typing import Any, Callable, List, Dict, Optional, Union
# noinspection PyPep8Naming
import queue as Queue
import logging
from telebot import types
try:
import ujson as json
except ImportError:
import json
try:
# noinspection PyPackageRequirements
from PIL import Image
from io import BytesIO
pil_imported = True
except:
pil_imported = False
MAX_MESSAGE_LENGTH = 4096
logger = logging.getLogger('TeleBot')
thread_local = threading.local()
content_type_media = [
'text', 'audio', 'animation', 'document', 'photo', 'sticker', 'video', 'video_note', 'voice', 'contact', 'dice', 'poll',
'venue', 'location'
]
content_type_service = [
'new_chat_members', 'left_chat_member', 'new_chat_title', 'new_chat_photo', 'delete_chat_photo', 'group_chat_created',
'supergroup_chat_created', 'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message',
'proximity_alert_triggered', 'voice_chat_scheduled', 'voice_chat_started', 'voice_chat_ended',
'voice_chat_participants_invited', 'message_auto_delete_timer_changed'
]
update_types = [
"update_id", "message", "edited_message", "channel_post", "edited_channel_post", "inline_query",
"chosen_inline_result", "callback_query", "shipping_query", "pre_checkout_query", "poll", "poll_answer",
"my_chat_member", "chat_member", "chat_join_request"
]
class WorkerThread(threading.Thread):
count = 0
def __init__(self, exception_callback=None, queue=None, name=None):
if not name:
name = "WorkerThread{0}".format(self.__class__.count + 1)
self.__class__.count += 1
if not queue:
queue = Queue.Queue()
threading.Thread.__init__(self, name=name)
self.queue = queue
self.daemon = True
self.received_task_event = threading.Event()
self.done_event = threading.Event()
self.exception_event = threading.Event()
self.continue_event = threading.Event()
self.exception_callback = exception_callback
self.exception_info = None
self._running = True
self.start()
def run(self):
while self._running:
try:
task, args, kwargs = self.queue.get(block=True, timeout=.5)
self.continue_event.clear()
self.received_task_event.clear()
self.done_event.clear()
self.exception_event.clear()
logger.debug("Received task")
self.received_task_event.set()
task(*args, **kwargs)
logger.debug("Task complete")
self.done_event.set()
except Queue.Empty:
pass
except Exception as e:
logger.debug(type(e).__name__ + " occurred, args=" + str(e.args) + "\n" + traceback.format_exc())
self.exception_info = e
self.exception_event.set()
if self.exception_callback:
self.exception_callback(self, self.exception_info)
self.continue_event.wait()
def put(self, task, *args, **kwargs):
self.queue.put((task, args, kwargs))
def raise_exceptions(self):
if self.exception_event.is_set():
raise self.exception_info
def clear_exceptions(self):
self.exception_event.clear()
self.continue_event.set()
def stop(self):
self._running = False
class ThreadPool:
def __init__(self, num_threads=2):
self.tasks = Queue.Queue()
self.workers = [WorkerThread(self.on_exception, self.tasks) for _ in range(num_threads)]
self.num_threads = num_threads
self.exception_event = threading.Event()
self.exception_info = None
def put(self, func, *args, **kwargs):
self.tasks.put((func, args, kwargs))
def on_exception(self, worker_thread, exc_info):
self.exception_info = exc_info
self.exception_event.set()
worker_thread.continue_event.set()
def raise_exceptions(self):
if self.exception_event.is_set():
raise self.exception_info
def clear_exceptions(self):
self.exception_event.clear()
def close(self):
for worker in self.workers:
worker.stop()
for worker in self.workers:
worker.join()
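# Illustrative sketch (not part of the original module): typical use of
# ThreadPool. The task function, pool size and the crude sleep are hypothetical.
def _thread_pool_example():
    import time
    pool = ThreadPool(num_threads=2)
    results = []
    def record(x):
        results.append(x * x)  # list.append is thread-safe enough for a demo
    for i in range(5):
        pool.put(record, i)
    time.sleep(1)              # crude wait; workers poll the queue every 0.5s
    pool.raise_exceptions()    # re-raise anything a worker hit
    pool.close()               # stop the workers and join them
    return results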
class AsyncTask:
def __init__(self, target, *args, **kwargs):
self.target = target
self.args = args
self.kwargs = kwargs
self.done = False
self.thread = threading.Thread(target=self._run)
self.thread.start()
def _run(self):
try:
self.result = self.target(*self.args, **self.kwargs)
except Exception as e:
self.result = e
self.done = True
def wait(self):
if not self.done:
self.thread.join()
if isinstance(self.result, BaseException):
raise self.result
else:
return self.result
class CustomRequestResponse():
def __init__(self, json_text, status_code = 200, reason = ""):
self.status_code = status_code
self.text = json_text
self.reason = reason
def json(self):
return json.loads(self.text)
def async_dec():
def decorator(fn):
def wrapper(*args, **kwargs):
return AsyncTask(fn, *args, **kwargs)
return wrapper
return decorator
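# Illustrative sketch (not part of the original module): async_dec wraps a
# callable so each call runs in its own thread and returns an AsyncTask;
# wait() then joins and yields the result (or re-raises the exception).
# The decorated function below is hypothetical.
@async_dec()
def _slow_square(x):
    return x * x
# task = _slow_square(7)   # returns immediately with an AsyncTask
# print(task.wait())       # blocks until done, prints 49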
def is_string(var):
return isinstance(var, str)
def is_dict(var):
return isinstance(var, dict)
def is_bytes(var):
return isinstance(var, bytes)
def is_pil_image(var):
return pil_imported and isinstance(var, Image.Image)
def pil_image_to_file(image, extension='JPEG', quality='web_low'):
if pil_imported:
photoBuffer = BytesIO()
image.convert('RGB').save(photoBuffer, extension, quality=quality)
photoBuffer.seek(0)
return photoBuffer
else:
raise RuntimeError('PIL module is not imported')
def is_command(text: str) -> bool:
"""
Checks if `text` is a command. Telegram chat commands start with the '/' character.
:param text: Text to check.
:return: True if `text` is a command, else False.
"""
if text is None: return False
return text.startswith('/')
def extract_command(text: str) -> Union[str, None]:
"""
Extracts the command from `text` (minus the '/') if `text` is a command (see is_command).
If `text` is not a command, this function returns None.
Examples:
extract_command('/help'): 'help'
extract_command('/help@BotName'): 'help'
extract_command('/search black eyed peas'): 'search'
extract_command('Good day to you'): None
:param text: String to extract the command from
:return: the command if `text` is a command (according to is_command), else None.
"""
if text is None: return None
return text.split()[0].split('@')[0][1:] if is_command(text) else None
def extract_arguments(text: str) -> str:
"""
Returns the argument after the command.
Examples:
extract_arguments("/get name"): 'name'
extract_arguments("/get"): ''
extract_arguments("/get@botName name"): 'name'
:param text: String to extract the arguments from a command
:return: the arguments if `text` is a command (according to is_command), else None.
"""
regexp = re.compile(r"/\w*(@\w*)*\s*([\s\S]*)", re.IGNORECASE)
result = regexp.match(text)
return result.group(2) if is_command(text) else None
def split_string(text: str, chars_per_string: int) -> List[str]:
"""
Splits one string into multiple strings, with a maximum amount of `chars_per_string` characters per string.
This is very useful for splitting one giant message into multiples.
:param text: The text to split
    :param chars_per_string: The maximum number of characters per resulting string.
    :return: The split text as a list of strings.
"""
return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]
def smart_split(text: str, chars_per_string: int=MAX_MESSAGE_LENGTH) -> List[str]:
"""
Splits one string into multiple strings, with a maximum amount of `chars_per_string` characters per string.
This is very useful for splitting one giant message into multiples.
If `chars_per_string` > 4096: `chars_per_string` = 4096.
Splits by '\n', '. ' or ' ' in exactly this priority.
:param text: The text to split
    :param chars_per_string: The maximum number of characters per resulting part.
    :return: The split text as a list of strings.
"""
def _text_before_last(substr: str) -> str:
return substr.join(part.split(substr)[:-1]) + substr
if chars_per_string > MAX_MESSAGE_LENGTH: chars_per_string = MAX_MESSAGE_LENGTH
parts = []
while True:
if len(text) < chars_per_string:
parts.append(text)
return parts
part = text[:chars_per_string]
if "\n" in part: part = _text_before_last("\n")
elif ". " in part: part = _text_before_last(". ")
elif " " in part: part = _text_before_last(" ")
parts.append(part)
text = text[len(part):]
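# Illustrative sketch (not part of the original module): splitting a long
# message for Telegram. The sample text is hypothetical.
# parts = smart_split("line one\n" + "x" * 5000 + "\nline two")
# -> a list of strings, each at most MAX_MESSAGE_LENGTH (4096) characters,
#    preferring to break at '\n', then '. ', then ' '.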
def escape(text: str) -> str:
"""
    Replaces the following chars in `text` ('&' with '&amp;', '<' with '&lt;' and '>' with '&gt;').
:param text: the text to escape
:return: the escaped text
"""
    chars = {"&": "&amp;", "<": "&lt;", ">": "&gt;"}
for old, new in chars.items(): text = text.replace(old, new)
return text
def user_link(user: types.User, include_id: bool=False) -> str:
"""
Returns an HTML user link. This is useful for reports.
Attention: Don't forget to set parse_mode to 'HTML'!
Example:
bot.send_message(your_user_id, user_link(message.from_user) + ' started the bot!', parse_mode='HTML')
:param user: the user (not the user_id)
:param include_id: include the user_id
:return: HTML user link
"""
name = escape(user.first_name)
return (f"<a href='tg://user?id={user.id}'>{name}</a>"
+ (f" (<pre>{user.id}</pre>)" if include_id else ""))
def quick_markup(values: Dict[str, Dict[str, Any]], row_width: int=2) -> types.InlineKeyboardMarkup:
"""
Returns a reply markup from a dict in this format: {'text': kwargs}
This is useful to avoid always typing 'btn1 = InlineKeyboardButton(...)' 'btn2 = InlineKeyboardButton(...)'
Example:
quick_markup({
'Twitter': {'url': 'https://twitter.com'},
'Facebook': {'url': 'https://facebook.com'},
'Back': {'callback_data': 'whatever'}
}, row_width=2):
    returns an InlineKeyboardMarkup with two buttons in a row, one leading to Twitter, the other to Facebook,
    and a back button below.
kwargs can be:
{
'url': None,
'callback_data': None,
'switch_inline_query': None,
'switch_inline_query_current_chat': None,
'callback_game': None,
'pay': None,
'login_url': None
}
:param values: a dict containing all buttons to create in this format: {text: kwargs} {str:}
:param row_width: int row width
:return: InlineKeyboardMarkup
"""
markup = types.InlineKeyboardMarkup(row_width=row_width)
buttons = [
types.InlineKeyboardButton(text=text, **kwargs)
for text, kwargs in values.items()
]
markup.add(*buttons)
return markup
# CREDITS TO http://stackoverflow.com/questions/12317940#answer-12320352
def or_set(self):
self._set()
self.changed()
def or_clear(self):
self._clear()
self.changed()
def orify(e, changed_callback):
if not hasattr(e, "_set"):
e._set = e.set
if not hasattr(e, "_clear"):
e._clear = e.clear
e.changed = changed_callback
e.set = lambda: or_set(e)
e.clear = lambda: or_clear(e)
def OrEvent(*events):
or_event = threading.Event()
def changed():
bools = [ev.is_set() for ev in events]
if any(bools):
or_event.set()
else:
or_event.clear()
def busy_wait():
while not or_event.is_set():
# noinspection PyProtectedMember
or_event._wait(3)
for e in events:
orify(e, changed)
or_event._wait = or_event.wait
or_event.wait = busy_wait
changed()
return or_event
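# Illustrative sketch (not part of the original module): OrEvent builds a
# composite event that is set whenever any of the given events is set.
# The events below are hypothetical.
# e1, e2 = threading.Event(), threading.Event()
# either = OrEvent(e1, e2)
# e2.set()       # setting either source event...
# either.wait()  # ...releases waiters on the composite event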
def per_thread(key, construct_value, reset=False):
if reset or not hasattr(thread_local, key):
value = construct_value()
setattr(thread_local, key, value)
return getattr(thread_local, key)
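# Illustrative sketch (not part of the original module): per_thread caches one
# value per key per thread, e.g. a per-thread requests.Session. The key name
# and factory below are hypothetical.
# session = per_thread('req_session', lambda: requests.Session())
# Repeat calls from the same thread return the same object; other threads get
# their own instance; pass reset=True to rebuild it.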
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
# https://stackoverflow.com/a/312464/9935473
for i in range(0, len(lst), n):
yield lst[i:i + n]
def generate_random_token():
return ''.join(random.sample(string.ascii_letters, 16))
def deprecated(warn: bool=True, alternative: Optional[Callable]=None):
"""
Use this decorator to mark functions as deprecated.
When the function is used, an info (or warning if `warn` is True) is logged.
:param warn: If True a warning is logged else an info
:param alternative: The new function to use instead
"""
def decorator(function):
def wrapper(*args, **kwargs):
info = f"`{function.__name__}` is deprecated." + (f" Use `{alternative.__name__}` instead" if alternative else "")
if not warn:
logger.info(info)
else:
logger.warning(info)
return function(*args, **kwargs)
return wrapper
return decorator
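# Illustrative sketch (not part of the original module): marking an old helper
# as deprecated and pointing at its replacement. Both functions are hypothetical.
def _new_helper():
    return 42
@deprecated(warn=True, alternative=_new_helper)
def _old_helper():
    return _new_helper()
# Calling _old_helper() logs a warning naming `_new_helper`, then delegates.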
# Cloud helpers
def webhook_google_functions(bot, request):
"""A webhook endpoint for Google Cloud Functions FaaS."""
if request.is_json:
try:
request_json = request.get_json()
update = types.Update.de_json(request_json)
bot.process_new_updates([update])
return ''
except Exception as e:
print(e)
return 'Bot FAIL', 400
else:
return 'Bot ON'
def antiflood(function, *args, **kwargs):
"""
Use this function inside loops in order to avoid getting TooManyRequests error.
Example:
from telebot.util import antiflood
for chat_id in chat_id_list:
msg = antiflood(bot.send_message, chat_id, text)
    Returns the result of the wrapped call (e.g. the sent message); None if it could not be completed.
"""
from telebot.apihelper import ApiTelegramException
from time import sleep
msg = None
try:
msg = function(*args, **kwargs)
except ApiTelegramException as ex:
if ex.error_code == 429:
sleep(ex.result_json['parameters']['retry_after'])
msg = function(*args, **kwargs)
finally:
return msg
|
crazyflie_calibration.py
|
#!/usr/bin/env python
import roslib
import rospy
import numpy as np
from geometry_msgs.msg import Twist
from test_package.msg import aircraft_controls
from threading import Thread
import sys, select, termios, tty
thrust_inc = 200
angle_inc = 200
thrust = 0
elevator = 0
rudder = 0
status = 0
speedBindings={
'q':(thrust_inc,0,0),
'z':(-thrust_inc,0,0),
'w':(0,angle_inc,0),
'x':(0,-angle_inc,0),
'e':(0,0,angle_inc),
'c':(0,0,-angle_inc),
}
def getKey():
tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
key = sys.stdin.read(1)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
def process_keyboard():
try:
        print(vels(0, 0, 0))
while(1):
key = getKey()
if key in speedBindings.keys():
global thrust, elevator, rudder, status
thrust = thrust + speedBindings[key][0]
elevator = elevator + speedBindings[key][1]
rudder = rudder + speedBindings[key][2]
                print(vels(thrust, elevator, rudder))
status = (status + 1) % 15
else:
if (key == '\x03'):
break
#twist = Twist()
#twist.linear.x = elevator
#twist.linear.y = rudder
#twist.linear.z = thrust
    except Exception as e:
        print(e)
finally:
twist = aircraft_controls()
twist.thrust = 0
twist.rudder = 0
twist.elevator = 0
pub.publish(twist)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
def vels(thrust, elevator, rudder):
return "currently:\tthrust %s\televator %s\trudder %s" % (thrust,elevator,rudder)
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
#pub = rospy.Publisher('/crazyflie/cmd_vel', Twist, queue_size = 1)
pub = rospy.Publisher('/kite_controls', aircraft_controls, queue_size = 1)
rospy.init_node('talker')
rate = rospy.Rate(20)
t = Thread(target=process_keyboard)
t.start()
while not rospy.is_shutdown():
twist = aircraft_controls()
twist.thrust = thrust
twist.rudder = rudder
twist.elevator = elevator
#twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0
pub.publish(twist)
rate.sleep()
|
tcpserver.py
|
import socket, threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print("[*] Listening on %s:%d" %(bind_ip, bind_port))
# this is our client-handling thread
def handle_client(client_socket):
# print out what the client sends
request = client_socket.recv(1024)
print("[*] Received: %s" % request)
# send back a packet
client_socket.send(b'ACK!')
client_socket.close()
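# Illustrative sketch (not part of the original script): exercising the server
# from another process. Host and port mirror the bind settings above.
#
# import socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(("127.0.0.1", 9999))
# client.send(b"hello")
# print(client.recv(4096))  # expect b'ACK!'
# client.close()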
while True:
client, addr = server.accept()
print("[*] Accepted connection from: %s:%d" % (addr[0],addr[1]))
#spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
|
websockets.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight SmartCash Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import queue
import threading, os, json
from collections import defaultdict
try:
from SimpleWebSocketServer import WebSocket, SimpleSSLWebSocketServer
except ImportError:
import sys
sys.exit("install SimpleWebSocketServer")
from . import util
request_queue = queue.Queue()
class ElectrumWebSocket(WebSocket):
def handleMessage(self):
assert self.data[0:3] == 'id:'
util.print_error("message received", self.data)
request_id = self.data[3:]
request_queue.put((self, request_id))
def handleConnected(self):
util.print_error("connected", self.address)
def handleClose(self):
util.print_error("closed", self.address)
class WsClientThread(util.DaemonThread):
def __init__(self, config, network):
util.DaemonThread.__init__(self)
self.network = network
self.config = config
self.response_queue = queue.Queue()
self.subscriptions = defaultdict(list)
def make_request(self, request_id):
# read json file
rdir = self.config.get('requests_dir')
n = os.path.join(rdir, 'req', request_id[0], request_id[1], request_id, request_id + '.json')
with open(n, encoding='utf-8') as f:
s = f.read()
d = json.loads(s)
addr = d.get('address')
amount = d.get('amount')
return addr, amount
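    # Illustrative sketch (not part of the original file): make_request expects a
    # payment-request JSON at <requests_dir>/req/<id[0]>/<id[1]>/<id>/<id>.json
    # containing at least 'address' and 'amount', e.g. (values hypothetical):
    #     {"address": "SXyz...", "amount": 100000}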
def reading_thread(self):
while self.is_running():
try:
ws, request_id = request_queue.get()
except queue.Empty:
continue
try:
addr, amount = self.make_request(request_id)
except:
continue
l = self.subscriptions.get(addr, [])
l.append((ws, amount))
self.subscriptions[addr] = l
h = self.network.addr_to_scripthash(addr)
self.network.send([('blockchain.scripthash.subscribe', [h])], self.response_queue.put)
def run(self):
threading.Thread(target=self.reading_thread).start()
while self.is_running():
try:
r = self.response_queue.get(timeout=0.1)
except queue.Empty:
continue
util.print_error('response', r)
method = r.get('method')
params = r.get('params')
result = r.get('result')
if result is None:
continue
if method == 'blockchain.scripthash.subscribe':
self.network.send([('blockchain.scripthash.get_balance', params)], self.response_queue.put)
elif method == 'blockchain.scripthash.get_balance':
h = params[0]
addr = self.network.h2addr.get(h, None)
if addr is None:
util.print_error("can't find address for scripthash: %s" % h)
l = self.subscriptions.get(addr, [])
for ws, amount in l:
if not ws.closed:
if sum(result.values()) >=amount:
ws.sendMessage('paid')
class WebSocketServer(threading.Thread):
def __init__(self, config, ns):
threading.Thread.__init__(self)
self.config = config
self.net_server = ns
self.daemon = True
def run(self):
t = WsClientThread(self.config, self.net_server)
t.start()
host = self.config.get('websocket_server')
port = self.config.get('websocket_port', 9999)
certfile = self.config.get('ssl_chain')
keyfile = self.config.get('ssl_privkey')
self.server = SimpleSSLWebSocketServer(host, port, ElectrumWebSocket, certfile, keyfile)
self.server.serveforever()
|
streaming.py
|
import re
import sys
import time
import copy
import threading
import codecs
import tweepy
from tweepy.api import API
# Special Exceptions
from requests.exceptions import Timeout
from requests.exceptions import ConnectionError
from requests.packages.urllib3.exceptions import ReadTimeoutError
# Local Files
sys.path.append("..")
from scripts import mongio
def get_tracker(queries):
return {query: {'volume': 0, 'scores': []} for query in queries}
def get_reverse(queries):
reverse = {}
for query in queries:
for keyword in queries[query]:
reverse[keyword] = query
return reverse
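# Illustrative sketch (not part of the original module): the expected shape of
# `queries` -- a dict mapping a display name to the keywords tracked for it.
# The names and keywords below are hypothetical.
# queries = {
#     'python': ['python', '#python', 'python3'],
#     'golang': ['golang', '#golang'],
# }
# get_tracker(queries) -> {'python': {'volume': 0, 'scores': []}, 'golang': {...}}
# get_reverse(queries) -> {'python': 'python', '#python': 'python', ...}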
def elapsed_time(start):
return (time.time()-start)
def process(text):
    text = re.sub(r"[0-9]+", "number", text)
    text = re.sub(r"#", "", text)
    text = re.sub(r"\n", "", text)
    text = re.sub(r"\$[^\s]+", "", text)  # cashtags, e.g. $AAPL
    text = re.sub(r"@[^\s]+", "", text)  # mentions
    text = re.sub(r"(http|https)://[^\s]*", "", text)  # URLs
    text = re.sub(r"[^\s]+@[^\s]+", "", text)  # email-like tokens
    text = re.sub(r"[^a-z A-Z]+", "", text)
return text
class Listener(tweepy.StreamListener):
def __init__(self, auth, queries, refresh, sentiment=False, debug=False):
self.api = tweepy.API(auth)
self.queries = queries.keys()
self.refresh = refresh
self.sentiment = sentiment
self.processing = False
self.timer = time.time()
self.debug = debug
self.reverse = get_reverse(queries)
self.tracker = get_tracker(self.queries)
def process(self):
# Reset timer
self.timer = time.time()
# Copy tracking data to temporary tracker
previous_tracker = copy.deepcopy(self.tracker)
self.tracker = get_tracker(self.queries)
# Update database
for query in previous_tracker:
if self.sentiment:
scores = previous_tracker[query]['scores']
try:
sentiment = round(sum(scores)/len(scores) ,2)
except ZeroDivisionError:
sentiment = 0
else:
sentiment = 0
volume = previous_tracker[query]['volume']
timestamp = time.strftime('%m/%d/%Y %H:%M:%S')
mongio.push(query, 'logs', {'timestamp' : timestamp,
'volume' : volume,
'sentiment' : sentiment})
if self.debug:
print('Query', query)
print('Timestamp', timestamp)
print('Volume', volume)
print('Sentiment', sentiment)
print('-------\n')
self.processing = False
def on_status(self, status):
original_tweet = status.text
# For every incoming tweet...
for query in self.queries:
if query.lower() in original_tweet.lower():
# Categorize tweet
lookup = self.reverse[query]
# Increment count
self.tracker[lookup]['volume'] += 1
# Sentiment analysis
if self.sentiment:
processed_tweet = process(original_tweet.lower())
score = SentimentIntensityAnalyzer().polarity_scores(processed_tweet)['compound']
self.tracker[lookup]['scores'].append(score)
# Check refresh
if elapsed_time(self.timer) >= self.refresh:
if not self.processing:
self.processing = True
processing_thread = threading.Thread(target=self.process)
processing_thread.start()
return True
def on_error(self, status_code):
print("{0} Error: {1}\n".format(time.strftime('%m/%d/%Y %H:%M:%S'), status_code))
if status_code == 413 or status_code == 420 or status_code == 503:
return False
return True # To continue listening
def on_timeout(self):
print("Timeout...")
return True # To continue listening
# Streaming --------------------------------------------------
def streamer(credentials, queries, refresh, sentiment=False, debug=False):
keywords = [i for j in queries.values() for i in j]
# User Error Checks
if len(queries) <= 0: print("Error: You must include at least one query."); return
    if len(queries) >= 10: print("Warning: Fewer than ten queries recommended.")
if len(keywords) <= 0: print("Error: You must include at least one keyword."); return
if len(keywords) >= 20: print("Warning: Fewer than twenty keywords recommended.")
if refresh <= 0: print("Error: Refresh rate must be greater than 0"); return
auth = tweepy.OAuthHandler(credentials[0], credentials[1])
auth.set_access_token(credentials[2], credentials[3])
if sentiment:
global SentimentIntensityAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
while True:
# Start streaming -----------------------------
try:
print("Streaming Now...")
listener = Listener(auth, queries, refresh, sentiment, debug)
stream = tweepy.Stream(auth, listener)
stream.filter(track=keywords)
except (Timeout, ConnectionError, ReadTimeoutError):
print("{0} Error: Connection Dropped\n".format(time.strftime('%m/%d/%Y %H:%M:%S')))
print("Re-establishing Connection...")
time.sleep((15*60)+1) # Wait at least 15 minutes before restarting listener
# ---------------------------------------------
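# Illustrative sketch (not part of the original module): starting the streamer.
# The credentials tuple order follows the OAuth calls above; all values and the
# query dict are hypothetical.
# credentials = (consumer_key, consumer_secret, access_token, access_token_secret)
# streamer(credentials, queries={'python': ['python', '#python']}, refresh=60,
#          sentiment=True, debug=True)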
|
trezor.py
|
from binascii import hexlify, unhexlify
from collections import defaultdict
import traceback
import sys
from electroncash.util import bfh, bh2u, versiontuple, UserCancelled
from electroncash.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT)
from electroncash.i18n import _
from electroncash.networks import NetworkConstants
from electroncash.plugins import BasePlugin, Device
from electroncash.transaction import deserialize
from electroncash.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electroncash.address import ScriptOutput
from ..hw_wallet import HW_PluginBase
try:
import trezorlib
import trezorlib.transport
from .clientbase import (TrezorClientBase, parse_path)
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
from trezorlib.client import PASSPHRASE_ON_DEVICE
TREZORLIB = True
except Exception as e:
import traceback
traceback.print_exc()
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
PASSPHRASE_ON_DEVICE = object()
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password, *, use_cache=False):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise RuntimeError(_('Offline signing with {} is not supported.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
def needs_prevtx(self):
# Trezor does need previous transactions for Bitcoin Cash
return True
class LibraryFoundButUnusable(Exception):
def __init__(self, library_version='unknown'):
self.library_version = library_version
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://pypi.org/project/trezor/'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 12, 0)
maximum_library = (0, 13)
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def check_libraries_available(self) -> bool:
def version_str(t):
return ".".join(str(i) for i in t)
try:
# this might raise ImportError or LibraryFoundButUnusable
library_version = self.get_library_version()
# if no exception so far, we might still raise LibraryFoundButUnusable
if (library_version == 'unknown'
or versiontuple(library_version) < self.minimum_library
or hasattr(self, "maximum_library") and versiontuple(library_version) >= self.maximum_library):
raise LibraryFoundButUnusable(library_version=library_version)
except ImportError:
return False
except LibraryFoundButUnusable as e:
library_version = e.library_version
max_version_str = version_str(self.maximum_library) if hasattr(self, "maximum_library") else "inf"
self.libraries_available_message = (
_("Library version for '{}' is incompatible.").format(self.name)
+ '\nInstalled: {}, Needed: {} <= x < {}'
.format(library_version, version_str(self.minimum_library), max_version_str))
self.print_stderr(self.libraries_available_message)
return False
return True
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
devices = trezorlib.transport.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0)
for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
# Note: testnet supported only by unofficial firmware
return "Bcash Testnet" if NetworkConstants.TESTNET else "Bcash"
def _chk_settings_do_popup_maybe(self, handler, method, model, settings):
recovery_type = settings and settings[-1]
if (method == TIM_RECOVER
and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS
and model != 'T'): # I'm pretty sure this only applies to the '1' not the 'T'
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"Either method is secure since no secret information "
"will be entered into your computer."
).format(self.device)
choices = [
# Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
loops = [wizard.loop] # We do it this way so as to pop the loop when it's done. This avoids possible multiple calls to loop.exit from different code paths.
handler._loops = loops # hack to prevent trezor transport errors from stalling the UI here. see clientbase.py button_request which aborts the wizard event loop on transport error
try:
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
# We do this popup business here because doing it in the
# thread interferes with whatever other popups may happen
# from trezorlib. So we do this all-stop popup first if needed.
self._chk_settings_do_popup_maybe(handler, method, model, settings)
errors = []
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, loops, errors))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
loops.pop()
if exit_code != 0:
if errors and isinstance(errors[0], BaseException):
msg = str(errors[0]).strip()
if msg:
# we do this here in the main thread so as to give
# the user the opportunity to actually *see* the error
# window before the wizard "goes back"
handler.show_error(msg)
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
finally:
delattr(handler, '_loops') # /clean up hack
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, loops, errors):
exit_code = 0
try:
self._initialize_device(settings, method, device_id)
except UserCancelled:
exit_code = 2
except BaseException as e:
traceback.print_exc(file=sys.stderr)
errors.append(e)
exit_code = 1
finally:
l = loops.copy() # leverage the GIL here for thread safety.
if l:
l[0].exit(exit_code)
def _initialize_device(self, settings, method, device_id):
item, label, pin_protection, passphrase_protection, recovery_type = settings
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
client.reset_device(
strength=64 * (item + 2), # 128, 192 or 256
passphrase_protection=passphrase_protection,
pin_protection=pin_protection,
label=label)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=recovery_type,
word_count=6 * (item + 2), # 12, 18 or 24
passphrase_protection=passphrase_protection,
pin_protection=pin_protection,
label=label)
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
the device is uninitialized, go through the intialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
raise Exception(_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
creating = not device_info.initialized
if creating:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard', creating)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, is_multisig):
if is_multisig:
return InputScriptType.SPENDMULTISIG
else:
return InputScriptType.SPENDADDRESS
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx, client)
details = SignTx(lock_time=tx.locktime)
signatures, signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [bh2u(x) for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
# prepare multisig, if available
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
script_type = self.get_trezor_input_script_type(multisig is not None)
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(multisig is not None)
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx, client):
def create_output_by_derivation():
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
script_type = OutputScriptType.PAYTOADDRESS if multisig is None else OutputScriptType.PAYTOMULTISIG
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
script = address.to_script()
# We only support OP_RETURN with one constant push
if (script[0] == 0x6a and amount == 0 and
script[1] == len(script) - 2 and
script[1] <= 75):
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = script[2:]
else:
raise Exception(_("Unsupported output script."))
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
addr_format = address.FMT_LEGACY
if client.get_trezor_model() == 'T':
if client.atleast_version(2, 0, 8):
addr_format = address.FMT_UI
elif client.atleast_version(2, 0, 7):
addr_format = address.FMT_CASHADDR
else:
if client.atleast_version(1, 6, 2):
addr_format = address.FMT_UI
txoutputtype.address = address.to_full_string(addr_format)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = self.is_any_tx_output_on_change_branch(tx)
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m, script_type = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def is_any_tx_output_on_change_branch(self, tx):
if not tx.output_info:
return False
for _type, address, _amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and info[0][0] == 1:
return True
return False
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
# for electron-cash previous tx is never needed, since it uses
# bip-143 signatures.
return None
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
flag.py
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Bool, Property, Float, CInt, List, Str, Any
from traitsui.api import View, Item, HGroup, spring
# ============= standard library imports ========================
from threading import Timer as OneShotTimer, Thread, Event
import time
# ============= local library imports ==========================
from pychron.core.helpers.timer import Timer as PTimer
from pychron.loggable import Loggable
def convert_to_bool(v):
try:
v = float(v)
return bool(v)
except:
return v.lower().strip() in ['t', 'true', 'on']
class Flag(Loggable):
_set = Bool(False)
display_state = Property(Bool, depends_on='_set')
owner = Str
def __init__(self, name, *args, **kw):
self.name = name
self.timeout = 60
self._pinged = None
self._monitor_thread = None
self._monitor_evt = None
super(Flag, self).__init__(*args, **kw)
def set_owner(self, owner):
self.owner = owner
def traits_view(self):
v = View(HGroup(Item('name', show_label=False, style='readonly'),
spring,
Item('display_state', show_label=False)))
return v
def _get_display_state(self):
return self._set
def _set_display_state(self, v):
self.set(v)
def ping(self):
self._pinged = time.time()
return self._pinged
def get(self, *args, **kw):
return int(self._set)
def set(self, value):
ovalue = value
if isinstance(value, str):
value = convert_to_bool(value)
else:
value = bool(value)
self.info('setting flag state to {} ({})'.format(value, ovalue))
if value:
self._monitor_evt = Event()
self._monitor_thread = Thread(target=self._monitor)
self._monitor_thread.setDaemon(1)
self._monitor_thread.start()
else:
if self._monitor_evt:
self._monitor_evt.set()
self._set = value
return True
def clear(self):
self.info('clearing flag')
self._set = False
if self._monitor_evt:
self._monitor_evt.set()
def isSet(self):
return self._set
def _monitor(self):
evt = self._monitor_evt
timeout = self.timeout
self._pinged = time.time()
while not evt.is_set():
if time.time()-self._pinged > timeout:
if self._set:
self.info('auto canceling flag')
self.clear()
break
time.sleep(5)
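# Illustrative sketch (not part of the original module): typical Flag lifecycle.
# The flag name and cadence below are hypothetical.
# flag = Flag('pump_enabled')
# flag.set(True)   # starts the monitor thread (60 s ping timeout)
# flag.ping()      # call periodically; without pings the flag auto-clears
# flag.get()       # -> 1 while set, 0 after clear()/timeout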
class TimedFlag(Flag):
duration = Float(1)
display_time = Property(depends_on='_time_remaining')
_time_remaining = CInt(0)
def __init__(self, *args, **kw):
super(TimedFlag, self).__init__(*args, **kw)
self._start_time = None
self._uperiod = 1000
self._ping_result = None
self._update_timer = None
def clear(self):
super(TimedFlag, self).clear()
self._update_timer.Stop()
self._ping_result = 'Complete'
def ping(self):
ret = super(TimedFlag, self).ping()
if self._ping_result == 'Complete':
ret = 'Complete'
return ret
def _get_display_time(self):
return self._time_remaining
def traits_view(self):
v = View(HGroup(Item('name', style='readonly'),
spring,
Item('display_time',
format_str='%03i', style='readonly'),
Item('display_state'),
show_labels=False))
return v
def set(self, value):
self._ping_result = None
set_duration = True
if isinstance(value, bool):
set_duration = False
try:
value = float(value)
except ValueError:
return 'Invalid flag value'
super(TimedFlag, self).set(value)
if self.isSet():
if set_duration:
self.duration = value
self._time_remaining = value
else:
self._time_remaining = self.duration
self._start_time = time.time()
self._update_timer = PTimer(self._uperiod, self._update_time)
t = OneShotTimer(self.duration, self.clear)
t.start()
return True
def isStarted(self):
return self._start_time is not None
def get(self, *args, **kw):
t = 0
if self.isSet() and self.isStarted():
t = max(0, self.duration - (time.time() - self._start_time))
return t
def _update_time(self):
self._time_remaining = round(self.get())
class ValveFlag(Flag):
"""
a ValveFlag holds a list of valves keys (A, B, ...)
if the flag is set then the these valves should be locked out
from being actuated by ip addresses other than the owner of this
flag
valves should (can) not occur in multiple ValveFlags
"""
valves = List
owner = Str
valves_str = Property(depends_on='valves')
manager = Any
    def set(self, value):
        super(ValveFlag, self).set(value)
owner = self.owner if self._set else None
for vi in self.valves:
self.manager.set_valve_owner(vi, owner)
def traits_view(self):
v = View(HGroup(Item('name', show_label=False, style='readonly'),
Item('valves_str', style='readonly',
label='Valves')))
return v
def _get_valves_str(self):
return ','.join(self.valves)
# ============= EOF =============================================
|
object_detection_tf_vectorization_thread.py
|
"""
Vectorization version.
---------------------------
use Vectorization to speed up.
"""
import os
import tensorflow as tf
import threading
import argparse
import logging
import time
from queue import Queue
import queue
import numpy as np
import cv2
from myutil import fps_measure
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
LABEL_FILE_NAME = "pascal_label_map.pbtxt"
NUM_CLASSES = 90
image_per_run = 8
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-v', '--video', type=str, required=True,
help="video file for detection")
arg_parser.add_argument('-m', '--model', type=str, default='my_exporter',
help='directory to find model')
arg_parser.add_argument('-s', '--save', action='store_true',
                        help='save the annotated frames to output.avi')
args = arg_parser.parse_args()
class SAFE_NUM():
def __init__(self, num):
self.num = num
self.lock = threading.Lock()
def get_num(self):
self.lock.acquire()
num = self.num
self.lock.release()
return num
def set_num(self, num):
self.lock.acquire()
self.num = num
self.lock.release()
is_quit = SAFE_NUM(0)
video_end = SAFE_NUM(0)
def load_graph(model_name=args.model):
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = model_name + '/frozen_inference_graph.pb'
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return detection_graph
def load_label_map(label_map_name, num_class):
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', label_map_name)
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map,
max_num_classes=num_class, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
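# Note on the vectorized path below (descriptive comment, not original code):
# detect_object feeds `image_per_run` frames through the individual image_ph%d
# placeholders, stacks them with tf.stack into a single batch, and runs the
# detector's image_tensor once per batch, so the per-run graph overhead is
# amortized over the whole batch instead of paid per frame.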
def detect_object(detection_graph, sess, image, image_list, category_index):
with detection_graph.as_default():
with sess.as_default() as sess:
# Definite input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# build feed_dict
feed_dict = {}
for i in range(image_per_run):
feed_dict.update({"image_ph%d:0" % i: image_list[i]})
# Actual detection.
feed_image = sess.run(image,
feed_dict=feed_dict)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: feed_image})
# Visualization of the results of a detection.
for i in range(feed_image.shape[0]):
vis_util.visualize_boxes_and_labels_on_image_array(
feed_image[i],
np.squeeze(boxes[i]),
np.squeeze(classes[i]).astype(np.int32),
np.squeeze(scores[i]),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.20)
return feed_image
def image_worker(image_q, video_file):
"""Process that put imge into image_q."""
logging.info("image worker start")
video_capture = cv2.VideoCapture(video_file)
ret, frame = video_capture.read()
if not ret:
        logging.error("Cannot read video file, please check!")
while ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if is_quit.get_num():
break
image_q.put(frame)
logging.debug("put image into queue")
ret, frame = video_capture.read()
video_end.set_num(1)
video_capture.release()
def get_n_frame(f_queue, n):
multi_images = []
counts = n
b_time = time.time()
while n > 0:
# logging.debug("get image need: %d" % (n,))
if video_end.get_num():
try:
image = f_queue.get(block=False)
except queue.Empty:
# video file reached the end and image queue is empty, break
break
else:
image = f_queue.get()
# image = cv2.resize(image, None, fx=4, fy=4, interpolation=cv2.INTER_CUBIC)
multi_images.append(image)
n -= 1
logging.info("get %d frames, internal: %f" % (counts, time.time() - b_time))
return multi_images
def object_detection_worker(image_q, processed_q, detection_graph, category_index, fps=None):
"""a process to do the detection_graph."""
logging.info("detection worker start")
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(graph=detection_graph, config=config)
with detection_graph.as_default():
image_ph_list = [tf.placeholder(tf.uint8, shape=[], name="image_ph%d" % i)
for i in range(image_per_run)]
frames = tf.stack(image_ph_list)
while True:
            if is_quit.get_num() == 1:
                # before break, try to drain one image, in case the image worker is blocked on a full queue.
                try:
                    image_q.get(block=False)
                except queue.Empty:
                    pass
                break
images_list = get_n_frame(image_q, image_per_run)
# print("image shape:", frame.shape)
if len(images_list) == 0:
break
ann_image = detect_object(detection_graph, sess, frames, images_list, category_index)
for img in ann_image:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if fps:
fps.add_frame()
processed_q.put(img)
def main():
# configure logger
logging.basicConfig(
level=logging.INFO,
)
image_q = Queue(maxsize=200)
processed_q = Queue(maxsize=200)
# setup image input thread
input_process = threading.Thread(target=image_worker, args=(image_q, args.video))
detection_graph = load_graph(model_name=args.model)
category_index = load_label_map(label_map_name=LABEL_FILE_NAME, num_class=NUM_CLASSES)
# setup fps counter
fps = fps_measure.FPS()
fps.start_count()
# setup object detection process
detector_process = threading.Thread(
target=object_detection_worker,
args=(image_q, processed_q, detection_graph, category_index, fps))
input_process.start()
detector_process.start()
if args.save:
print("open video handle")
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter('output.avi', fourcc, 20, (512, 288))
    while is_quit.get_num() == 0:
ann_image = processed_q.get()
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(ann_image, 'FPS:{}'.format(int(fps.get_fps())), (50, 50), font, 2, (255, 255, 255), 2, cv2.LINE_AA)
if args.save:
print("write into video", ann_image.shape)
out.write(ann_image)
cv2.imshow('frame', ann_image)
if cv2.waitKey(1) & 0xFF == ord('q'):
is_quit.set_num(1)
break
# input_process.terminate()
# detector_process.terminate()
#
# input_process.join()
# detector_process.join()
print("release vcideo handle")
if args.save:
out.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
db_import_multiplexer.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""A loading-only EventMultiplexer that actually populates a SQLite DB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import os
import threading
import time
import six
from six.moves import queue, xrange # pylint: disable=redefined-builtin
from tensorboard import data_compat
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.backend.event_processing import plugin_event_multiplexer
from tensorboard.backend.event_processing import sqlite_writer
from tensorboard.compat import tf
from tensorboard.compat.proto import event_pb2
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
class DbImportMultiplexer(plugin_event_multiplexer.EventMultiplexer):
"""A loading-only `EventMultiplexer` that populates a SQLite DB.
This EventMultiplexer only loads data; the read APIs always return
empty results, since all data is accessed instead via SQL against
the db_connection_provider wrapped by this multiplexer.
"""
def __init__(
self,
db_uri,
db_connection_provider,
purge_orphaned_data,
max_reload_threads,
):
"""Constructor for `DbImportMultiplexer`.
Args:
db_uri: A URI to the database file in use.
db_connection_provider: Provider function for creating a DB connection.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
max_reload_threads: The max number of threads that TensorBoard can use
to reload runs. Each thread reloads one run at a time. If not provided,
reloads runs serially (one after another).
"""
logger.info("DbImportMultiplexer initializing for %s", db_uri)
super(DbImportMultiplexer, self).__init__()
self.db_uri = db_uri
self.db_connection_provider = db_connection_provider
self._purge_orphaned_data = purge_orphaned_data
self._max_reload_threads = max_reload_threads
self._event_sink = None
self._run_loaders = {}
if self._purge_orphaned_data:
logger.warn(
"--db_import does not yet support purging orphaned data"
)
conn = self.db_connection_provider()
# Set the DB in WAL mode so reads don't block writes.
conn.execute("PRAGMA journal_mode=wal")
conn.execute("PRAGMA synchronous=normal") # Recommended for WAL mode
sqlite_writer.initialize_schema(conn)
logger.info("DbImportMultiplexer done initializing")
def AddRun(self, path, name=None):
"""Unsupported; instead use AddRunsFromDirectory."""
raise NotImplementedError("Unsupported; use AddRunsFromDirectory()")
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
Args:
path: A string path to a directory to load runs from.
name: Optional, specifies a name for the experiment under which the
runs from this directory hierarchy will be imported. If omitted, the
path will be used as the name.
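        For example (mirroring `_get_exp_and_run_names` below), importing
        path "/logs" with events under "/logs/mnist/train" yields
        experiment "mnist" and run "train"; with name="exp0" it yields
        experiment "exp0" and run "mnist/train".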
Raises:
ValueError: If the path exists and isn't a directory.
"""
logger.info("Starting AddRunsFromDirectory: %s (as %s)", path, name)
for subdir in io_wrapper.GetLogdirSubdirectories(path):
logger.info("Processing directory %s", subdir)
if subdir not in self._run_loaders:
logger.info("Creating DB loader for directory %s", subdir)
names = self._get_exp_and_run_names(path, subdir, name)
experiment_name, run_name = names
self._run_loaders[subdir] = _RunLoader(
subdir=subdir,
experiment_name=experiment_name,
run_name=run_name,
)
logger.info("Done with AddRunsFromDirectory: %s", path)
def Reload(self):
"""Load events from every detected run."""
logger.info("Beginning DbImportMultiplexer.Reload()")
# Defer event sink creation until needed; this ensures it will only exist in
# the thread that calls Reload(), since DB connections must be thread-local.
if not self._event_sink:
self._event_sink = _SqliteWriterEventSink(
self.db_connection_provider
)
# Use collections.deque() for speed when we don't need blocking since it
# also has thread-safe appends/pops.
loader_queue = collections.deque(six.itervalues(self._run_loaders))
loader_delete_queue = collections.deque()
def batch_generator():
while True:
try:
loader = loader_queue.popleft()
except IndexError:
return
try:
for batch in loader.load_batches():
yield batch
except directory_watcher.DirectoryDeletedError:
loader_delete_queue.append(loader)
except (OSError, IOError) as e:
logger.error("Unable to load run %r: %s", loader.subdir, e)
num_threads = min(self._max_reload_threads, len(self._run_loaders))
if num_threads <= 1:
logger.info("Importing runs serially on a single thread")
for batch in batch_generator():
self._event_sink.write_batch(batch)
else:
output_queue = queue.Queue()
sentinel = object()
def producer():
try:
for batch in batch_generator():
output_queue.put(batch)
finally:
output_queue.put(sentinel)
logger.info("Starting %d threads to import runs", num_threads)
for i in xrange(num_threads):
thread = threading.Thread(target=producer, name="Loader %d" % i)
thread.daemon = True
thread.start()
num_live_threads = num_threads
while num_live_threads > 0:
output = output_queue.get()
if output == sentinel:
num_live_threads -= 1
continue
self._event_sink.write_batch(output)
for loader in loader_delete_queue:
logger.warn("Deleting loader %r", loader.subdir)
del self._run_loaders[loader.subdir]
logger.info("Finished with DbImportMultiplexer.Reload()")
def _get_exp_and_run_names(
self, path, subdir, experiment_name_override=None
):
if experiment_name_override is not None:
return (experiment_name_override, os.path.relpath(subdir, path))
sep = io_wrapper.PathSeparator(path)
path_parts = os.path.relpath(subdir, path).split(sep, 1)
experiment_name = path_parts[0]
run_name = path_parts[1] if len(path_parts) == 2 else "."
return (experiment_name, run_name)
# Struct holding a list of tf.Event serialized protos along with metadata about
# the associated experiment and run.
_EventBatch = collections.namedtuple(
"EventBatch", ["events", "experiment_name", "run_name"]
)
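# For example, _RunLoader.load_batches() below yields values shaped like
# _EventBatch(events=[b"<serialized tf.Event bytes>", ...],
#             experiment_name="mnist", run_name="train").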
class _RunLoader(object):
"""Loads a single run directory in batches."""
_BATCH_COUNT = 5000
_BATCH_BYTES = 2 ** 20 # 1 MiB
def __init__(self, subdir, experiment_name, run_name):
"""Constructs a `_RunLoader`.
Args:
subdir: string, filesystem path of the run directory
experiment_name: string, name of the run's experiment
run_name: string, name of the run
"""
self._subdir = subdir
self._experiment_name = experiment_name
self._run_name = run_name
self._directory_watcher = directory_watcher.DirectoryWatcher(
subdir,
event_file_loader.RawEventFileLoader,
io_wrapper.IsTensorFlowEventsFile,
)
@property
def subdir(self):
return self._subdir
def load_batches(self):
"""Returns a batched event iterator over the run directory event
files."""
event_iterator = self._directory_watcher.Load()
while True:
events = []
event_bytes = 0
start = time.time()
for event_proto in event_iterator:
events.append(event_proto)
event_bytes += len(event_proto)
if (
len(events) >= self._BATCH_COUNT
or event_bytes >= self._BATCH_BYTES
):
break
elapsed = time.time() - start
logger.debug(
"RunLoader.load_batch() yielded in %0.3f sec for %s",
elapsed,
self._subdir,
)
if not events:
return
yield _EventBatch(
events=events,
experiment_name=self._experiment_name,
run_name=self._run_name,
)
@six.add_metaclass(abc.ABCMeta)
class _EventSink(object):
"""Abstract sink for batches of serialized tf.Event data."""
@abc.abstractmethod
def write_batch(self, event_batch):
"""Writes the given event batch to the sink.
Args:
event_batch: an _EventBatch of event data.
"""
raise NotImplementedError()
class _SqliteWriterEventSink(_EventSink):
"""Implementation of EventSink using SqliteWriter."""
def __init__(self, db_connection_provider):
"""Constructs a SqliteWriterEventSink.
Args:
db_connection_provider: Provider function for creating a DB connection.
"""
self._writer = sqlite_writer.SqliteWriter(db_connection_provider)
def write_batch(self, event_batch):
start = time.time()
tagged_data = {}
for event_proto in event_batch.events:
event = event_pb2.Event.FromString(event_proto)
self._process_event(event, tagged_data)
if tagged_data:
self._writer.write_summaries(
tagged_data,
experiment_name=event_batch.experiment_name,
run_name=event_batch.run_name,
)
elapsed = time.time() - start
logger.debug(
"SqliteWriterEventSink.WriteBatch() took %0.3f sec for %s events",
elapsed,
len(event_batch.events),
)
def _process_event(self, event, tagged_data):
"""Processes a single tf.Event and records it in tagged_data."""
event_type = event.WhichOneof("what")
# Handle the most common case first.
if event_type == "summary":
for value in event.summary.value:
value = data_compat.migrate_value(value)
tag, metadata, values = tagged_data.get(
value.tag, (None, None, [])
)
values.append((event.step, event.wall_time, value.tensor))
if tag is None:
# Store metadata only from the first event.
tagged_data[value.tag] = sqlite_writer.TagData(
value.tag, value.metadata, values
)
elif event_type == "file_version":
pass # TODO: reject file version < 2 (at loader level)
elif event_type == "session_log":
if event.session_log.status == event_pb2.SessionLog.START:
pass # TODO: implement purging via sqlite writer truncation method
elif event_type in ("graph_def", "meta_graph_def"):
pass # TODO: support graphs
elif event_type == "tagged_run_metadata":
pass # TODO: support run metadata
|
dsr_service_tool_simple.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py example simple] motion basic test for doosan robot
# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com)
import rospy
import os
import threading, time
import sys
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import path : DSR_ROBOT.py
# for single robot
ROBOT_ID = "dsr01"
ROBOT_MODEL = "m1013"
import DR_init
DR_init.__dsr__id = ROBOT_ID
DR_init.__dsr__model = ROBOT_MODEL
from DSR_ROBOT import *
def shutdown():
print("shutdown time!")
print("shutdown time!")
print("shutdown time!")
pub_stop.publish(stop_mode=STOP_TYPE_QUICK)
return 0
def msgRobotState_cb(msg):
msgRobotState_cb.count += 1
if (0==(msgRobotState_cb.count % 100)):
rospy.loginfo("________ ROBOT STATUS ________")
print(" robot_state : %d" % (msg.robot_state))
print(" robot_state_str : %s" % (msg.robot_state_str))
print(" actual_mode : %d" % (msg.actual_mode))
print(" actual_space : %d" % (msg.actual_space))
print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
print(" current_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velj[0],msg.current_velj[1],msg.current_velj[2],msg.current_velj[3],msg.current_velj[4],msg.current_velj[5]))
print(" joint_abs : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_abs[0],msg.joint_abs[1],msg.joint_abs[2],msg.joint_abs[3],msg.joint_abs[4],msg.joint_abs[5]))
print(" joint_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_err[0],msg.joint_err[1],msg.joint_err[2],msg.joint_err[3],msg.joint_err[4],msg.joint_err[5]))
print(" target_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_posj[0],msg.target_posj[1],msg.target_posj[2],msg.target_posj[3],msg.target_posj[4],msg.target_posj[5]))
print(" target_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_velj[0],msg.target_velj[1],msg.target_velj[2],msg.target_velj[3],msg.target_velj[4],msg.target_velj[5]))
print(" current_posx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posx[0],msg.current_posx[1],msg.current_posx[2],msg.current_posx[3],msg.current_posx[4],msg.current_posx[5]))
print(" current_velx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velx[0],msg.current_velx[1],msg.current_velx[2],msg.current_velx[3],msg.current_velx[4],msg.current_velx[5]))
print(" task_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.task_err[0],msg.task_err[1],msg.task_err[2],msg.task_err[3],msg.task_err[4],msg.task_err[5]))
print(" solution_space : %d" % (msg.solution_space))
sys.stdout.write(" rotation_matrix : ")
for i in range(0 , 3):
sys.stdout.write( "dim : [%d]"% i)
sys.stdout.write(" [ ")
for j in range(0 , 3):
sys.stdout.write("%d " % msg.rotation_matrix[i].data[j])
sys.stdout.write("] ")
        print("")  ##end line
print(" dynamic_tor : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.dynamic_tor[0],msg.dynamic_tor[1],msg.dynamic_tor[2],msg.dynamic_tor[3],msg.dynamic_tor[4],msg.dynamic_tor[5]))
print(" actual_jts : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_jts[0],msg.actual_jts[1],msg.actual_jts[2],msg.actual_jts[3],msg.actual_jts[4],msg.actual_jts[5]))
print(" actual_ejt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ejt[0],msg.actual_ejt[1],msg.actual_ejt[2],msg.actual_ejt[3],msg.actual_ejt[4],msg.actual_ejt[5]))
print(" actual_ett : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ett[0],msg.actual_ett[1],msg.actual_ett[2],msg.actual_ett[3],msg.actual_ett[4],msg.actual_ett[5]))
print(" sync_time : %7.3f" % (msg.sync_time))
print(" actual_bk : %d %d %d %d %d %d" % (msg.actual_bk[0],msg.actual_bk[1],msg.actual_bk[2],msg.actual_bk[3],msg.actual_bk[4],msg.actual_bk[5]))
print(" actual_bt : %d %d %d %d %d " % (msg.actual_bt[0],msg.actual_bt[1],msg.actual_bt[2],msg.actual_bt[3],msg.actual_bt[4]))
print(" actual_mc : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mc[0],msg.actual_mc[1],msg.actual_mc[2],msg.actual_mc[3],msg.actual_mc[4],msg.actual_mc[5]))
print(" actual_mt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mt[0],msg.actual_mt[1],msg.actual_mt[2],msg.actual_mt[3],msg.actual_mt[4],msg.actual_mt[5]))
#print digital i/o
sys.stdout.write(" ctrlbox_digital_input : ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_input[i])
        print("")  ##end line
sys.stdout.write(" ctrlbox_digital_output: ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_output[i])
        print("")
sys.stdout.write(" flange_digital_input : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_input[i])
        print("")
sys.stdout.write(" flange_digital_output : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_output[i])
        print("")
#print modbus i/o
sys.stdout.write(" modbus_state : " )
if len(msg.modbus_state) > 0:
for i in range(0 , len(msg.modbus_state)):
sys.stdout.write("[" + msg.modbus_state[i].modbus_symbol)
sys.stdout.write(", %d] " % msg.modbus_state[i].modbus_value)
        print("")
print(" access_control : %d" % (msg.access_control))
print(" homming_completed : %d" % (msg.homming_completed))
print(" tp_initialized : %d" % (msg.tp_initialized))
print(" mastering_need : %d" % (msg.mastering_need))
print(" drl_stopped : %d" % (msg.drl_stopped))
print(" disconnected : %d" % (msg.disconnected))
msgRobotState_cb.count = 0
def thread_subscriber():
rospy.Subscriber('/'+ROBOT_ID +ROBOT_MODEL+'/state', RobotState, msgRobotState_cb)
rospy.spin()
#rospy.spinner(2)
if __name__ == "__main__":
rospy.init_node('dsr_service_motion_simple_py')
rospy.on_shutdown(shutdown)
#t1 = threading.Thread(target=thread_subscriber)
#t1.daemon = True
#t1.start()
#set_tcp = rospy.ServiceProxy('/' + ROBOT_ID + ROBOT_MODEL + '/tcp/set_current_tcp', SetCurrentTcp)
#get_tool = rospy.ServiceProxy('/'+ROBOT_ID +ROBOT_MODEL+ '/tool/get_current_tool', GetCurrentTool)
#get_tcp = rospy.ServiceProxy('/'+ROBOT_ID +ROBOT_MODEL+ '/tcp/get_current_tcp', GetCurrentTcp)
pub_stop = rospy.Publisher('/'+ROBOT_ID +ROBOT_MODEL+'/stop', RobotStop, queue_size=10)
fCog = [10.0, 10.0, 10.0]
finertia = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
p0 =[0, 0, 0, 0, 0, 0]
add_tcp("tcp1", p0)
add_tool("tool1", 5.3, fCog, finertia)
set_tool("tool1")
set_tcp("tcp1")
print(get_tool())
print(get_tcp())
while not rospy.is_shutdown():
pass
print('good bye!')
|
server.py
|
#!/usr/bin/env python
"""
Dummy server used for unit testing.
"""
from __future__ import print_function
import logging
import os
import random
import string
import sys
import threading
import socket
import warnings
import ssl
from datetime import datetime
from urllib3.exceptions import HTTPWarning
import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.web
log = logging.getLogger(__name__)
CERTS_PATH = os.path.join(os.path.dirname(__file__), "certs")
DEFAULT_CERTS = {
"certfile": os.path.join(CERTS_PATH, "server.crt"),
"keyfile": os.path.join(CERTS_PATH, "server.key"),
"cert_reqs": ssl.CERT_OPTIONAL,
"ca_certs": os.path.join(CERTS_PATH, "cacert.pem"),
}
CLIENT_INTERMEDIATE_PEM = "client_intermediate.pem"
CLIENT_NO_INTERMEDIATE_PEM = "client_no_intermediate.pem"
CLIENT_INTERMEDIATE_KEY = "client_intermediate.key"
CLIENT_CERT = os.path.join(CERTS_PATH, CLIENT_INTERMEDIATE_PEM)
PASSWORD_KEYFILE = os.path.join(CERTS_PATH, "server_password.key")
PASSWORD_CLIENT_KEYFILE = os.path.join(CERTS_PATH, "client_password.key")
IP_SAN_CERTS = {
"certfile": os.path.join(CERTS_PATH, "server.ip_san.crt"),
"keyfile": DEFAULT_CERTS["keyfile"],
}
IPV6_ADDR_CERTS = {
"certfile": os.path.join(CERTS_PATH, "server.ipv6addr.crt"),
"keyfile": os.path.join(CERTS_PATH, "server.ipv6addr.key"),
}
IPV6_SAN_CERTS = {
"certfile": os.path.join(CERTS_PATH, "server.ipv6_san.crt"),
"keyfile": DEFAULT_CERTS["keyfile"],
}
DEFAULT_CA = os.path.join(CERTS_PATH, "cacert.pem")
DEFAULT_CA_KEY = os.path.join(CERTS_PATH, "cacert.key")
DEFAULT_CA_BAD = os.path.join(CERTS_PATH, "client_bad.pem")
IPV6_ADDR_CA = os.path.join(CERTS_PATH, "server.ipv6addr.crt")
IPV6_SAN_CA = os.path.join(CERTS_PATH, "server.ipv6_san.crt")
COMBINED_CERT_AND_KEY = os.path.join(CERTS_PATH, "server.combined.pem")
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/urllib3/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
# Some systems may have IPv6 support but DNS may not be configured
# properly. We can not count that localhost will resolve to ::1 on all
# systems. See https://github.com/urllib3/urllib3/pull/611 and
# https://bugs.python.org/issue18792
HAS_IPV6_AND_DNS = _has_ipv6("localhost")
HAS_IPV6 = _has_ipv6("::1")
# Different types of servers we have:
class NoIPv6Warning(HTTPWarning):
"IPv6 is not available"
pass
class SocketServerThread(threading.Thread):
"""
:param socket_handler: Callable which receives a socket argument for one
request.
:param ready_event: Event which gets set when the socket handler is
ready to receive requests.
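    Example (illustrative sketch; the echo handler is an assumption, not
    part of this module):
        import threading
        def handler(listener):
            conn, _ = listener.accept()
            conn.sendall(conn.recv(65536))
            conn.close()
        ready = threading.Event()
        server = SocketServerThread(handler, ready_event=ready)
        server.start()
        ready.wait()  # after this, server.port holds the bound port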
"""
USE_IPV6 = HAS_IPV6_AND_DNS
def __init__(self, socket_handler, host="localhost", port=8081, ready_event=None):
threading.Thread.__init__(self)
self.daemon = True
self.socket_handler = socket_handler
self.host = host
self.ready_event = ready_event
def _start_server(self):
if self.USE_IPV6:
sock = socket.socket(socket.AF_INET6)
else:
warnings.warn("No IPv6 support. Falling back to IPv4.", NoIPv6Warning)
sock = socket.socket(socket.AF_INET)
if sys.platform != "win32":
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((self.host, 0))
self.port = sock.getsockname()[1]
# Once listen() returns, the server socket is ready
sock.listen(1)
if self.ready_event:
self.ready_event.set()
self.socket_handler(sock)
sock.close()
def run(self):
self.server = self._start_server()
def run_tornado_app(app, io_loop, certs, scheme, host):
assert io_loop == tornado.ioloop.IOLoop.current()
# We can't use fromtimestamp(0) because of CPython issue 29097, so we'll
# just construct the datetime object directly.
app.last_req = datetime(1970, 1, 1)
if scheme == "https":
http_server = tornado.httpserver.HTTPServer(app, ssl_options=certs)
else:
http_server = tornado.httpserver.HTTPServer(app)
sockets = tornado.netutil.bind_sockets(None, address=host)
port = sockets[0].getsockname()[1]
http_server.add_sockets(sockets)
return http_server, port
def run_loop_in_thread(io_loop):
t = threading.Thread(target=io_loop.start)
t.start()
return t
def get_unreachable_address():
while True:
host = "".join(random.choice(string.ascii_lowercase) for _ in range(60))
sockaddr = (host, 54321)
# check if we are really "lucky" and hit an actual server
try:
s = socket.create_connection(sockaddr)
except socket.error:
return sockaddr
else:
s.close()
if __name__ == "__main__":
# For debugging dummyserver itself - python -m dummyserver.server
from .testcase import TestingApp
host = "127.0.0.1"
io_loop = tornado.ioloop.IOLoop.current()
app = tornado.web.Application([(r".*", TestingApp)])
server, port = run_tornado_app(app, io_loop, None, "http", host)
server_thread = run_loop_in_thread(io_loop)
print("Listening on http://{host}:{port}".format(host=host, port=port))
|
base.py
|
#
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The experiment driver implements the functionality for scheduling trials on
maggy.
"""
import queue
import threading
import json
import os
import secrets
import time
from abc import ABC, abstractmethod
from datetime import datetime
from hops import constants as hopsconstants
from hops import hdfs as hopshdfs
from hops import util as hopsutil
from hops.experiment_impl.util import experiment_utils
from maggy import util
from maggy.core import rpc
from maggy.trial import Trial
from maggy.earlystop import NoStoppingRule
driver_secret = None
class Driver(ABC):
SECRET_BYTES = 8
def __init__(
self, name, description, direction, num_executors, hb_interval, log_dir
):
global driver_secret
# COMMON EXPERIMENT SETUP
# Functionality inits
self._final_store = []
self._trial_store = {}
self.num_executors = num_executors
self._message_q = queue.Queue()
self.experiment_done = False
self.worker_done = False
self.hb_interval = hb_interval
self.server = rpc.Server(self.num_executors)
if not driver_secret:
driver_secret = self._generate_secret(self.SECRET_BYTES)
self._secret = driver_secret
self.executor_logs = ""
self.maggy_log = ""
self.log_lock = threading.RLock()
self.log_file = log_dir + "/maggy.log"
self.log_dir = log_dir
self.exception = None
if isinstance(direction, str) and direction.lower() in ["min", "max"]:
self.direction = direction.lower()
else:
raise Exception(
"The experiment's direction should be an string (either 'min' or 'max') "
"but it is {0} (of type '{1}').".format(
str(direction), type(direction).__name__
)
)
# Open File desc for HDFS to log
if not hopshdfs.exists(self.log_file):
hopshdfs.dump("", self.log_file)
self.fd = hopshdfs.open_file(self.log_file, flags="w")
# overwritten for optimization
self.es_interval = None
self.es_min = None
# Metadata
self.name = name
self.description = description
def init(self, job_start):
self.server_addr = self.server.start(self)
self.job_start = job_start
self._start_worker()
@abstractmethod
def prep_results(self):
pass
def finalize(self, job_end):
self.job_end = job_end
self.duration = experiment_utils._seconds_to_milliseconds(
self.job_end - self.job_start
)
self.duration_str = experiment_utils._time_diff(self.job_start, self.job_end)
results = self.prep_results()
print(results)
self._log(results)
hopshdfs.dump(
json.dumps(self.result, default=util.json_default_numpy),
self.log_dir + "/result.json",
)
sc = hopsutil._find_spark().sparkContext
hopshdfs.dump(self.json(sc), self.log_dir + "/maggy.json")
return self.result
def get_trial(self, trial_id):
return self._trial_store[trial_id]
def add_trial(self, trial):
self._trial_store[trial.trial_id] = trial
def add_message(self, msg):
self._message_q.put(msg)
@abstractmethod
def controller_get_next(self, trial=None):
# TODO this won't be necessary if ablator and optimizer implement same
# interface
pass
def _start_worker(self):
def _target_function(self):
try:
while not self.worker_done:
trial = None
# get a message
try:
msg = self._message_q.get_nowait()
except queue.Empty:
msg = {"type": None}
# depending on message do the work
# 1. METRIC
if msg["type"] == "METRIC":
# append executor logs if in the message
logs = msg.get("logs", None)
if logs is not None:
with self.log_lock:
self.executor_logs = self.executor_logs + logs
step = None
if msg["trial_id"] is not None and msg["data"] is not None:
step = self.get_trial(msg["trial_id"]).append_metric(
msg["data"]
)
                        # maybe these if statements should be factored into a helper.
                        # Alternatively the step number could be enqueued as its own
                        # message and the early-stopping check run when that message
                        # is handled, so this branch does not block other messages
                        # for too long.
if self.earlystop_check != NoStoppingRule.earlystop_check:
if len(self._final_store) > self.es_min:
if step is not None and step != 0:
if step % self.es_interval == 0:
try:
to_stop = self.earlystop_check(
self.get_trial(msg["trial_id"]),
self._final_store,
self.direction,
)
except Exception as e:
self._log(e)
to_stop = None
if to_stop is not None:
self._log(
"Trials to stop: {}".format(to_stop)
)
self.get_trial(to_stop).set_early_stop()
# 2. BLACKLIST the trial
elif msg["type"] == "BLACK":
trial = self.get_trial(msg["trial_id"])
with trial.lock:
trial.status = Trial.SCHEDULED
self.server.reservations.assign_trial(
msg["partition_id"], msg["trial_id"]
)
# 3. FINAL
elif msg["type"] == "FINAL":
# set status
# get trial only once
trial = self.get_trial(msg["trial_id"])
logs = msg.get("logs", None)
if logs is not None:
with self.log_lock:
self.executor_logs = self.executor_logs + logs
# finalize the trial object
with trial.lock:
trial.status = Trial.FINALIZED
trial.final_metric = msg["data"]
trial.duration = experiment_utils._seconds_to_milliseconds(
time.time() - trial.start
)
# move trial to the finalized ones
self._final_store.append(trial)
self._trial_store.pop(trial.trial_id)
# update result dictionary
self._update_result(trial)
# keep for later in case tqdm doesn't work
self.maggy_log = self._update_maggy_log()
self._log(self.maggy_log)
hopshdfs.dump(
trial.to_json(),
self.log_dir + "/" + trial.trial_id + "/trial.json",
)
# assign new trial
trial = self.controller_get_next(trial)
if trial is None:
self.server.reservations.assign_trial(
msg["partition_id"], None
)
self.experiment_done = True
elif trial == "IDLE":
self.add_message(
{
"type": "IDLE",
"partition_id": msg["partition_id"],
"idle_start": time.time(),
}
)
self.server.reservations.assign_trial(
msg["partition_id"], None
)
else:
with trial.lock:
trial.start = time.time()
trial.status = Trial.SCHEDULED
self.server.reservations.assign_trial(
msg["partition_id"], trial.trial_id
)
self.add_trial(trial)
# 4. Let executor be idle
elif msg["type"] == "IDLE":
# execute only every 0.1 seconds but do not block thread
if time.time() - msg["idle_start"] > 0.1:
trial = self.controller_get_next()
if trial is None:
self.server.reservations.assign_trial(
msg["partition_id"], None
)
self.experiment_done = True
elif trial == "IDLE":
# reset timeout
msg["idle_start"] = time.time()
self.add_message(msg)
else:
with trial.lock:
trial.start = time.time()
trial.status = Trial.SCHEDULED
self.server.reservations.assign_trial(
msg["partition_id"], trial.trial_id
)
self.add_trial(trial)
else:
self.add_message(msg)
                    # 5. REG
elif msg["type"] == "REG":
trial = self.controller_get_next()
if trial is None:
self.server.reservations.assign_trial(
msg["partition_id"], None
)
self.experiment_done = True
elif trial == "IDLE":
# reset timeout
msg["idle_start"] = time.time()
self.add_message(msg)
else:
with trial.lock:
trial.start = time.time()
trial.status = Trial.SCHEDULED
self.server.reservations.assign_trial(
msg["partition_id"], trial.trial_id
)
self.add_trial(trial)
except Exception as exc:
# Exception can't be propagated to parent thread
# therefore log the exception and fail experiment
self._log(exc)
self.exception = exc
self.server.stop()
t = threading.Thread(target=_target_function, args=(self,))
t.daemon = True
t.start()
def stop(self):
"""Stop the Driver's worker thread and server."""
self.worker_done = True
self.server.stop()
self.fd.flush()
self.fd.close()
@abstractmethod
def config_to_dict(self):
pass
def json(self, sc):
"""Get all relevant experiment information in JSON format.
"""
user = None
if hopsconstants.ENV_VARIABLES.HOPSWORKS_USER_ENV_VAR in os.environ:
user = os.environ[hopsconstants.ENV_VARIABLES.HOPSWORKS_USER_ENV_VAR]
experiment_json = {
"project": hopshdfs.project_name(),
"user": user,
"name": self.name,
"module": "maggy",
"app_id": str(sc.applicationId),
"start": time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(self.job_start)),
"memory_per_executor": str(sc._conf.get("spark.executor.memory")),
"gpus_per_executor": str(sc._conf.get("spark.executor.gpus")),
"executors": self.num_executors,
"logdir": self.log_dir,
# 'versioned_resources': versioned_resources,
"description": self.description,
"experiment_type": self.controller.name(),
}
experiment_json["controller"] = self.controller.name()
experiment_json["config"] = json.dumps(self.config_to_dict())
if self.experiment_done:
experiment_json["status"] = "FINISHED"
experiment_json["finished"] = time.strftime(
"%Y-%m-%dT%H:%M:%S", time.localtime(self.job_end)
)
experiment_json["duration"] = self.duration
experiment_json["config"] = json.dumps(self.result["best_config"])
experiment_json["metric"] = self.result["best_val"]
else:
experiment_json["status"] = "RUNNING"
return json.dumps(experiment_json, default=util.json_default_numpy)
def _generate_secret(self, nbytes):
"""Generates a secret to be used by all clients during the experiment
to authenticate their messages with the experiment driver.
"""
return secrets.token_hex(nbytes=nbytes)
def _update_result(self, trial):
"""Given a finalized trial updates the current result's best and
worst trial.
"""
metric = trial.final_metric
param_string = trial.params
trial_id = trial.trial_id
num_epochs = len(trial.metric_history)
# pop function values and trial_type from parameters, since we don't need them
param_string.pop("dataset_function", None)
param_string.pop("model_function", None)
# First finalized trial
if self.result.get("best_id", None) is None:
self.result = {
"best_id": trial_id,
"best_val": metric,
"best_config": param_string,
"worst_id": trial_id,
"worst_val": metric,
"worst_config": param_string,
"avg": metric,
"metric_list": [metric],
"num_trials": 1,
"early_stopped": 0,
"num_epochs": num_epochs,
"trial_id": trial_id,
}
if trial.early_stop:
self.result["early_stopped"] += 1
return
else:
if self.direction == "max":
if metric > self.result["best_val"]:
self.result["best_val"] = metric
self.result["best_id"] = trial_id
self.result["best_config"] = param_string
if metric < self.result["worst_val"]:
self.result["worst_val"] = metric
self.result["worst_id"] = trial_id
self.result["worst_config"] = param_string
elif self.direction == "min":
if metric < self.result["best_val"]:
self.result["best_val"] = metric
self.result["best_id"] = trial_id
self.result["best_config"] = param_string
if metric > self.result["worst_val"]:
self.result["worst_val"] = metric
self.result["worst_id"] = trial_id
self.result["worst_config"] = param_string
# update results and average regardless of experiment type
self.result["metric_list"].append(metric)
self.result["num_trials"] += 1
self.result["avg"] = sum(self.result["metric_list"]) / float(
len(self.result["metric_list"])
)
if trial.early_stop:
self.result["early_stopped"] += 1
@abstractmethod
def log_string(self):
pass
def _update_maggy_log(self):
"""Creates the status of a maggy experiment with a progress bar.
"""
return self.log_string()
def _get_logs(self):
"""Return current experiment status and executor logs to send them to
spark magic.
"""
with self.log_lock:
temp = self.executor_logs
# clear the executor logs since they are being sent
self.executor_logs = ""
return self.result, temp
def _log(self, log_msg):
"""Logs a string to the maggy driver log file.
"""
msg = datetime.now().isoformat() + ": " + str(log_msg)
self.fd.write((msg + "\n").encode())
|
sentiment_predict.py
|
import sys,os
sys.path.append("..")
import numpy as np
import tensorflow as tf
from example import bert_classifier_estimator
from bunch import Bunch
from data_generator import tokenization
from data_generator import tf_data_utils
from model_io import model_io
from example import feature_writer, write_to_tfrecords, classifier_processor
import json
from data_generator import tokenization
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def full2half(s):
n = []
for char in s:
num = ord(char)
if num == 0x3000:
num = 32
elif 0xFF01 <= num <= 0xFF5E:
num -= 0xfee0
num = chr(num)
n.append(num)
return ''.join(n)
from queue import Queue
class InferAPI(object):
def __init__(self, config):
self.config = config
def load_label_dict(self):
with open(self.config["label2id"], "r") as frobj:
self.label_dict = json.load(frobj)
def init_model(self):
self.graph = tf.Graph()
with self.graph.as_default():
init_checkpoint = self.config["init_checkpoint"]
bert_config = json.load(open(self.config["bert_config"], "r"))
self.model_config = Bunch(bert_config)
self.model_config.use_one_hot_embeddings = True
self.model_config.scope = "bert"
self.model_config.dropout_prob = 0.1
self.model_config.label_type = "single_label"
self.input_queue = Queue(maxsize=self.config.get("batch_size", 20))
self.output_queue = Queue(maxsize=self.config.get("batch_size", 20))
opt_config = Bunch({"init_lr":2e-5, "num_train_steps":1e30, "cycle":False})
model_io_config = Bunch({"fix_lm":False})
self.num_classes = len(self.label_dict["id2label"])
self.max_seq_length = self.config["max_length"]
self.tokenizer = tokenization.FullTokenizer(
vocab_file=self.config["bert_vocab"],
do_lower_case=True)
self.sess = tf.Session()
self.model_io_fn = model_io.ModelIO(model_io_config)
model_fn = bert_classifier_estimator.classifier_model_fn_builder(
self.model_config,
self.num_classes,
init_checkpoint,
reuse=None,
load_pretrained=True,
model_io_fn=self.model_io_fn,
model_io_config=model_io_config,
opt_config=opt_config)
self.estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=self.config["model_dir"])
def get_input_features(self, sent_lst):
input_ids_lst, input_mask_lst, segment_ids_lst = [], [], []
label_ids_lst = []
for sent in sent_lst:
sent = full2half(sent)
tokens_a = self.tokenizer.tokenize(sent)
if len(tokens_a) > self.max_seq_length - 2:
tokens_a = tokens_a[0:(self.max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self.max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids = 0
input_ids_lst.append(input_ids)
input_mask_lst.append(input_mask)
segment_ids_lst.append(segment_ids)
label_ids_lst.append(label_ids)
return {"input_ids":np.array(input_ids_lst).astype(np.int32),
"input_mask":np.array(input_mask_lst).astype(np.int32),
"segment_ids":np.array(segment_ids_lst).astype(np.int32),
"label_ids":np.array(label_ids_lst).astype(np.int32)}
def input_fn(self, input_features):
dataset = tf.data.Dataset.from_tensor_slices(input_features)
dataset = dataset.batch(self.config.get("batch_size", 20))
# iterator = dataset.make_one_shot_iterator()
# features = iterator.get_next()
return dataset
def generate_from_queue(self):
""" Generator which yields items from the input queue.
This lives within our 'prediction thread'.
"""
while True:
yield self.input_queue.get()
def predict_from_queue(self):
""" Adds a prediction from the model to the output_queue.
This lives within our 'prediction thread'.
Note: estimators accept generators as inputs and return generators as output.
Here, we are iterating through the output generator, which will be
populated in lock-step with the input generator.
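    Wiring used at the bottom of this file: the prediction loop runs on a
    daemon thread while callers exchange data through the two queues, e.g.
        t = Thread(target=api.predict_from_queue, daemon=True)
        t.start()
        result = api.predict_single(["some sentence"])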
"""
for i in self.estimator.predict(input_fn=self.queued_predict_input_fn):
print(i)
print('Putting in output queue')
print("===========")
self.output_queue.put(i)
def queued_predict_input_fn(self):
"""
Queued version of the `predict_input_fn` in FlowerClassifier.
Instead of yielding a dataset from data as a parameter,
we construct a Dataset from a generator,
which yields from the input queue.
"""
# Fetch the inputs from the input queue
output_types = {'input_ids': tf.int32,
'input_mask': tf.int32,
'segment_ids': tf.int32,
'label_ids': tf.int32}
output_shapes = {'input_ids': [None, self.max_seq_length ],
'input_mask': [None, self.max_seq_length ],
'segment_ids': [None, self.max_seq_length ],
'label_ids': [None,]}
dataset = tf.data.Dataset.from_generator(self.generate_from_queue, output_types=output_types, output_shapes=output_shapes)
#dataset = dataset.batch(self.config.get("batch_size", 20))
return dataset
def predict(self, sent_lst):
# Get predictions dictionary
features = dict(self.get_input_features(sent_lst))
print("call api", self.input_queue.qsize())
print("call api", self.output_queue.qsize())
self.input_queue.put(features)
print("call api", self.input_queue.qsize())
predictions = self.output_queue.get() # The latest predictions generator
return predictions
def get_all_queue_result(self, queue):
result_list = []
while not queue.empty():
result_list.append(queue.get())
return result_list
def predict_single(self, sent_lst):
# Get predictions dictionary
features = dict(self.get_input_features(sent_lst))
self.input_queue.put(features)
predictions = self.output_queue.get() # The latest predictions generator
predictions["label"] = self.label_dict["id2label"][str(predictions["pred_label"])]
return predictions
def predict_batch(self, sen_lst):
return [self.predict_single([sent]) for sent in sen_lst]
    def infer(self, sent_lst):
        # build the feature dict from the input sentences before predicting
        input_features = dict(self.get_input_features(sent_lst))
        with self.graph.as_default():
            for result in self.estimator.predict(input_fn=lambda: self.input_fn(input_features)):
                print(result)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
model_config = {
"label2id":"/data/xuht/eventy_detection/sentiment/model/bert/label2id.json",
"init_checkpoint":"/data/xuht/eventy_detection/sentiment/model/bert/sentiment.ckpt",
"bert_config":"/data/xuht/bert/chinese_L-12_H-768_A-12/bert_config.json",
"max_length":128,
"bert_vocab":"/data/xuht/bert/chinese_L-12_H-768_A-12/vocab.txt",
"model_dir":"/data/xuht/eventy_detection/sentiment/model/bert"
}
api = InferAPI(model_config)
api.load_label_dict()
api.init_model()
from threading import Thread
t = Thread(target=api.predict_from_queue, daemon=True)
t.start()
import tornado.ioloop
import tornado.web
import tornado.httpserver
import json
class PredictHandler(tornado.web.RequestHandler):
def post(self):
        body = json.loads(self.request.body.decode("utf-8"))
sentences = body.get("sentences")
result = api.predict_batch(sentences)
result = [[int(row['label']) for row in result],[float(row['max_prob']) for row in result]]
print(result)
return self.write(json.dumps({"code":200, "data":result}, ensure_ascii=False))
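# Illustrative request once main() below is running (it binds port 9883):
#   curl -X POST http://127.0.0.1:9883/sentiment \
#        -d '{"sentences": ["sentence one", "sentence two"]}'
# The handler replies with {"code": 200, "data": [[labels...], [max_probs...]]}.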
def main():
application = tornado.web.Application([(r"/sentiment",PredictHandler),])
http_server = tornado.httpserver.HTTPServer(application)
http_server.bind(9883)
http_server.start()
print("-------------server start-----------------")
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
example_chart_mac_windows.py
|
import Aidlab
from Aidlab.Signal import Signal
import numpy as np
from multiprocessing import Process, Queue, Array
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
buffer_size = 500
result = None
x = [i for i in range(buffer_size)]
y = []
figure = pyplot.figure()
axis = figure.add_subplot(1, 1, 1)
def animate(i):
global y
axis.clear()
axis.plot(x, y)
pyplot.ylim([np.min(y) - np.std(y), np.max(y) + np.std(y)])
def chart(result):
global y
y = result
ani = animation.FuncAnimation(figure, animate, interval=2)
pyplot.show()
class MainManager(Aidlab.Aidlab):
def __init__(self):
super().__init__()
self.sample_index = 0
def did_connect(self, aidlab):
print("Connected to: ", aidlab.address)
def did_disconnect(self, aidlab):
print("Disconnected from: ", aidlab.address)
def did_receive_ecg(self, aidlab, timestamp, values):
global result, buffer_size
self.sample_index += 1
result[self.sample_index % buffer_size] = values[0]
if __name__ == '__main__':
# create process for Plot
result = Array('d', buffer_size)
Process(target=chart, args=(result,)).start()
signals = [Signal.ecg]
main_manager = MainManager()
main_manager.connect(signals)
# Start the connection
while True:
pass
|
RomitiDj.py
|
import spotipy
import random
import re
import telegram
import schedule
import threading
import time
import pickle
import config
import os.path
import logging
from spotipy.oauth2 import SpotifyClientCredentials
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# YOUTUBE API #
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
DEVELOPER_KEY = config.credentials['google_dev']
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
VIDEO = 'https://www.youtube.com/watch?v='
def youtube_search(title):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
# Call the search.list method to retrieve results matching the specified
# query term.
print('asking youtube', title)
search_response = youtube.search().list(
q=title,
part="id,snippet",
maxResults=1,
type='video',).execute()
try:
id = search_response['items'][0]['id']['videoId']
return VIDEO+id
except IndexError:
logger.error('Can\'t find string {:s} on YOUTUBE'.format(title))
return False
# ##################################### #
audio_format = ('.mp3', '.wav')
music = 'musicList.txt'
black_list = ('(Video )',)
client_credentials_manager = SpotifyClientCredentials()
spotify = spotipy.Spotify(
client_credentials_manager=client_credentials_manager)
if os.path.isfile('users.pkl'):
USERS = pickle.load(open('users.pkl', 'rb+'))
else:
USERS = set([])
with open(music, 'r') as file:
songs = file.read().splitlines()
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
fh = logging.FileHandler('bot.log')
formatter = logging.Formatter('%(levelname)s:%(asctime)s:%(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def clear_title(song):
song = song.replace('_', ' ')
    m = re.match(r'([^A-Z,a-z,0-9]+)(.*)\.', song)
return m.group(2).lower()
def random_song(songs, num_songs):
drawed = 0
selected = []
total = len(songs)
while drawed < num_songs:
n = random.randint(0, total-1)
song = songs[n]
if song.endswith(audio_format):
drawed += 1
selected.append(clear_title(song))
return selected
def ask_spotify(song_list, spotify):
for song in song_list:
print(song)
results = spotify.search(q=song, type='track')
try:
s = results['tracks']['items'][0]['external_urls']['spotify']
print(s)
except IndexError:
logger.warning('Can\'t find string: {:s}'.format(song))
try:
s = youtube_search(song)
print(s)
            except HttpError as e:
print("An HTTP error {} occurred:\n{}".format(
e.resp.status, e.content))
return s
def start(bot, update):
user = update.message['chat']['id']
USERS.add(user)
bot.send_message(
chat_id=update.message.chat_id, text='Bishop is the answer!')
print(USERS)
pickle.dump(USERS, open('users.pkl', 'wb+'))
def stop(bot, update):
user = update.message['chat']['id']
USERS.remove(user)
print(USERS)
pickle.dump(USERS, open('users.pkl', 'wb+'))
def bobby(bot, update):
song = False
while not song:
song = ask_spotify(random_song(songs, 1), spotify)
bot.send_message(chat_id=update.message.chat_id, text=song)
def test(bot, update):
print(USERS)
def stupid_job(bot):
song = False
while not song:
song = ask_spotify(random_song(songs, 1), spotify)
song = 'It\'s Bobby time!\n' + song
for user in USERS:
try:
bot.send_message(chat_id=user, text=song)
except Exception as e:
print(e)
print('Can\'t send message to user {:d}'.format(user))
def daily_song(bot):
schedule.every().day.at('11:00').do(stupid_job, bot)
while True:
schedule.run_pending()
time.sleep(1)
def help(bot, update):
help_msg = 'Supported commands:\n/bobby to get a daily song\n/help ' + \
'to see this message\n/start to subscribe to the ' + \
'automatic daily song\n/stop to unsubscribe'
bot.send_message(chat_id=update.message.chat_id, text=help_msg)
def unknown(bot, update):
msg = 'Unsupported command, try /help for more information'
bot.send_message(chat_id=update.message.chat_id, text=msg)
def main():
print(USERS)
bot = telegram.Bot(config.credentials['telegram_bot'])
updater = Updater(config.credentials['telegram_bot'])
updater.dispatcher.add_handler(CommandHandler('start', start))
updater.dispatcher.add_handler(CommandHandler('stop', stop))
updater.dispatcher.add_handler(CommandHandler('bobby', bobby))
updater.dispatcher.add_handler(CommandHandler('test', test))
updater.dispatcher.add_handler(CommandHandler('help', help))
unknown_handler = MessageHandler(Filters.command, unknown)
updater.dispatcher.add_handler(unknown_handler)
t = threading.Thread(target=daily_song, args=(bot,), daemon=True)
t.start()
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
events.py
|
"""
This handler listens for TCP/Unix connections from the localhost and from this
agent sending us events. We will forward those events to the CorkAPI.
"""
from . import BaseHandler
from dart.common.settings import SettingsManager
from dart.common.exceptions import EventValidationException
import dart.agent.api
from threading import Thread
import socketserver
import requests
import traceback
import json
import os
class EventHandler(BaseHandler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# configure settings
self.settings = SettingsManager()
self.enabled = self.settings.get("agent.events.enabled", False)
# if not enabled then we will not turn on any listeners
if (not self.enabled):
self.logger.warning("{} handler is disabled".format(self.name))
return
self.listen_port = int(self.settings.get("agent.events.port", 1337))
self.listen_path = self.settings.get("agent.events.path", "/run/events.sock")
# where we are listening
self.logger.info("{} handler listening for events on port {}".format(self.name, self.listen_port))
self.logger.info("{} handler listening for events at path {}".format(self.name, self.listen_path))
class TCPRequestServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
# faster re-binding
allow_reuse_address = True
# make this bigger than five
request_queue_size = 10
# kick connections when we exit
daemon_threads = True
class UnixStreamRequestServer(socketserver.ThreadingMixIn, socketserver.UnixStreamServer):
# faster re-binding
allow_reuse_address = True
# make this bigger than five
request_queue_size = 10
# kick connections when we exit
daemon_threads = True
class TCPRequestHandler(socketserver.StreamRequestHandler):
def handle(subself):
try:
data = subself.rfile.readline().strip()
self.logger.debug("{} handler received TCP event from {}:{}".format(self.name, subself.client_address[0], subself.client_address[1]))
try:
data = self._validate(data)
self.events.put(data)
except EventValidationException:
pass
except BrokenPipeError:
self.logger.debug("{} handler broken TCP pipe from {}:{}".format(self.name, subself.client_address[0], subself.client_address[1]))
class UnixStreamRequestHandler(socketserver.StreamRequestHandler):
def handle(subself):
try:
data = subself.rfile.readline().strip()
self.logger.debug("{} handler received Unix event".format(self.name))
try:
data = self._validate(data)
self.events.put(data)
except EventValidationException:
pass
except BrokenPipeError:
self.logger.debug("{} handler broken pipe over Unix socket".format(self.name))
        # this is the server. it handles the sockets and passes each request to
        # the handler class (the second argument). the server will run in its
        # own thread so that we can kill it when we need to
self.tcp_server = TCPRequestServer(("127.0.0.1", self.listen_port), TCPRequestHandler)
# note that we're just removing whatever socket is already there. this
# can be dangerous if something is still using the old socket. but it
# is worse if our new process doesn't start.
try:
os.unlink(self.listen_path)
except FileNotFoundError as e:
self.logger.debug("{} handler could not remove {}: {}".format(self.name, self.listen_path, e))
# then create the unix socket server and fix the permissions
self.unix_server = UnixStreamRequestServer(self.listen_path, UnixStreamRequestHandler)
os.chmod(self.listen_path, 0o777)
@property
def name(self):
return "events"
def start(self):
if (self.enabled):
self.tcp_thread = Thread(target=self._run_tcp)
self.tcp_thread.start()
self.unix_thread = Thread(target=self._run_unix)
self.unix_thread.start()
# always run the queue so that events generated by dart can be
# processed and dumped on the floor.
self.events_thread = Thread(target=self._run_queue)
self.events_thread.start()
def stop(self):
self.logger.info("{} handler received signal to stop".format(self.name))
if (self.enabled):
# tell the servers to stop
self.tcp_server.shutdown()
self.unix_server.shutdown()
# then wait for the threads to finish
self.tcp_thread.join()
self.unix_thread.join()
# try to clean up our unix socket
try:
os.remove(self.listen_path)
except FileNotFoundError as e:
self.logger.warning("{} handler could not remove {}: {}".format(self.name, self.listen_path, e))
except Exception as e:
self.logger.error("{} handler could not remove {}: {}".format(self.name, self.listen_path, e))
# always listen for the queue handler to stop
self.events.put(None)
self.events.join()
self.events_thread.join()
def can_handle(self, event_type):
# this handler wants nothing from supervisor
return False
def handle(self, event_type, event, data):
# we never get passed anything to handle so we can't handle anything
pass
# this runs inside a thread
def _run_tcp(self):
try:
# clear first. any error will be reraised
self.events.put({
"data": {
"component": {"name": "agent:{}:tcp-listener".format(self.name)},
"severity": "OK",
"message": "clear",
}
})
# try to start the server. this will block but we're in a thread.
server_address = self.tcp_server.server_address
self.logger.info("{} handler starting TCP server on {}:{}".format(self.name, server_address[0], server_address[1]))
self.tcp_server.serve_forever()
except Exception as e:
subject = "could not create TCP event listener on {}: {}".format(self.fqdn, e)
message = traceback.format_exc()
self.logger.error("{} handler {}".format(self.name, subject))
self.logger.error(message)
self.events.put({
"data": {
"component": {"name": "agent:{}:tcp-listener".format(self.name)},
"severity": 2, # high severity
"title": subject,
"message": message,
}
})
# this runs inside of a thread
def _run_unix(self):
try:
# clear first. any error will be reraised
self.events.put({
"data": {
"component": {"name": "agent:{}:unix-listener".format(self.name)},
"severity": "OK",
"message": "clear",
}
})
# try to start the server. this will block but we're in a thread.
server_address = self.unix_server.server_address
self.logger.info("{} handler starting Unix server at {}".format(self.name, server_address))
self.unix_server.serve_forever()
except Exception as e:
subject = "could not create Unix event listener on {}: {}".format(self.fqdn, e)
message = traceback.format_exc()
self.logger.error("{} handler {}".format(self.name, subject))
self.logger.error(message)
self.events.put({
"environment": self.environment,
"data": {
"component": {"name": "agent:{}:unix-listener".format(self.name)},
"severity": 2, # high severity
"title": subject,
"message": message,
}
})
def _run_queue(self):
# loop forever -- after getting a message off of the queue on which we
# are listening we send it to the CorkAPI and then wait for more.
while (True):
item = None
try:
# this will block while waiting for messages to appear on the
# local thread queue. we immediately acknowledge it off of the
# local thread queue so that if there is an exception then we
# can put it back on the queue.
item = self.events.get()
self.events.task_done()
# if "None" is put on the queue then we are to stop listening
# to the queue. this happens when someone calls the ".stop"
# method to this class.
if (item is None):
self.logger.info("{} handler cleaning up before exit".format(self.name))
break
# if the events handler is not enabled then do no processing
if (not self.enabled):
continue
# get some pieces out of the event that was enqueued
event_data = item["data"]
event_type = item.get("type", "event") # determines API endpoint
# if no hostname is provided then fill ours in
if ("host" not in event_data or event_data["host"] is None):
event_data["host"] = {}
if (not isinstance(event_data["host"], dict)):
event_data["host"] = {}
if ("name" not in event_data["host"] or event_data["host"]["name"] is None):
event_data["host"]["name"] = self.fqdn
# if no configuration item then set a default. this logic is
# used by all of the internal dart monitoring stuff.
if ("ci" not in event_data or event_data["ci"] is None):
event_data["ci"] = {}
if (not isinstance(event_data["ci"], dict)):
event_data["ci"] = {}
if ("name" not in event_data["ci"] and "uuid" not in event_data["ci"]):
event_data["ci"] = {
"name": "Dart Command and Control System",
}
# add an endpoint to the url
url = "{}/v1/{}".format(dart.agent.api.CORK_API_URL, event_type)
result = dart.agent.api.cork.post(url, data=json.dumps(event_data), stream=False, timeout=10)
if (result.status_code == 503):
self.logger.warning("{} handler could not talk to CorkAPI -- skipping: 503 error".format(self.name))
self.events.put(item) # reenqueue to try again later
elif (result.status_code != 202):
self.logger.warning("{} handler received error talking to CorkAPI: {}".format(self.name, result.text.strip()))
else:
self.logger.debug("{} handler: {}".format(self.name, result.text.strip()))
except requests.RequestException as e:
self.logger.warning("{} handler could not talk to cork -- skipping: {}".format(self.name, e))
# if we have an exception we're going to try to put it on the
# queue later because maybe the CorkAPI will be working again.
self.events.put(item)
def _validate(self, packet):
try:
packet = json.loads(packet.decode("utf8", "backslashreplace"))
# if the event is not a dict then throw it out
if (not isinstance(packet, dict)):
raise EventValidationException("You must send a JSON object to the CorkAPI agent.")
# pull out the pieces we want
event_type = packet.get("type", "event")
event_data = packet.get("data", {})
# validate the event type
if (event_type.lower() not in ["event", "heartbeat"]):
self.logger.warning("{}: invalid value {} for type".format(self.name, event_type))
raise EventValidationException("You may only use the types 'event' and 'heartbeat'.")
# if the event is not a dict then throw it out
if (not isinstance(event_data, dict)):
raise EventValidationException("You must send a JSON object to the CorkAPI agent.")
return packet
except UnicodeDecodeError as e:
self.logger.warning("{}: event contained undecodable unicode data -- skipping: {}".format(self.name, e))
raise EventValidationException("The CorkAPI agent received non-UTF-8 data that could not be decoded. You must send data only in UTF-8.")
except json.decoder.JSONDecodeError as e:
self.logger.warning("{}: event contained undecodable json data -- skipping: {}".format(self.name, e))
raise EventValidationException("The CorkAPI agent received non-JSON data that could not be parsed. You must send only valid JSON data.")
except Exception as e:
self.logger.error("{}: error processing event -- skipping: {}".format(self.name, e))
raise EventValidationException("The CorkAPI agent could not process your event.")
|
multi_camera_multi_person_tracking.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import time
import queue
from threading import Thread
import json
import logging as log
import os
import random
import sys
import cv2 as cv
from utils.network_wrappers import Detector, VectorCNN, MaskRCNN, DetectionsFromFileReader
from mc_tracker.mct import MultiCameraTracker
from utils.analyzer import save_embeddings
from utils.misc import read_py_config, check_pressed_keys, AverageEstimator, set_log_config
from utils.video import MulticamCapture, NormalizerCLAHE
from utils.visualization import visualize_multicam_detections, get_target_size
from openvino.inference_engine import IECore # pylint: disable=import-error,E0611
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'common'))
import monitors
set_log_config()
def check_detectors(args):
detectors = {
'--m_detector': args.m_detector,
'--m_segmentation': args.m_segmentation,
'--detections': args.detections
}
non_empty_detectors = [(det, value) for det, value in detectors.items() if value]
det_number = len(non_empty_detectors)
if det_number == 0:
log.error('No detector specified, please specify one of the following parameters: '
'\'--m_detector\', \'--m_segmentation\' or \'--detections\'')
elif det_number > 1:
det_string = ''.join('\n\t{}={}'.format(det[0], det[1]) for det in non_empty_detectors)
log.error('Only one detector expected but got {}, please specify one of them:{}'
.format(len(non_empty_detectors), det_string))
return det_number
def update_detections(output, detections, frame_number):
for i, detection in enumerate(detections):
entry = {'frame_id': frame_number, 'scores': [], 'boxes': []}
for det in detection:
entry['boxes'].append(det[0])
entry['scores'].append(float(det[1]))
output[i].append(entry)
def save_json_file(save_path, data, description=''):
save_dir = os.path.dirname(save_path)
if save_dir and not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(save_path, 'w') as outfile:
json.dump(data, outfile)
if description:
log.info('{} saved to {}'.format(description, save_path))
class FramesThreadBody:
def __init__(self, capture, max_queue_length=2):
self.process = True
self.frames_queue = queue.Queue()
self.capture = capture
self.max_queue_length = max_queue_length
def __call__(self):
while self.process:
if self.frames_queue.qsize() > self.max_queue_length:
time.sleep(0.1)
has_frames, frames = self.capture.get_frames()
if not has_frames and self.frames_queue.empty():
self.process = False
break
if has_frames:
self.frames_queue.put(frames)
def run(params, config, capture, detector, reid):
win_name = 'Multi camera tracking'
frame_number = 0
avg_latency = AverageEstimator()
output_detections = [[] for _ in range(capture.get_num_sources())]
key = -1
if config['normalizer_config']['enabled']:
capture.add_transform(
NormalizerCLAHE(
config['normalizer_config']['clip_limit'],
config['normalizer_config']['tile_size'],
)
)
tracker = MultiCameraTracker(capture.get_num_sources(), reid, config['sct_config'], **config['mct_config'],
visual_analyze=config['analyzer'])
thread_body = FramesThreadBody(capture, max_queue_length=len(capture.captures) * 2)
frames_thread = Thread(target=thread_body)
frames_thread.start()
if len(params.output_video):
frame_size, fps = capture.get_source_parameters()
target_width, target_height = get_target_size(frame_size, None, **config['visualization_config'])
video_output_size = (target_width, target_height)
fourcc = cv.VideoWriter_fourcc(*'XVID')
output_video = cv.VideoWriter(params.output_video, fourcc, min(fps), video_output_size)
else:
output_video = None
prev_frames = thread_body.frames_queue.get()
detector.run_async(prev_frames, frame_number)
presenter = monitors.Presenter(params.utilization_monitors, 0)
while thread_body.process:
if not params.no_show:
key = check_pressed_keys(key)
if key == 27:
break
presenter.handleKey(key)
start = time.perf_counter()
try:
frames = thread_body.frames_queue.get_nowait()
except queue.Empty:
frames = None
if frames is None:
continue
all_detections = detector.wait_and_grab()
if params.save_detections:
update_detections(output_detections, all_detections, frame_number)
frame_number += 1
detector.run_async(frames, frame_number)
all_masks = [[] for _ in range(len(all_detections))]
for i, detections in enumerate(all_detections):
all_detections[i] = [det[0] for det in detections]
all_masks[i] = [det[2] for det in detections if len(det) == 3]
tracker.process(prev_frames, all_detections, all_masks)
tracked_objects = tracker.get_tracked_objects()
latency = max(time.perf_counter() - start, sys.float_info.epsilon)
avg_latency.update(latency)
fps = round(1. / latency, 1)
vis = visualize_multicam_detections(prev_frames, tracked_objects, fps, **config['visualization_config'])
presenter.drawGraphs(vis)
if not params.no_show:
cv.imshow(win_name, vis)
if output_video:
output_video.write(cv.resize(vis, video_output_size))
print('\rProcessing frame: {}, fps = {} (avg_fps = {:.3})'.format(
frame_number, fps, 1. / avg_latency.get()), end="")
prev_frames, frames = frames, prev_frames
print(presenter.reportMeans())
print('')
thread_body.process = False
frames_thread.join()
if len(params.history_file):
save_json_file(params.history_file, tracker.get_all_tracks_history(), description='History file')
if len(params.save_detections):
save_json_file(params.save_detections, output_detections, description='Detections')
if len(config['embeddings']['save_path']):
save_embeddings(tracker.scts, **config['embeddings'])
def main():
    """Parses arguments and runs the multi camera multi person tracking demo."""
    current_dir = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser(description='Multi camera multi person \
tracking live demo script')
parser.add_argument('-i', type=str, nargs='+', help='Input sources (indexes \
of cameras or paths to video files)', required=True)
parser.add_argument('--config', type=str, default=os.path.join(current_dir, 'config.py'), required=False,
help='Configuration file')
parser.add_argument('--detections', type=str, help='JSON file with bounding boxes')
parser.add_argument('-m', '--m_detector', type=str, required=False,
help='Path to the person detection model')
parser.add_argument('--t_detector', type=float, default=0.6,
help='Threshold for the person detection model')
parser.add_argument('--m_segmentation', type=str, required=False,
help='Path to the person instance segmentation model')
parser.add_argument('--t_segmentation', type=float, default=0.6,
help='Threshold for person instance segmentation model')
parser.add_argument('--m_reid', type=str, required=True,
help='Path to the person re-identification model')
parser.add_argument('--output_video', type=str, default='', required=False,
help='Optional. Path to output video')
parser.add_argument('--history_file', type=str, default='', required=False,
help='Optional. Path to file in JSON format to save results of the demo')
parser.add_argument('--save_detections', type=str, default='', required=False,
help='Optional. Path to file in JSON format to save bounding boxes')
parser.add_argument("--no_show", help="Optional. Don't show output", action='store_true')
parser.add_argument('-d', '--device', type=str, default='CPU')
parser.add_argument('-l', '--cpu_extension',
help='MKLDNN (CPU)-targeted custom layers.Absolute \
path to a shared library with the kernels impl.',
type=str, default=None)
parser.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
args = parser.parse_args()
if check_detectors(args) != 1:
sys.exit(1)
if len(args.config):
log.info('Reading configuration file {}'.format(args.config))
config = read_py_config(args.config)
else:
log.error('No configuration file specified. Please specify parameter \'--config\'')
sys.exit(1)
random.seed(config['random_seed'])
capture = MulticamCapture(args.i)
log.info("Creating Inference Engine")
ie = IECore()
if args.detections:
person_detector = DetectionsFromFileReader(args.detections, args.t_detector)
elif args.m_segmentation:
person_detector = MaskRCNN(ie, args.m_segmentation, args.t_segmentation,
args.device, args.cpu_extension,
capture.get_num_sources())
else:
person_detector = Detector(ie, args.m_detector, args.t_detector,
args.device, args.cpu_extension,
capture.get_num_sources())
if args.m_reid:
person_recognizer = VectorCNN(ie, args.m_reid, args.device, args.cpu_extension)
else:
person_recognizer = None
run(args, config, capture, person_detector, person_recognizer)
log.info('Demo finished successfully')
if __name__ == '__main__':
main()
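# A hedged example invocation; all model paths and camera indexes below are
# placeholders rather than files shipped with this script:
#
#   python3 multi_camera_multi_person_tracking.py \
#       -i 0 1 \
#       --m_detector /path/to/person-detection.xml \
#       --m_reid /path/to/person-reidentification.xml \
#       --config config.py \
#       --output_video demo_output.avi
#
# Exactly one of --m_detector, --m_segmentation or --detections must be supplied
# (enforced by check_detectors), and --m_reid is always required.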
|
_profiler.py
|
# coding: utf-8
"""Profiling Module
Monitors the CPU and Memory usage of an application
"""
# Standard Library
import os
import time
import sys
# Third Party Library
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import multiprocessing as mp
import psutil
class Profiler:
"""
Class used to profile CPU and Memory usage.
"""
__start_time = None
__q = None
__prc_profiler = None
@classmethod
def __monitor(cls, q, prc_application_pid):
"""Monitors CPU and Memory.
Parameters
----------
q: Object
Object of Queue Class
prc_application_pid: Object
Object of Process class
"""
try:
status = 0
prc_profiler_pid = psutil.Process()
            int_cpu_count = cls.server_profile('cpu')
            # total CPU capacity across all cores, expressed in percent (100 per core)
            flt_cpu_max = float(int_cpu_count) * 100.0
lst_cpu = []
lst_mem = []
q_limit = False
while psutil.pid_exists(prc_application_pid.pid):
flt_total_cpu_usage = 0.0
flt_total_mem_usage = 0.0
prc_children = prc_application_pid.children(recursive=True)
flt_total_child_cpu_usage = 0
prc_application_pid.cpu_percent()
for prc_child in prc_children:
prc_child.cpu_percent()
time.sleep(5)
for prc_child in prc_children:
if psutil.pid_exists(prc_child.pid):
flt_total_child_cpu_usage = flt_total_child_cpu_usage + prc_child.cpu_percent()
flt_total_mem_usage = flt_total_mem_usage + prc_child.memory_percent()
flt_total_cpu_usage = prc_application_pid.cpu_percent() + flt_total_child_cpu_usage
flt_total_mem_usage = prc_application_pid.memory_percent() + flt_total_mem_usage
                lst_cpu.append((flt_total_cpu_usage / flt_cpu_max) * 100)
lst_mem.append(flt_total_mem_usage)
                if q_limit:
                    # drop the previous snapshot (status, cpu list, mem list) so the
                    # queue always holds exactly one up-to-date triple for end_profiling()
                    q.get()
                    q.get()
                    q.get()
                q.put(status)
                q.put(lst_cpu)
                q.put(lst_mem)
                q_limit = True
except Exception as error:
status = 1
q.put(status)
q.put(error)
@classmethod
def server_profile(cls, option=None):
"""Retrieve CPU and Memory information.
Parameters
----------
        option: str
            Optional; one of 'cpu' or 'memory'. 'cpu' returns the number of CPU
            cores, 'memory' returns (total, available, used) memory in GiB, and
            the default None returns all four values.
        """
        cpu_count = psutil.cpu_count()
        # psutil reports memory in bytes; shift right by 30 to convert to GiB
        total_mem = psutil.virtual_memory()[0] >> 30
        avail_mem = psutil.virtual_memory()[1] >> 30
        used_mem = psutil.virtual_memory()[3] >> 30
        if option is None:
            return cpu_count, total_mem, avail_mem, used_mem
        elif option == 'cpu':
            return cpu_count
        elif option == 'memory':
            return total_mem, avail_mem, used_mem
        else:
            raise ValueError(
                "Invalid parameter, available options: cpu, memory")
@classmethod
def start_profiling(cls):
"""
Begins profiling.
"""
try:
cls.__start_time = time.time()
prc_application_pid = psutil.Process()
cls.__q = mp.Queue()
cls.__prc_profiler = mp.Process(target=cls.__monitor, args=(cls.__q, prc_application_pid))
cls.__prc_profiler.start()
except Exception as error:
print("Start profiling got error: ", error)
if(cls.__prc_profiler.is_alive()):
cls.__prc_profiler.terminate()
@classmethod
def end_profiling(cls):
"""
Ends Profiling and generates two graphs.
"""
try:
status = cls.__q.get()
if status == 0:
elapsed_time = time.time() - cls.__start_time
lst_cpu = cls.__q.get()
lst_mem = cls.__q.get()
# Plot CPU
cpu = [np.round(i, 2) for i in lst_cpu]
avg_cpu = np.round(sum(lst_cpu)/len(lst_cpu), 2)
cpu = [0.0] + cpu
time_cpu = list(np.round(np.linspace(0, elapsed_time, len(cpu)), 2))
plt.figure(figsize=(15,10))
plt.plot(time_cpu, cpu)
y_pos = np.arange(0, 105, step=5)
y_list = list(np.arange(0, 105, step=5))
plt.yticks(y_pos, y_list)
plt.xlabel('Time(s)')
plt.ylabel('CPU Usage(%)')
plt.title('CPU Profiling')
str_text = "Number of CPU Cores: " + str(cls.server_profile('cpu')) + "\n"
str_text = str_text + "Max CPU Usage: " + str(np.round(max(lst_cpu), 2)) + " %\n"
str_text = str_text + "Average CPU Usage: " + str(avg_cpu) + " %\n"
plt.figtext(0.7, 0.7, str_text)
plt.savefig('CPU_profiling.png')
# Plot MEM
mem = [np.round(i, 2) for i in lst_mem]
avg_mem = np.round(sum(lst_mem)/len(lst_mem), 2)
mem = [0.0] + mem
time_mem = list(np.round(np.linspace(0, elapsed_time, len(mem)), 2))
plt.figure(figsize=(15, 10))
plt.plot(time_mem, mem)
y_pos = np.arange(0, 105, step=5)
y_list = list(np.arange(0, 105, step=5))
plt.yticks(y_pos, y_list)
plt.xlabel('Time(s)')
plt.ylabel('MEM Usage(%)')
plt.title('Memory Profiling')
str_text = "Total Memory: " + str(cls.server_profile('memory')[0]) + " GB\n"
# str_text = str_text + "Used Memory: " + str(cls.server_profile('memory')[2]) + " GB\n"
# str_text = str_text + "Available Memory: " + str(cls.server_profile('memory')[1]) + " GB\n"
str_text = str_text + "Max MEM usage: " + str(np.round(max(lst_mem), 2)) + " %\n"
plt.figtext(0.7, 0.7, str_text)
plt.savefig('MEM_profiling.png')
if(cls.__prc_profiler.is_alive()):
cls.__prc_profiler.terminate()
else:
error = cls.__q.get()
print("Profiler got error: ", error)
if(cls.__prc_profiler.is_alive()):
cls.__prc_profiler.terminate()
except Exception as error:
print("End profiler got error: ", error)
if(cls.__prc_profiler.is_alive()):
cls.__prc_profiler.terminate()
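# A minimal, hedged usage sketch of the Profiler class defined above. The workload is
# purely illustrative; end_profiling() blocks for at least one 5-second sampling
# interval and then writes CPU_profiling.png and MEM_profiling.png to the current
# directory. The __main__ guard also keeps multiprocessing start methods happy.
if __name__ == '__main__':
    Profiler.start_profiling()
    _ = [sum(i * i for i in range(1_000_000)) for _ in range(50)]  # placeholder CPU-bound work
    Profiler.end_profiling()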
|
test_html.py
|
from functools import partial
from importlib import reload
from io import BytesIO, StringIO
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
f"len(list1) == {len(list1)}, "
f"len(list2) == {len(list2)}"
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, match="google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
def test_same_ordering(datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=td.skip_if_no("bs4")),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url_positional_match(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
url, "First Federal Bank of Florida", attrs={"id": "table"}
)
with tm.assert_produces_warning(FutureWarning):
df2 = self.read_html(url, "Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@pytest.mark.xfail(reason="Html file was removed")
@tm.network
def test_banklist_url(self):
url = "https://www.fdic.gov/bank/individual/failed/banklist.html"
df1 = self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "table"}
)
df2 = self.read_html(url, match="Metcalf Bank", attrs={"id": "table"})
assert_framelist_equal(df1, df2)
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, match=".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@tm.network
@pytest.mark.slow
def test_invalid_url(self):
msg = (
"Name or service not known|Temporary failure in name resolution|"
"No tables found"
)
with pytest.raises((URLError, ValueError), match=msg):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, match="Water", skiprows=-1)
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, match="Metcalf", attrs={"id": "table"})[
0
]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(
self.banklist_data, match="Gold Canyon", attrs={"id": "table"}
)[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, match="Arizona", header=1)[0]
assert result.shape == (60, 12)
assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
assert np.allclose(result.loc[0, "sq mi"], 665384.04)
def test_wikipedia_states_multiindex(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
result = self.read_html(data, match="Arizona", index_col=0)[0]
assert result.shape == (60, 11)
assert "Unnamed" in result.columns[-1][1]
assert result.columns.nlevels == 2
assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
msg = (
r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns"
)
with pytest.raises(ParserError, match=msg):
self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
msg = re.escape(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify the row(s) making up the "
"column names"
)
for arg in [True, False]:
with pytest.raises(TypeError, match=msg):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
base_path = os.path.basename(html_encoding_file)
root = os.path.splitext(base_path)[0]
_, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = "" if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
        # force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
# GH 37705
file_path_string = datapath("io", "data", "html", "spam.html")
file_path = Path(file_path_string)
df1 = self.read_html(file_path_string)[0]
df2 = self.read_html(file_path)[0]
tm.assert_frame_equal(df1, df2)
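# These tests are normally collected by pytest from a pandas development checkout,
# for example (path and -k expression are illustrative only):
#
#   pytest pandas/tests/io/test_html.py -k "spam and not url"
#
# Tests marked @tm.network need internet access, and the flavor-parametrized class
# is skipped automatically when the optional bs4 or lxml parsers are not installed.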
|