source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
ws.py | from queue import Queue, Empty
import time
import threading
import logging
import atexit
import json
import ssl
import urllib.parse
from awxkit.config import config
log = logging.getLogger(__name__)
class WSClientException(Exception):
    """Raised for websocket-client-level failures (e.g. connection timeout)."""
    pass
# Event-type constants used when building channel subscription payloads.
changed = 'changed'
limit_reached = 'limit_reached'
status_changed = 'status_changed'
summary = 'summary'
class WSClient(object):
    """Provides a basic means of testing pub/sub notifications with payloads similar to
    'groups': {'jobs': ['status_changed', 'summary'],
               'schedules': ['changed'],
               'ad_hoc_command_events': [ids...],
               'job_events': [ids...],
               'workflow_events': [ids...],
               'project_update_events': [ids...],
               'inventory_update_events': [ids...],
               'system_job_events': [ids...],
               'control': ['limit_reached']}
    e.x:
    ```
    ws = WSClient(token, port=8013, secure=False).connect()
    ws.job_details()
    ... # launch job
    job_messages = [msg for msg in ws]
    ws.ad_hoc_stdout()
    ... # launch ad hoc command
    ad_hoc_messages = [msg for msg in ws]
    ws.close()
    ```
    """

    # Subscription group types
    def __init__(self, token=None, hostname='', port=443, secure=True, session_id=None, csrftoken=None):
        """Build (but do not open) a websocket connection to AWX.

        :param token: OAuth/auth token used as the auth cookie (preferred).
        :param hostname: host to connect to; derived from config.base_url when empty.
        :param port: websocket port (defaults derived from base_url when hostname empty).
        :param secure: use wss:// when True, ws:// otherwise.
        :param session_id: session cookie alternative to token auth.
        :param csrftoken: CSRF token sent with session auth and subscriptions.
        """
        # delay this import, because this is an optional dependency
        import websocket

        if not hostname:
            # Derive connection details from the configured AWX base URL.
            result = urllib.parse.urlparse(config.base_url)
            secure = result.scheme == 'https'
            port = result.port
            if port is None:
                # No explicit port in the URL: fall back to scheme default.
                port = 80
                if secure:
                    port = 443
            # should we be adding result.path here?
            hostname = result.hostname

        self.port = port
        self._use_ssl = secure
        self.hostname = hostname
        self.token = token
        self.session_id = session_id
        self.csrftoken = csrftoken
        self._recv_queue = Queue()
        self._ws_closed = False
        self._ws_connected_flag = threading.Event()
        # Build the auth cookie from whichever credential was supplied.
        if self.token is not None:
            auth_cookie = 'token="{0.token}";'.format(self)
        elif self.session_id is not None:
            auth_cookie = 'sessionid="{0.session_id}"'.format(self)
            if self.csrftoken:
                auth_cookie += ';csrftoken={0.csrftoken}'.format(self)
        else:
            auth_cookie = ''
        pref = 'wss://' if self._use_ssl else 'ws://'
        url = '{0}{1.hostname}:{1.port}/websocket/'.format(pref, self)
        self.ws = websocket.WebSocketApp(url,
                                         on_open=self._on_open,
                                         on_message=self._on_message,
                                         on_error=self._on_error,
                                         on_close=self._on_close,
                                         cookie=auth_cookie)
        self._message_cache = []
        # When truthy, holds dict(jobs=..., events=...) describing a deferred
        # per-job event subscription to establish once AWX reports the job id.
        self._should_subscribe_to_pending_job = False

    def connect(self):
        """Open the websocket in a daemon thread; raises WSClientException on timeout."""
        wst = threading.Thread(target=self._ws_run_forever, args=(self.ws, {"cert_reqs": ssl.CERT_NONE}))
        wst.daemon = True
        wst.start()
        atexit.register(self.close)
        if not self._ws_connected_flag.wait(20):
            raise WSClientException('Failed to establish channel connection w/ AWX.')
        return self

    def close(self):
        # BUG FIX: dropped the old "close method was called, but ignoring" log
        # line -- it contradicted the actual behavior (the socket IS closed).
        if not self._ws_closed:
            log.info('Closing websocket connection.')
            self.ws.close()

    def job_details(self, *job_ids):
        """subscribes to job status, summary, and, for the specified ids, job events"""
        self.subscribe(jobs=[status_changed, summary], job_events=list(job_ids))

    def pending_job_details(self):
        """subscribes to job status and summary, with responsive
        job event subscription for an id provided by AWX
        """
        self.subscribe_to_pending_events('job_events', [status_changed, summary])

    def status_changes(self):
        self.subscribe(jobs=[status_changed])

    def job_stdout(self, *job_ids):
        self.subscribe(jobs=[status_changed], job_events=list(job_ids))

    def pending_job_stdout(self):
        self.subscribe_to_pending_events('job_events')

    # mirror page behavior
    def ad_hoc_stdout(self, *ahc_ids):
        self.subscribe(jobs=[status_changed], ad_hoc_command_events=list(ahc_ids))

    def pending_ad_hoc_stdout(self):
        self.subscribe_to_pending_events('ad_hoc_command_events')

    def project_update_stdout(self, *project_update_ids):
        self.subscribe(jobs=[status_changed], project_update_events=list(project_update_ids))

    def pending_project_update_stdout(self):
        self.subscribe_to_pending_events('project_update_events')

    def inventory_update_stdout(self, *inventory_update_ids):
        self.subscribe(jobs=[status_changed], inventory_update_events=list(inventory_update_ids))

    def pending_inventory_update_stdout(self):
        self.subscribe_to_pending_events('inventory_update_events')

    def workflow_events(self, *wfjt_ids):
        self.subscribe(jobs=[status_changed], workflow_events=list(wfjt_ids))

    def pending_workflow_events(self):
        self.subscribe_to_pending_events('workflow_events')

    def system_job_events(self, *system_job_ids):
        self.subscribe(jobs=[status_changed], system_job_events=list(system_job_ids))

    def pending_system_job_events(self):
        self.subscribe_to_pending_events('system_job_events')

    def subscribe_to_pending_events(self, events, jobs=None):
        """Subscribe to *jobs* groups now and to *events* once a job id arrives.

        BUG FIX: the default was the mutable list ``[status_changed]``; use a
        None sentinel instead (also avoids evaluating module constants at
        definition time).
        """
        if jobs is None:
            jobs = [status_changed]
        self._should_subscribe_to_pending_job = dict(jobs=jobs, events=events)
        self.subscribe(jobs=jobs)

    # mirror page behavior
    def jobs_list(self):
        self.subscribe(jobs=[status_changed, summary], schedules=[changed])

    # mirror page behavior
    def dashboard(self):
        self.subscribe(jobs=[status_changed])

    def subscribe(self, **groups):
        """Sends a subscription request for the specified channel groups.
        ```
        ws.subscribe(jobs=[ws.status_changed, ws.summary],
                     job_events=[1,2,3])
        ```
        """
        self._subscribe(groups=groups)

    def _subscribe(self, **payload):
        payload['xrftoken'] = self.csrftoken
        self._send(json.dumps(payload))

    def unsubscribe(self):
        self._send(json.dumps(dict(groups={}, xrftoken=self.csrftoken)))
        # it takes time for the unsubscribe event to be recieved and consumed and for
        # messages to stop being put on the queue for daphne to send to us
        time.sleep(5)

    def _on_message(self, message):
        message = json.loads(message)
        log.debug('received message: {}'.format(message))

        # A pending job announcement may trigger the deferred subscription.
        if all([message.get('group_name') == 'jobs',
                message.get('status') == 'pending',
                message.get('unified_job_id'),
                self._should_subscribe_to_pending_job]):
            # Only honor the pending subscription when the job kind matches:
            # project updates carry a project_id, other unified jobs do not.
            if bool(message.get('project_id')) == (
                    self._should_subscribe_to_pending_job['events'] == 'project_update_events'):
                self._update_subscription(message['unified_job_id'])

        return self._recv_queue.put(message)

    def _update_subscription(self, job_id):
        subscription = dict(jobs=self._should_subscribe_to_pending_job['jobs'])
        events = self._should_subscribe_to_pending_job['events']
        subscription[events] = [job_id]
        self.subscribe(**subscription)
        self._should_subscribe_to_pending_job = False

    def _on_open(self):
        self._ws_connected_flag.set()

    def _on_error(self, error):
        log.info('Error received: {}'.format(error))

    def _on_close(self):
        log.info('Successfully closed ws.')
        self._ws_closed = True

    def _ws_run_forever(self, sockopt=None, sslopt=None):
        # NOTE(review): connect() passes self.ws as the first positional arg,
        # which lands in the (unused) sockopt parameter; only sslopt is used.
        self.ws.run_forever(sslopt=sslopt)
        log.debug('ws.run_forever finished')

    def _recv(self, wait=False, timeout=10):
        """Pop one message from the receive queue; None when empty (non-blocking by default)."""
        try:
            msg = self._recv_queue.get(wait, timeout)
        except Empty:
            return None
        return msg

    def _send(self, data):
        self.ws.send(data)
        log.debug('successfully sent {}'.format(data))

    def __iter__(self):
        # Drain currently-queued messages; stops at the first falsy/absent one.
        while True:
            val = self._recv()
            if not val:
                return
            yield val
|
camera.py | import cv2
import threading
import time
from datetime import datetime
from ...node import NodeBase
from ...view.cv.facialrecogn import FacialRecognition
from ...model.messages import SayMessage
# Presence-detection states for CameraController.state
SOMEONE_THERE = 1
NO_ONE_THERE = 0
class CameraController(NodeBase):
    """MQTT-driven camera node: runs facial recognition on webcam frames and
    publishes presence transitions ('camera/someonethere' / 'camera/noonethere').
    """

    def __init__(self, node_name="camera", host="127.0.0.1", port=1883,
                 username=None, password=None, subscribe_dict=None, run_sleep=0.1,
                 show_frames=1):
        # BUG FIX: subscribe_dict previously defaulted to the mutable {},
        # shared across all instances; use a None sentinel instead.
        if subscribe_dict is None:
            subscribe_dict = {}
        super().__init__(node_name, host, port, username, password, subscribe_dict, run_sleep)
        self.show_frames = show_frames
        self.init()
        self.vid = cv2.VideoCapture(0)  # default webcam
        self.add_subscribe('+/facialrecognition/init', self.handle_facial_recognition_init)
        self.add_subscribe('+/facialrecognition/start', self.handle_facial_recognition_start)
        self.add_subscribe('+/facialrecognition/stop', self.handle_facial_recognition_stop)
        # TODO: don't use default params
        self.fr = FacialRecognition(confidence=0.85)

    def init(self):
        """Reset all presence-detection state."""
        self.state = NO_ONE_THERE
        self.last_time_someone_seen = None  # datetime of the last positive detection
        self.last_time_someone_seen_threshold = 60  # seconds
        self.last_time_no_one_seen_threshold = 60   # seconds
        self.start_time = datetime.now()
        self.capture_frames = False

    def run(self):
        """Run the MQTT loop in a background thread; process frames on this thread
        (imshow must run on the main thread)."""
        self.run_thread = threading.Thread(target=super().run)
        self.run_thread.start()
        self.running = True
        while self.running:
            if self.capture_frames:
                self.process_frame()
            time.sleep(self.run_sleep)
        self.shutdown()

    def shutdown(self):
        """Release the camera and stop the process."""
        self.vid.release()
        # BUG FIX: was exit(); the site-injected exit() builtin is not
        # guaranteed to exist (python -S, frozen apps). SystemExit is.
        raise SystemExit

    def process_someone_there(self):
        """Record a detection; publish only on the NO_ONE -> SOMEONE transition."""
        print("someone is here ")
        if self.state == NO_ONE_THERE:
            self.state = SOMEONE_THERE
            self.publish("camera/someonethere")
        self.last_time_someone_seen = datetime.now()

    def how_long_no_one_seen(self):
        """Seconds since the last detection (since startup if never detected)."""
        now = datetime.now()
        # BUG FIX: was timedelta.seconds, which wraps at 24h (a gap of one day
        # reported as 0). total_seconds() gives the true elapsed time.
        if self.last_time_someone_seen is None:
            return int((now - self.start_time).total_seconds())
        return int((now - self.last_time_someone_seen).total_seconds())

    def process_no_one_there(self):
        """Publish the NO_ONE transition once the absence threshold elapses."""
        print("no one is there")
        how_long_no_one_seen = self.how_long_no_one_seen()
        if how_long_no_one_seen is not None and how_long_no_one_seen > self.last_time_no_one_seen_threshold:
            self.state = NO_ONE_THERE
            self.publish("camera/noonethere")

    def process_frame(self):
        """Capture/recognize frames until capture is stopped; updates presence state."""
        print("processing frames...")
        cnt = 0
        last_num_of_people = 0
        while self.running and self.capture_frames:
            ret, frame = self.vid.read()
            if not ret:
                # camera hiccup: retry
                continue
            frame, frame_data = self.fr.detect_faces(frame)
            #self.publish("camera/frame", frame_data.to_json())
            #print(frame_data.to_json())
            if self.show_frames:
                # imshow needs to be run in the main thread
                cv2.imshow('frame', frame)
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    self.capture_frames = False
                    break
            else:
                time.sleep(0.1)
            cnt += 1
            num_of_people = frame_data.num_of_people
            if num_of_people > 0:
                self.process_someone_there()
            else:
                self.process_no_one_there()
            print("###last_time_someone_seen: ", self.last_time_someone_seen)
            print("###last_time_someone_seen_threshold: ", self.last_time_someone_seen_threshold)
            print("###how_long_no_one_seen: ", self.how_long_no_one_seen())
            print("###===========================")
        cv2.destroyAllWindows()
        self.publish("camera/photo/complete")

    def handle_facial_recognition_init(self, client, userdata, message):
        self.init()

    def handle_facial_recognition_start(self, client, userdata, message):
        print("###handle_facial_recognition_start")
        self.capture_frames = True

    def handle_facial_recognition_stop(self, client, userdata, message):
        print("###handle_facial_recognition_stop")
        self.capture_frames = False
|
TrackerServer.py | # -*- coding: utf-8 -*-
import logging
from multiprocessing import Process, Queue
import gevent
from sledilnik.TrackerGame import TrackerGame
from so2.servers.Server import Server
class TrackerServer(Server):
    """Tracker process babysitter
    Server spawns an external OpenCV tracker process and reads data from it.
    """
    def __init__(self):
        Server.__init__(self)
        self.logger = logging.getLogger('sledenje-objektom.TrackerServer')
        # Latest game state read from the tracker process (None until first message).
        self.state = None
        # IPC queue the tracker child process writes state updates into.
        self.queue = Queue()
        self.tracker = TrackerGame()
        self.tracker.debug = True
        # NOTE(review): hard-coded camera stream URL -- presumably belongs in
        # configuration; confirm before deployment.
        self.tracker.fileNamesConfig.videoSource = 'http://192.168.1.117/mjpg/video.mjpg'
        # Child process running the tracker loop; restarted by _run() if it dies.
        self.p = Process(target=self.tracker.start, args=(self.queue,))

    def _run(self):
        # Start tracker in another process and open message queue
        self.p.start()
        self.logger.info("Tracker server started.")
        while True:
            # Update state from tracker
            if not self.p.is_alive():
                # Watchdog: respawn the tracker process if it crashed.
                self.logger.warning("Tracker stopped. Restarting...")
                self.p = Process(target=self.tracker.start, args=(self.queue,))
                self.p.start()
            if not self.queue.empty():
                self.state = self.queue.get()
                self.updated.set()
            gevent.sleep(0.01)
            # NOTE(review): clear after the cooperative yield so listeners woken
            # by set() observe the event -- confirm intended placement.
            self.updated.clear()
|
utils.py | # -*- coding: utf-8 -*-
"""
flask_testing.utils
~~~~~~~~~~~~~~~~~~~
Flask unittest integration.
:copyright: (c) 2010 by Dan Jacob.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, with_statement
import gc
import time
try:
import unittest2 as unittest
except ImportError:
import unittest
import multiprocessing
from werkzeug import cached_property
# Use Flask's preferred JSON module so that our runtime behavior matches.
from flask import json_available, templating, template_rendered
if json_available:
from flask import json
# we'll use signals for template-related tests if
# available in this version of Flask
try:
import blinker
_is_signals = True
except ImportError: # pragma: no cover
_is_signals = False
__all__ = ["TestCase"]
class ContextVariableDoesNotExist(Exception):
    """Raised by get_context_variable() when no recorded template context
    contains the requested name."""
    pass
class JsonResponseMixin(object):
    """
    Mixin with testing helper methods
    """
    @cached_property
    def json(self):
        # Parsed JSON body of the response; requires Flask's JSON support.
        if not json_available:  # pragma: no cover
            raise NotImplementedError
        return json.loads(self.data)
def _make_test_response(response_class):
    """Derive a response class that mixes JsonResponseMixin into *response_class*."""
    return type('TestResponse', (response_class, JsonResponseMixin), {})
def _empty_render(template, context, app):
    """
    Used to monkey patch the render_template flask method when
    the render_templates property is set to False in the TestCase
    """
    if not _is_signals:
        return ""
    # Still fire the signal so assertTemplateUsed/get_context_variable work.
    template_rendered.send(app, template=template, context=context)
    return ""
class TestCase(unittest.TestCase):

    # When False, templating._render is monkey patched so templates are
    # recorded (via signals) but never actually rendered.
    render_templates = True
    # When True, force a gc.collect() after every test.
    run_gc_after_test = False

    def create_app(self):
        """
        Create your Flask app here, with any
        configuration you need.
        """
        raise NotImplementedError

    def __call__(self, result=None):
        """
        Does the required setup, doing it here
        means you don't have to call super.setUp
        in subclasses.
        """
        try:
            self._pre_setup()
            super(TestCase, self).__call__(result)
        finally:
            self._post_teardown()

    def _pre_setup(self):
        # Build the app and swap in a response class exposing `.json`.
        self.app = self.create_app()
        self._orig_response_class = self.app.response_class
        self.app.response_class = _make_test_response(self.app.response_class)
        self.client = self.app.test_client()

        # Push a request context so request-bound helpers work in tests.
        self._ctx = self.app.test_request_context()
        self._ctx.push()

        if not self.render_templates:
            # Monkey patch the original template render with a empty render
            self._original_template_render = templating._render
            templating._render = _empty_render

        self.templates = []
        if _is_signals:
            template_rendered.connect(self._add_template)

    def _add_template(self, app, template, context):
        # Only the most recent render is retained: the list is reset before
        # each append.
        if len(self.templates) > 0:
            self.templates = []
        self.templates.append((template, context))

    def _post_teardown(self):
        # Undo everything _pre_setup did, defensively (setup may have failed
        # part-way through, so every attribute is checked before use).
        if getattr(self, '_ctx', None) is not None:
            self._ctx.pop()
            del self._ctx
        if getattr(self, 'app', None) is not None:
            if getattr(self, '_orig_response_class', None) is not None:
                self.app.response_class = self._orig_response_class
            del self.app
        if hasattr(self, 'client'):
            del self.client
        if hasattr(self, 'templates'):
            del self.templates
        if _is_signals:
            template_rendered.disconnect(self._add_template)
        if hasattr(self, '_original_template_render'):
            # BUG FIX: this previously checked and restored `_true_render`,
            # an attribute that is never set anywhere (_pre_setup stores the
            # original in `_original_template_render`), so the templating
            # monkey patch leaked into subsequent tests whenever
            # render_templates was False.
            templating._render = self._original_template_render
        if self.run_gc_after_test:
            gc.collect()

    def assertTemplateUsed(self, name, tmpl_name_attribute='name'):
        """
        Checks if a given template is used in the request.
        Only works if your version of Flask has signals
        support (0.6+) and blinker is installed.
        If the template engine used is not Jinja2, provide
        ``tmpl_name_attribute`` with a value of its `Template`
        class attribute name which contains the provided ``name`` value.
        :versionadded: 0.2
        :param name: template name
        :param tmpl_name_attribute: template engine specific attribute name
        """
        if not _is_signals:
            raise RuntimeError("Signals not supported")
        for template, context in self.templates:
            if getattr(template, tmpl_name_attribute) == name:
                return True
        raise AssertionError("template %s not used" % name)

    assert_template_used = assertTemplateUsed

    def get_context_variable(self, name):
        """
        Returns a variable from the context passed to the
        template. Only works if your version of Flask
        has signals support (0.6+) and blinker is installed.
        Raises a ContextVariableDoesNotExist exception if does
        not exist in context.
        :versionadded: 0.2
        :param name: name of variable
        """
        if not _is_signals:
            raise RuntimeError("Signals not supported")
        for template, context in self.templates:
            if name in context:
                return context[name]
        raise ContextVariableDoesNotExist

    def assertContext(self, name, value):
        """
        Checks if given name exists in the template context
        and equals the given value.
        :versionadded: 0.2
        :param name: name of context variable
        :param value: value to check against
        """
        try:
            self.assertEqual(self.get_context_variable(name), value)
        except ContextVariableDoesNotExist:
            self.fail("Context variable does not exist: %s" % name)

    assert_context = assertContext

    def assertRedirects(self, response, location):
        """
        Checks if response is an HTTP redirect to the
        given location.
        :param response: Flask response
        :param location: relative URL (i.e. without **http://localhost**)
        """
        self.assertTrue(response.status_code in (301, 302))
        self.assertEqual(response.location, "http://localhost" + location)

    assert_redirects = assertRedirects

    def assertStatus(self, response, status_code, message=None):
        """
        Helper method to check matching response status.
        :param response: Flask response
        :param status_code: response status code (e.g. 200)
        :param message: Message to display on test failure
        """
        message = message or 'HTTP Status %s expected but got %s' \
                             % (status_code, response.status_code)
        self.assertEqual(response.status_code, status_code, message)

    assert_status = assertStatus

    def assert200(self, response, message=None):
        """
        Checks if response status code is 200
        :param response: Flask response
        :param message: Message to display on test failure
        """
        self.assertStatus(response, 200, message)

    assert_200 = assert200

    def assert400(self, response, message=None):
        """
        Checks if response status code is 400
        :versionadded: 0.2.5
        :param response: Flask response
        :param message: Message to display on test failure
        """
        self.assertStatus(response, 400, message)

    assert_400 = assert400

    def assert401(self, response, message=None):
        """
        Checks if response status code is 401
        :versionadded: 0.2.1
        :param response: Flask response
        :param message: Message to display on test failure
        """
        self.assertStatus(response, 401, message)

    assert_401 = assert401

    def assert403(self, response, message=None):
        """
        Checks if response status code is 403
        :versionadded: 0.2
        :param response: Flask response
        :param message: Message to display on test failure
        """
        self.assertStatus(response, 403, message)

    assert_403 = assert403

    def assert404(self, response, message=None):
        """
        Checks if response status code is 404
        :param response: Flask response
        :param message: Message to display on test failure
        """
        self.assertStatus(response, 404, message)

    assert_404 = assert404

    def assert405(self, response, message=None):
        """
        Checks if response status code is 405
        :versionadded: 0.2
        :param response: Flask response
        :param message: Message to display on test failure
        """
        self.assertStatus(response, 405, message)

    assert_405 = assert405

    def assert500(self, response, message=None):
        """
        Checks if response status code is 500
        :versionadded: 0.4.1
        :param response: Flask response
        :param message: Message to display on test failure
        """
        self.assertStatus(response, 500, message)

    assert_500 = assert500
# A LiveServerTestCase useful with Selenium or headless browsers
# Inspired by https://docs.djangoproject.com/en/dev/topics/testing/#django.test.LiveServerTestCase
class LiveServerTestCase(unittest.TestCase):
    """Runs the Flask app in a real child process so it can be exercised by
    Selenium or other headless browsers over HTTP."""

    def create_app(self):
        """
        Create your Flask app here, with any
        configuration you need.
        """
        raise NotImplementedError

    def __call__(self, result=None):
        """
        Does the required setup, doing it here means you don't have to
        call super.setUp in subclasses.
        """
        # Get the app
        self.app = self.create_app()
        try:
            self._spawn_live_server()
            super(LiveServerTestCase, self).__call__(result)
        finally:
            self._terminate_live_server()

    def get_server_url(self):
        """
        Return the url of the test server
        """
        return 'http://localhost:%s' % self.port

    def _spawn_live_server(self):
        """Start app.run() in a child process on LIVESERVER_PORT (default 5000)."""
        self._process = None
        self.port = self.app.config.get('LIVESERVER_PORT', 5000)
        worker = lambda app, port: app.run(port=port)
        self._process = multiprocessing.Process(
            target=worker, args=(self.app, self.port)
        )
        self._process.start()

        # we must wait the server start listening
        time.sleep(1)

    def _terminate_live_server(self):
        """Stop and reap the server process."""
        if self._process:
            self._process.terminate()
            # BUG FIX: join after terminate so the child is reaped and no
            # zombie process accumulates across many test cases.
            self._process.join()
|
#!/usr/bin/env python
"Always prints 200, because access to the shared resource is synchronized"
import threading
import time

# Shared counter incremented by all adder threads (2 increments x 100 threads).
COUNT_INT = 0
def adder(count_int_mutex):
    """Add 1 to COUNT_INT twice with a pause in between; each increment is
    performed while holding *count_int_mutex*, so the read-modify-write is atomic.
    """
    global COUNT_INT
    with count_int_mutex:
        COUNT_INT += 1
    time.sleep(0.5)
    with count_int_mutex:
        COUNT_INT += 1
def main():
    """Spawn 100 adder threads sharing one lock, wait for them, print the total (200)."""
    # FIX: removed the needless `global COUNT_INT` (main only reads the global)
    # and renamed the loop variable that previously shadowed threading.Thread.
    count_int_mutex = threading.Lock()
    threads = []
    for _ in range(100):
        worker = threading.Thread(target=adder, args=(count_int_mutex,))
        threads.append(worker)
        worker.start()
    for worker in threads:
        worker.join()
    print(COUNT_INT)


if __name__ == "__main__":
    main()
|
server_voice.py | #!/usr/bin/python3
import socket
import threading
class Server:
    """Tiny TCP relay: every byte a client sends is broadcast to all other clients."""

    def __init__(self):
        self.ip = socket.gethostbyname(socket.gethostname())
        while 1:
            try:
                #self.port = int(input('Enter port number to run on --> '))
                self.port = 4322
                self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.s.bind((self.ip, self.port))
                break
            except OSError:
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; only bind errors belong here.
                print("Couldn't bind to that port")
        self.connections = []
        self.accept_connections()

    def accept_connections(self):
        """Accept clients forever; one daemon-style handler thread per client."""
        self.s.listen(100)
        print('Running on IP: ' + self.ip)
        print('Running on port: ' + str(self.port))
        while True:
            c, addr = self.s.accept()
            self.connections.append(c)
            threading.Thread(target=self.handle_client, args=(c, addr,)).start()

    def broadcast(self, sock, data):
        """Send *data* to every connection except the listener and the sender."""
        for client in self.connections:
            if client != self.s and client != sock:
                try:
                    client.send(data)
                except socket.error:
                    # best-effort: a dead peer is cleaned up by its own handler
                    pass

    def handle_client(self, c, addr):
        """Relay data from one client until it disconnects or errors.

        BUG FIX: the old loop never terminated -- recv() returning b'' (peer
        closed) caused an endless busy loop broadcasting empty payloads, and
        after a socket error the closed socket kept being recv()'d forever.
        The connection is now also removed from self.connections so broadcast
        stops targeting dead sockets.
        """
        while True:
            try:
                data = c.recv(1024)
            except socket.error:
                break
            if not data:
                # empty read means the peer closed the connection
                break
            self.broadcast(c, data)
        if c in self.connections:
            self.connections.remove(c)
        c.close()
server = Server()
|
ipcontrollerapp.py | #!/usr/bin/env python
# encoding: utf-8
"""
The IPython controller application.
Authors:
* Brian Granger
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import with_statement
import os
import socket
import stat
import sys
import uuid
from multiprocessing import Process
import zmq
from zmq.devices import ProcessMonitoredQueue
from zmq.log.handlers import PUBHandler
from zmq.utils import jsonapi as json
from IPython.config.application import boolean_flag
from IPython.core.profiledir import ProfileDir
from IPython.parallel.apps.baseapp import (
BaseParallelApplication,
base_aliases,
base_flags,
)
from IPython.utils.importstring import import_item
from IPython.utils.traitlets import Instance, Unicode, Bool, List, Dict
# from IPython.parallel.controller.controller import ControllerFactory
from IPython.zmq.session import Session
from IPython.parallel.controller.heartmonitor import HeartMonitor
from IPython.parallel.controller.hub import HubFactory
from IPython.parallel.controller.scheduler import TaskScheduler,launch_scheduler
from IPython.parallel.controller.sqlitedb import SQLiteDB
from IPython.parallel.util import signal_children, split_url, asbytes
# conditional import of MongoDB backend class
try:
from IPython.parallel.controller.mongodb import MongoDB
except ImportError:
maybe_mongo = []
else:
maybe_mongo = [MongoDB]
#-----------------------------------------------------------------------------
# Module level variables
#-----------------------------------------------------------------------------
#: The default config file name for this application
default_config_file_name = u'ipcontroller_config.py'

# Long-form description shown in the application's --help output.
_description = """Start the IPython controller for parallel computing.
The IPython controller provides a gateway between the IPython engines and
clients. The controller needs to be started before the engines and can be
configured using command line options or using a cluster directory. Cluster
directories contain config, log and security files and are usually located in
your ipython directory and named as "profile_name". See the `profile`
and `profile-dir` options for details.
"""

# Usage examples appended to the --help output.
_examples = """
ipcontroller --ip=192.168.0.1 --port=1000 # listen on ip, port for engines
ipcontroller --scheme=pure # use the pure zeromq scheduler
"""
#-----------------------------------------------------------------------------
# The main application
#-----------------------------------------------------------------------------
# Command-line flags, layered on top of the shared base parallel-app flags.
# Each value is (config-dict-to-apply, help-string).
flags = {}
flags.update(base_flags)
flags.update({
    'usethreads' : ( {'IPControllerApp' : {'use_threads' : True}},
                    'Use threads instead of processes for the schedulers'),
    'sqlitedb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.sqlitedb.SQLiteDB'}},
                    'use the SQLiteDB backend'),
    'mongodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.mongodb.MongoDB'}},
                    'use the MongoDB backend'),
    'dictdb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.DictDB'}},
                    'use the in-memory DictDB backend'),
    'reuse' : ({'IPControllerApp' : {'reuse_files' : True}},
                    'reuse existing json connection files')
})

# --secure / --no-secure toggle for HMAC message authentication.
flags.update(boolean_flag('secure', 'IPControllerApp.secure',
    "Use HMAC digests for authentication of messages.",
    "Don't authenticate messages."
))

# Short command-line aliases mapped to their full traitlet config names.
aliases = dict(
    secure = 'IPControllerApp.secure',
    ssh = 'IPControllerApp.ssh_server',
    enginessh = 'IPControllerApp.engine_ssh_server',
    location = 'IPControllerApp.location',
    ident = 'Session.session',
    user = 'Session.username',
    keyfile = 'Session.keyfile',
    url = 'HubFactory.url',
    ip = 'HubFactory.ip',
    transport = 'HubFactory.transport',
    port = 'HubFactory.regport',
    ping = 'HeartMonitor.period',
    scheme = 'TaskScheduler.scheme_name',
    hwm = 'TaskScheduler.hwm',
)
aliases.update(base_aliases)
class IPControllerApp(BaseParallelApplication):
name = u'ipcontroller'
description = _description
examples = _examples
config_file_name = Unicode(default_config_file_name)
classes = [ProfileDir, Session, HubFactory, TaskScheduler, HeartMonitor, SQLiteDB] + maybe_mongo
# change default to True
auto_create = Bool(True, config=True,
help="""Whether to create profile dir if it doesn't exist.""")
reuse_files = Bool(False, config=True,
help='Whether to reuse existing json connection files.'
)
secure = Bool(True, config=True,
help='Whether to use HMAC digests for extra message authentication.'
)
ssh_server = Unicode(u'', config=True,
help="""ssh url for clients to use when connecting to the Controller
processes. It should be of the form: [user@]server[:port]. The
Controller's listening addresses must be accessible from the ssh server""",
)
engine_ssh_server = Unicode(u'', config=True,
help="""ssh url for engines to use when connecting to the Controller
processes. It should be of the form: [user@]server[:port]. The
Controller's listening addresses must be accessible from the ssh server""",
)
location = Unicode(u'', config=True,
help="""The external IP or domain name of the Controller, used for disambiguating
engine and client connections.""",
)
import_statements = List([], config=True,
help="import statements to be run at startup. Necessary in some environments"
)
use_threads = Bool(False, config=True,
help='Use threads instead of processes for the schedulers',
)
# internal
children = List()
mq_class = Unicode('zmq.devices.ProcessMonitoredQueue')
def _use_threads_changed(self, name, old, new):
self.mq_class = 'zmq.devices.%sMonitoredQueue'%('Thread' if new else 'Process')
aliases = Dict(aliases)
flags = Dict(flags)
def save_connection_dict(self, fname, cdict):
"""save a connection dict to json file."""
c = self.config
url = cdict['url']
location = cdict['location']
if not location:
try:
proto,ip,port = split_url(url)
except AssertionError:
pass
else:
try:
location = socket.gethostbyname_ex(socket.gethostname())[2][-1]
except (socket.gaierror, IndexError):
self.log.warn("Could not identify this machine's IP, assuming 127.0.0.1."
" You may need to specify '--location=<external_ip_address>' to help"
" IPython decide when to connect via loopback.")
location = '127.0.0.1'
cdict['location'] = location
fname = os.path.join(self.profile_dir.security_dir, fname)
with open(fname, 'wb') as f:
f.write(json.dumps(cdict, indent=2))
os.chmod(fname, stat.S_IRUSR|stat.S_IWUSR)
def load_config_from_json(self):
"""load config from existing json connector files."""
c = self.config
# load from engine config
with open(os.path.join(self.profile_dir.security_dir, 'ipcontroller-engine.json')) as f:
cfg = json.loads(f.read())
key = c.Session.key = asbytes(cfg['exec_key'])
xport,addr = cfg['url'].split('://')
c.HubFactory.engine_transport = xport
ip,ports = addr.split(':')
c.HubFactory.engine_ip = ip
c.HubFactory.regport = int(ports)
self.location = cfg['location']
if not self.engine_ssh_server:
self.engine_ssh_server = cfg['ssh']
# load client config
with open(os.path.join(self.profile_dir.security_dir, 'ipcontroller-client.json')) as f:
cfg = json.loads(f.read())
assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys"
xport,addr = cfg['url'].split('://')
c.HubFactory.client_transport = xport
ip,ports = addr.split(':')
c.HubFactory.client_ip = ip
if not self.ssh_server:
self.ssh_server = cfg['ssh']
assert int(ports) == c.HubFactory.regport, "regport mismatch"
def init_hub(self):
c = self.config
self.do_import_statements()
reusing = self.reuse_files
if reusing:
try:
self.load_config_from_json()
except (AssertionError,IOError):
reusing=False
# check again, because reusing may have failed:
if reusing:
pass
elif self.secure:
key = str(uuid.uuid4())
# keyfile = os.path.join(self.profile_dir.security_dir, self.exec_key)
# with open(keyfile, 'w') as f:
# f.write(key)
# os.chmod(keyfile, stat.S_IRUSR|stat.S_IWUSR)
c.Session.key = asbytes(key)
else:
key = c.Session.key = b''
try:
self.factory = HubFactory(config=c, log=self.log)
# self.start_logging()
self.factory.init_hub()
except:
self.log.error("Couldn't construct the Controller", exc_info=True)
self.exit(1)
if not reusing:
# save to new json config files
f = self.factory
cdict = {'exec_key' : key,
'ssh' : self.ssh_server,
'url' : "%s://%s:%s"%(f.client_transport, f.client_ip, f.regport),
'location' : self.location
}
self.save_connection_dict('ipcontroller-client.json', cdict)
edict = cdict
edict['url']="%s://%s:%s"%((f.client_transport, f.client_ip, f.regport))
edict['ssh'] = self.engine_ssh_server
self.save_connection_dict('ipcontroller-engine.json', edict)
#
def init_schedulers(self):
children = self.children
mq = import_item(str(self.mq_class))
hub = self.factory
# maybe_inproc = 'inproc://monitor' if self.use_threads else self.monitor_url
# IOPub relay (in a Process)
q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A',b'iopub')
q.bind_in(hub.client_info['iopub'])
q.bind_out(hub.engine_info['iopub'])
q.setsockopt_out(zmq.SUBSCRIBE, b'')
q.connect_mon(hub.monitor_url)
q.daemon=True
children.append(q)
# Multiplexer Queue (in a Process)
q = mq(zmq.XREP, zmq.XREP, zmq.PUB, b'in', b'out')
q.bind_in(hub.client_info['mux'])
q.setsockopt_in(zmq.IDENTITY, b'mux')
q.bind_out(hub.engine_info['mux'])
q.connect_mon(hub.monitor_url)
q.daemon=True
children.append(q)
# Control Queue (in a Process)
q = mq(zmq.XREP, zmq.XREP, zmq.PUB, b'incontrol', b'outcontrol')
q.bind_in(hub.client_info['control'])
q.setsockopt_in(zmq.IDENTITY, b'control')
q.bind_out(hub.engine_info['control'])
q.connect_mon(hub.monitor_url)
q.daemon=True
children.append(q)
try:
scheme = self.config.TaskScheduler.scheme_name
except AttributeError:
scheme = TaskScheduler.scheme_name.get_default_value()
# Task Queue (in a Process)
if scheme == 'pure':
self.log.warn("task::using pure XREQ Task scheduler")
q = mq(zmq.XREP, zmq.XREQ, zmq.PUB, b'intask', b'outtask')
# q.setsockopt_out(zmq.HWM, hub.hwm)
q.bind_in(hub.client_info['task'][1])
q.setsockopt_in(zmq.IDENTITY, b'task')
q.bind_out(hub.engine_info['task'])
q.connect_mon(hub.monitor_url)
q.daemon=True
children.append(q)
elif scheme == 'none':
self.log.warn("task::using no Task scheduler")
else:
self.log.info("task::using Python %s Task scheduler"%scheme)
sargs = (hub.client_info['task'][1], hub.engine_info['task'],
hub.monitor_url, hub.client_info['notification'])
kwargs = dict(logname='scheduler', loglevel=self.log_level,
log_url = self.log_url, config=dict(self.config))
if 'Process' in self.mq_class:
# run the Python scheduler in a Process
q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
q.daemon=True
children.append(q)
else:
# single-threaded Controller
kwargs['in_thread'] = True
launch_scheduler(*sargs, **kwargs)
def save_urls(self):
    """Write the engine and client registration URLs to files.

    Creates ``ipcontroller-engine.url`` and ``ipcontroller-client.url``
    in the profile's security directory so engines/clients can discover
    the controller's registration endpoint.
    """
    # NOTE: the unused local `c = self.config` was removed; the method
    # only needs the security dir and the connection factory.
    sec_dir = self.profile_dir.security_dir
    cf = self.factory
    with open(os.path.join(sec_dir, 'ipcontroller-engine.url'), 'w') as f:
        f.write("%s://%s:%s" % (cf.engine_transport, cf.engine_ip, cf.regport))
    with open(os.path.join(sec_dir, 'ipcontroller-client.url'), 'w') as f:
        f.write("%s://%s:%s" % (cf.client_transport, cf.client_ip, cf.regport))
def do_import_statements(self):
    """Execute the configured ``import_statements`` in this process.

    Each entry is a string of Python code (Python 2 ``exec`` statement
    syntax below) run in the module globals; failures are logged and
    deliberately swallowed so one bad statement does not abort startup.
    """
    statements = self.import_statements
    for s in statements:
        try:
            self.log.msg("Executing statement: '%s'" % s)
            # Python 2 exec-statement form; executes in module globals.
            exec s in globals(), locals()
        except:
            # best-effort: log and continue with the remaining statements
            self.log.msg("Error running statement: %s" % s)
def forward_logging(self):
    """Forward this process's log records to a remote zmq PUB socket.

    No-op unless ``self.log_url`` is set; otherwise replaces the current
    log handler with a ``PUBHandler`` connected to that URL.
    """
    if not self.log_url:
        return
    self.log.info("Forwarding logging to %s" % self.log_url)
    pub_socket = zmq.Context.instance().socket(zmq.PUB)
    pub_socket.connect(self.log_url)
    zmq_handler = PUBHandler(pub_socket)
    # swap handlers: drop the old one before installing the PUB handler
    self.log.removeHandler(self._log_handler)
    zmq_handler.root_topic = 'controller'
    zmq_handler.setLevel(self.log_level)
    self.log.addHandler(zmq_handler)
    self._log_handler = zmq_handler
# #
def initialize(self, argv=None):
    """Initialize the controller application.

    Runs the base-class initialization (config/flags parsing), then sets
    up log forwarding, the hub, and the schedulers — in that order, since
    the schedulers read connection info produced by init_hub().
    """
    super(IPControllerApp, self).initialize(argv)
    self.forward_logging()
    self.init_hub()
    self.init_schedulers()
def start(self):
    """Start the controller.

    Launches the hub factory and every queue/scheduler child, registers
    signal handling for subprocess children, writes the pid file, and
    finally enters the event loop until interrupted.
    """
    self.factory.start()
    monitored = []
    for child in self.children:
        child.start()
        # collect actual subprocess handles for signal forwarding
        if isinstance(child, ProcessMonitoredQueue):
            monitored.append(child.launcher)
        elif isinstance(child, Process):
            monitored.append(child)
    if monitored:
        signal_children(monitored)
    self.write_pid_file(overwrite=True)
    try:
        self.factory.loop.start()
    except KeyboardInterrupt:
        self.log.critical("Interrupted, Exiting...\n")
def launch_new_instance():
    """Create and run the IPython controller."""
    if sys.platform == 'win32':
        # Guard against being re-imported by a multiprocessing child:
        # Windows has no fork, so a vanilla-setuptools install can end up
        # spawning infinite Controllers. Only the main process may launch.
        import multiprocessing
        if multiprocessing.current_process().name != 'MainProcess':
            return
    controller_app = IPControllerApp.instance()
    controller_app.initialize()
    controller_app.start()
# Allow running this module directly as a script.
if __name__ == '__main__':
    launch_new_instance()
|
allreduce.py | #!/usr/bin/env python
import os
import torch as th
import torch.distributed as dist
from torch.multiprocessing import Process
def allreduce(send, recv):
    """Ring all-reduce: element-wise sum of every rank's `send` tensor.

    On return, `recv` on every rank holds sum(send_r for all ranks r).
    Each of the size-1 steps forwards a buffer to the right neighbour and
    receives one from the left, alternating between two buffers so a
    value makes a full trip around the ring.

    Requires torch.distributed to be initialized; `send` and `recv` must
    have the same shape.
    """
    rank = dist.get_rank()
    size = dist.get_world_size()
    # BUG FIX: send_buff must start as a copy of the local data
    # (previously th.zeros(...), so the first hop forwarded zeros).
    send_buff = send.clone()
    recv_buff = th.zeros(send.size())
    accum = th.zeros(send.size())
    accum[:] = send[:]
    # th.cuda.synchronize()

    left = ((rank - 1) + size) % size
    right = (rank + 1) % size

    for i in range(size - 1):
        if i % 2 == 0:
            # Send send_buff
            send_req = dist.isend(send_buff, right)
            dist.recv(recv_buff, left)
            # BUG FIX: accumulate the tensor just received from the left
            # neighbour, not the untouched output tensor `recv`.
            accum[:] += recv_buff[:]
        else:
            # Send recv_buff
            send_req = dist.isend(recv_buff, right)
            dist.recv(send_buff, left)
            # BUG FIX: accumulate the received buffer, not the local input.
            accum[:] += send_buff[:]
        send_req.wait()
    # th.cuda.synchronize()
    recv[:] = accum[:]
def run(rank, size):
    """Distributed function to be implemented later.

    Demo worker: repeatedly all-reduces a random CUDA tensor and prints
    the result. Requires a CUDA device and an initialized process group.
    """
    # t = th.ones(2, 2)
    t = th.rand(2, 2).cuda()
    # for _ in range(10000000):
    for _ in range(4):
        c = t.clone()
        # NOTE(review): dist.reduce_op is the legacy spelling; newer torch
        # versions use dist.ReduceOp.SUM — confirm the pinned torch version.
        dist.all_reduce(c, dist.reduce_op.SUM)
        # allreduce(t, c)  # swap in the hand-rolled ring version above
        t.set_(c)
        print(t)
def init_processes(rank, size, fn, backend='mpi'):
    """Initialize the distributed environment, then run `fn(rank, size)`.

    MASTER_ADDR/MASTER_PORT are set for rendezvous-based backends.
    The explicit `rank` is deliberately not passed to init_process_group
    (see the commented line) — presumably because the MPI backend assigns
    ranks itself; confirm before switching backends.
    """
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    # dist.init_process_group(backend, rank=rank, world_size=size)
    dist.init_process_group(backend, world_size=size)
    fn(rank, size)
if __name__ == "__main__":
    # Spawn one worker process per rank and wait for all of them.
    # size is 1 here, so this exercises the setup path without real comms.
    size = 1
    processes = []
    for rank in range(size):
        p = Process(target=init_processes, args=(rank, size, run))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
|
GUI.py | import numpy as np
from PIL import Image
from Parser import Parser
from mazeMaker import MapMaker
from SystemControl import SystemControl
from Tkinter import Tk, Label, Frame, PhotoImage, Toplevel
import scipy.misc
import threading
from moveRobot import moveRobot
import Globals as G
from pynput import keyboard
from pirate import Pirate
from minibotConnector import minibotConnector
from pirateMapMaker import PirateMapMaker
# ECE_MODE = True
#
#
# if ECE_MODE:
# import RPi.GPIO as GPIO
# import a4988
# # This is the hack-iest Python thing, plus don't remove or judge
# else:
# import numpy as GPIO
class Gui:
    """Creates the WALL GUI according to chosen level. Communicates with the wall and the object (2D system/minibot)
    Throws notifications when designated goal is reached, goal is not reached, and when user fails to provide
    the information needed (i.e. the system the GUI is running on)

    Flow: three modal chooser windows (game -> version -> level, driven by
    keyboard events), then the main WALL window that renders the grid and
    runs player code in a worker thread.

    NOTE(review): all attributes below are class-level, so the mutable
    lists (init_OBS, OBS, dead_pirates) are shared across instances —
    fine for the single-instance usage at module bottom, but verify
    before creating more than one Gui.
    """
    # basic stats
    direction = 1            # robot facing direction (compared against G.NORTH/... in hang_robot)
    BACKGROUND = ""          # path of the background image for the current level
    BOUNDARY = 0             # number of grid cells per side of the square map
    GOAL_X = 0
    GOAL_Y = 0
    START_X = 0
    START_Y = 0
    init_OBS = []            # obstacles as of level start, used to restore on reset
    OBS = []                 # current obstacles (Pirate objects)
    level = 1
    game = 0                 # chosen game: MAZE (0) or PIRATES (1)
    game_name = ""
    version = 0              # chosen system: TWO_D (0) or MINIBOT (1)
    TWO_D = 0
    MINIBOT = 1
    MAZE = 0
    PIRATES = 1
    # conditional stats
    dead_pirates = []        # (x, y) of killed pirates, drawn with dead_pirates_file
    # conditional objects
    control = None           # SystemControl or moveRobot, set after version choice
    t = None                 # worker thread that runs the inner start()
    temp_disp = None         # whichever transient Tk window is currently up
    temp_box = None
    choice_serial = 1        # which chooser we are on: 1=game, 2=version, 3=level
    level_label = None
    game_label1 = None
    game_label2 = None
    version_label1 = None
    version_label2 = None
    level_disp_label = None
    root = None              # the main WALL window
    # flags
    start_flag = False       # set when a run was requested; polled by check_status()
    thread_started = False
    dead_flag = False        # previous run finished; a fresh thread is needed
    choice_flag = True       # True while a chooser/notification dialog is active
    choice_lock = True       # debounce: freeze arrow keys once a choice is confirmed
    # file paths
    rfid_file = "input/rfidAttack1.txt"
    target_file = "image/target.png"
    outfile = "image/outfile.gif"
    obstacle_file = "image/Pirate_Hat.png"
    dead_pirates_file = "image/dead_pirate.png"
    path1_file = "image/path1.png"
    path2_file = "image/path2.png"
    path3_file = "image/path3.png"
    path4_file = "image/path4.png"
    bot0_file = "image/robot0.png"
    bot1_file = "image/robot1.png"
    bot2_file = "image/robot2.png"
    bot3_file = "image/robot3.png"
    temp_image = ""
    game_map_for_parser = {}

    def __init__(self):
        """initializes the GUI and truncates the minibot script log"""
        self.start_flag = False
        # self.minibot_con = minibotConnector()
        # NOTE(review): this file handle is never closed — relies on GC.
        clear_file = open("output/minibot_script.txt", "w")
        clear_file.write("")

    def store_game_data(self):
        """after level is chosen, variables related to the game level are stored below"""
        game_data = {}
        if self.game == self.MAZE:
            map_data = MapMaker()
            game_data = map_data.parseMap("levels/" + self.game_name + "_levels/" + self.game_name + "_" +
                                          str(self.level))
            self.game_map_for_parser = game_data
            # game_data = map_data.parseMap("input/sample_map")
            self.BOUNDARY = len(game_data.get("GAME_MAP"))
            self.init_OBS = []
            self.OBS = []
            # getting the coordinates of the map that contains an obstacle
            for row in range(len(game_data.get("GAME_MAP"))):
                for col in range(len(game_data.get("GAME_MAP")[0])):
                    # 1 represents obstacle, 0 represents free space.
                    if game_data.get("GAME_MAP")[row][col] == 1:
                        # static obstacle; the SAME object is shared by both
                        # lists (pirates below get separate copies instead)
                        pirate = Pirate(row, col)
                        pirate.movable = False
                        self.init_OBS.append(pirate)
                        self.OBS.append(pirate)
        elif self.game == self.PIRATES:
            map_data = PirateMapMaker()
            game_data = map_data.parseMap("levels/" + self.game_name + "_levels/" + self.game_name + "_" +
                                          str(self.level))
            self.game_map_for_parser = game_data
            self.BOUNDARY = len(game_data.get("GAME_MAP"))
            self.init_OBS = []
            self.OBS = []
            for index in range(len(game_data.get("GAME_ENEMIES"))):
                temp_data = game_data.get("GAME_ENEMIES")[index]
                temp_path = temp_data.get("ENEMY_PATH")
                # two distinct Pirate objects: one live, one pristine for reset
                pirate = Pirate(temp_path[0][0], temp_path[0][1])
                pirate2 = Pirate(temp_path[0][0], temp_path[0][1])
                pirate.movable = True
                pirate2.movable = True
                pirate.path = temp_path
                pirate2.path = temp_path
                self.init_OBS.append(pirate2)
                self.OBS.append(pirate)
        self.GOAL_X = game_data.get("GAME_GOAL")[0]
        self.GOAL_Y = game_data.get("GAME_GOAL")[1]
        self.START_X = game_data.get("GAME_START")[0]
        self.START_Y = game_data.get("GAME_START")[1]
        self.direction = game_data.get("GAME_START_DIRECTION")
        self.BACKGROUND = game_data.get("GAME_BACKGROUND")
        # storing the map data from mapMaker to the class variables of control
        self.control.startX = self.START_X
        self.control.startY = self.START_Y
        self.control.robotX = self.START_X
        self.control.robotY = self.START_Y
        self.control.GoalX = self.GOAL_X
        self.control.GoalY = self.GOAL_Y
        self.control.dimX = self.BOUNDARY
        self.control.start_dir = self.direction
        self.control.direction = self.control.start_dir
        self.control.OBS = self.OBS

    def make_GUI(self):
        """makes the GUI: runs the three chooser dialogs, installs the global
        key listener, then builds and runs the main WALL window"""
        self.temp_disp = Tk()
        self.temp_disp.title("Game Chooser")
        text_label = Label(text="Please select game version: use up/down arrows")
        self.game_label1 = Label(text="MAZE", bg="light blue")
        self.game_label2 = Label(text="PIRATES")
        text_label.grid(row=0, column=0)
        self.game_label1.grid(row=1, column=0)
        self.game_label2.grid(row=2, column=0)
        self.choice_lock = False

        def on_press(key):
            """defines what the key listener does
            NOTE: Now the ECE end does not have to call a method, they need to simulate key presses.
            'ctrl' confirms, 'alt_l'/'alt_r' move the highlighted choice,
            'shift' resets the running game."""
            try:
                k = key.char  # single-char keys
            except:
                # print('Except: ' + key.name)
                k = key.name  # other keys
            if key == keyboard.Key.esc:
                return False  # stop listener
            if k in ['ctrl']:  # keys interested
                # self.keys.append(k) # store it in global-like variable
                print('Key pressed: ' + k)
                if self.choice_flag:
                    # confirm whichever chooser dialog is currently up
                    if self.choice_serial == 1:
                        # self.game = self.temp_box.curselection()[0]
                        self.choice_lock = True
                        self.temp_disp.destroy()
                        self.choice_serial += 1
                    elif self.choice_serial == 2:
                        # self.version = self.temp_box.curselection()[0]
                        self.choice_lock = True
                        self.temp_disp.destroy()
                        self.choice_serial += 1
                    elif self.choice_serial == 3:
                        # self.level = int(self.temp_box.get())
                        self.choice_lock = True
                        self.temp_disp.destroy()
                        self.choice_serial += 1
                        print(self.choice_serial)
                    else:
                        # choosers done: dismiss notification popups
                        self.temp_disp.withdraw()
                        self.choice_flag = False
                        self.root.focus_set()
                else:
                    # request a code run on a fresh worker thread
                    # NOTE(review): `start` is the closure defined later in
                    # make_GUI; pressing ctrl before the main window is built
                    # would reference it before definition — confirm ordering.
                    if not self.thread_started:
                        self.t = threading.Thread(target=start)
                        self.thread_started = True
                        self.start_flag = True
                    else:
                        if self.dead_flag:
                            self.t = None
                            self.t = threading.Thread(target=start)
                            self.start_flag = True
                            self.dead_flag = False
            if k in ['alt_l']:
                # the up key
                if self.choice_serial == 1:
                    if not self.choice_lock and self.game == 1:
                        self.game -= 1
                        self.game_label1.config(text="MAZE", bg="light blue")
                        self.game_label2.config(text="PIRATES", bg="white")
                elif self.choice_serial == 2:
                    if not self.choice_lock and self.version == 1:
                        self.version -= 1
                        self.version_label1.config(text="2D System", bg="light blue")
                        self.version_label2.config(text="Minibot", bg="white")
                elif self.choice_serial == 3:
                    if not self.choice_lock and self.level < G.MAX_LEVEL:
                        self.level += 1
                        self.level_label.config(text="Please choose your beginning level: " + str(self.level))
            if k in ['alt_r']:
                # the down key
                if self.choice_serial == 1:
                    if not self.choice_lock and self.game == 0:
                        self.game += 1
                        self.game_label1.config(text="MAZE", bg="white")
                        self.game_label2.config(text="PIRATES", bg="light blue")
                elif self.choice_serial == 2:
                    if not self.choice_lock and self.version == 0:
                        self.version += 1
                        self.version_label1.config(text="2D System", bg="white")
                        self.version_label2.config(text="Minibot", bg="light blue")
                elif self.choice_serial == 3:
                    if not self.choice_lock and self.level > 1:
                        self.level -= 1
                        self.level_label.config(text="Please choose your beginning level: " + str(self.level))
            if k in ['shift']:
                # reset the current game back to the level's initial state
                print('Key pressed: ' + k)
                if not self.control.reset_flag:
                    self.control.reset_flag = True
                    self.choice_flag = True
                    """theoretically this should work with the ece's code but it doesn't work here
                    because 'ctrl' and 'shift' are in the same listener. This could be fixed by separating this
                    into two different listeners, again, theoretically."""
                    self.temp_disp = Toplevel(self.root)
                    w = Label(self.temp_disp, text="Resetting, please confirm.")
                    w.pack()
                    self.temp_disp.grab_set()
                    self.control.reset()
                    self.control.time_step = 0
                    self.OBS = self.init_OBS
                    self.control.OBS = self.init_OBS
                    self.dead_pirates = []
                    self.control.dead_pirates = []
                    self.start_flag = False
                    self.dead_flag = True
                    self.control.reset_flag = False
            # return False

        lis = keyboard.Listener(on_press=on_press)
        lis.start()

        # #Motor Scanner Setup
        # stepPin1 = 3
        # dirPin1 = 2
        # enablePin1 = 18
        # sleepPin1 = 4
        #
        # if ECE_MODE:
        #     GPIO.setup(stepPin1, GPIO.OUT)
        #     GPIO.setup(dirPin1, GPIO.OUT)
        #     GPIO.setup(enablePin1, GPIO.OUT)
        #     GPIO.setup(sleepPin1, GPIO.OUT)
        #
        #     GPIO.output(enablePin1, GPIO.LOW)
        #     GPIO.output(sleepPin1, GPIO.LOW)
        #     GPIO.output(dirPin1, GPIO.HIGH)
        #
        # #Motor Vertical
        # stepPin2 = 27
        # dirPin2 = 17
        # enablePin2 = 23
        # sleepPin2 = 22
        #
        # if ECE_MODE:
        #     GPIO.setup(stepPin2, GPIO.OUT)
        #     GPIO.setup(dirPin2, GPIO.OUT)
        #     GPIO.setup(enablePin2, GPIO.OUT)
        #     GPIO.setup(sleepPin2, GPIO.OUT)
        #
        #     GPIO.output(enablePin2, GPIO.LOW)
        #     GPIO.output(sleepPin2, GPIO.LOW)
        #     GPIO.output(dirPin2, GPIO.HIGH)
        #
        # #Motor Horizontal
        # stepPin3 = 9
        # dirPin3 = 10
        # enablePin3 = 24
        # sleepPin3 = 11
        #
        # if ECE_MODE:
        #     GPIO.setup(stepPin3, GPIO.OUT)
        #     GPIO.setup(dirPin3, GPIO.OUT)
        #     GPIO.setup(enablePin3, GPIO.OUT)
        #     GPIO.setup(sleepPin3, GPIO.OUT)
        #
        #     GPIO.output(enablePin3, GPIO.LOW)
        #     GPIO.output(sleepPin3, GPIO.LOW)
        #     GPIO.output(dirPin3, GPIO.HIGH)
        #
        # start_button = 6
        # reset_button = 5
        # scanner_top_pin = 21
        # scanner_bottom_pin = 26
        # horizontal_top_pin = 16
        # horizontal_bottom_pin = 20
        # vertical_top_pin = 13
        # vertical_bottom_pin=19
        #
        # if ECE_MODE:
        #     GPIO.setup(start_button, GPIO.IN)
        #     GPIO.setup(reset_button, GPIO.IN)
        #     GPIO.setup(scanner_top_pin, GPIO.IN)
        #     GPIO.setup(scanner_bottom_pin, GPIO.IN)
        #     GPIO.setup(horizontal_top_pin, GPIO.IN)
        #     GPIO.setup(horizontal_bottom_pin, GPIO.IN)
        #     GPIO.setup(vertical_top_pin, GPIO.IN)
        #     GPIO.setup(vertical_bottom_pin, GPIO.IN)
        #
        # def reset(reset_button):
        #     if not self.control.reset_flag:
        #         self.control.reset_flag = True
        #         self.choice_flag = True
        #         self.temp_disp = Toplevel(self.root)
        #         w = Label(self.temp_disp, text="Resetting, please confirm.")
        #         w.pack()
        #         self.temp_disp.grab_set()
        #         self.control.reset()
        #         self.control.time_step = 0
        #         self.OBS = self.init_OBS
        #         self.control.OBS = self.init_OBS
        #         self.dead_pirates = []
        #         self.control.dead_pirates = []
        #         self.start_flag = False
        #         self.dead_flag = True
        #         self.control.reset_flag = False
        #
        # def start(start_button):
        #     if self.choice_flag:
        #         if self.choice_serial == 1:
        #             # self.game = self.temp_box.curselection()[0]
        #             self.choice_lock = True
        #             self.temp_disp.destroy()
        #             self.choice_serial += 1
        #         elif self.choice_serial == 2:
        #             # self.version = self.temp_box.curselection()[0]
        #             self.choice_lock = True
        #             self.temp_disp.destroy()
        #             self.choice_serial += 1
        #         elif self.choice_serial == 3:
        #             # self.level = int(self.temp_box.get())
        #             self.choice_lock = True
        #             self.temp_disp.destroy()
        #             self.choice_serial += 1
        #         else:
        #             self.temp_disp.withdraw()
        #             self.choice_flag = False
        #             self.root.focus_set()
        #     else:
        #         if not self.thread_started:
        #             self.t = threading.Thread(target=start)
        #             self.thread_started = True
        #             self.start_flag = True
        #         else:
        #             if self.dead_flag:
        #                 self.t = None
        #                 self.t = threading.Thread(target=start)
        #                 self.start_flag = True
        #                 self.dead_flag = False
        #
        # def stop1(scanner_top_pin):
        #     print(' scanner, hit top')
        #     if ECE_MODE:
        #         a4988.moveScannerDown(25)
        #         GPIO.output(enablePin1, GPIO.HIGH) #disable driver
        #
        #
        # def stop2(scanner_bottom_pin):
        #     print('scanner, hit bottom')
        #     if ECE_MODE:
        #         a4988.moveScannerUp(25)
        #         GPIO.output(enablePin1, GPIO.HIGH) #disable driver
        #
        #
        # def stop3(horizontal_top_pin):
        #     print('horizontal , hit top bound')
        #     if ECE_MODE:
        #         a4988.moveHorizontalDown(25)
        #         GPIO.output(enablePin1, GPIO.HIGH) #disable driver
        #
        #
        # def stop4(horizontal_bottom_pin):
        #     print('horizontal , hit bottom bound')
        #     if ECE_MODE:
        #         a4988.moveHorizontalUp(25)
        #         GPIO.output(enablePin1, GPIO.HIGH) #disable driver
        #
        #
        # def stop5(vertical_top_pin):
        #     print('vertical , hit top bound')
        #     if ECE_MODE:
        #         a4988.moveVerticalDown(25)
        #         GPIO.output(enablePin1, GPIO.HIGH) #disable driver
        #
        #
        # def stop6(vertical_bottom_pin):
        #     print('vertical , hit bottom bound')
        #     if ECE_MODE:
        #         a4988.moveVerticalUp(25)
        #         GPIO.output(enablePin1, GPIO.HIGH) #disable driver

        ## if ECE_MODE:
        ##     GPIO.add_event_detect(start_button, GPIO.FALLING, callback=start, bouncetime=2000)
        ##     GPIO.add_event_detect(reset_button, GPIO.FALLING, callback=reset, bouncetime=2000)
        ##     # GPIO.add_event_detect(scanner_bottom_pin, GPIO.FALLING, callback=stop1, bouncetime=2000)
        ##     GPIO.add_event_detect(scanner_top_pin, GPIO.FALLING, callback=stop2, bouncetime=2000)
        ##     GPIO.add_event_detect(horizontal_top_pin, GPIO.FALLING, callback=stop3, bouncetime=2000)
        ##     GPIO.add_event_detect(horizontal_bottom_pin, GPIO.FALLING, callback=stop4, bouncetime=2000)
        ##     GPIO.add_event_detect(vertical_top_pin, GPIO.FALLING, callback=stop5, bouncetime=2000)
        ##     GPIO.add_event_detect(vertical_bottom_pin, GPIO.FALLING, callback=stop6, bouncetime=2000)

        # block until the game chooser is confirmed via the key listener
        self.temp_disp.mainloop()
        if self.game == self.MAZE:
            self.game_name = "maze"
        elif self.game == self.PIRATES:
            self.game_name = "pirate"
        # else:
        #     temp1 = Tk()
        #     temp1.withdraw()
        #     tkMessageBox.showerror("Error", "Please choose a game.")
        # making a choice box here to choose system (2D or minibot)
        self.temp_disp = Tk()
        self.temp_disp.title("Version Chooser")
        text_label1 = Label(text="Please select system version: use up/down arrows", master=self.temp_disp)
        self.version_label1 = Label(text="2D System", bg="light blue", master=self.temp_disp)
        self.version_label2 = Label(text="Minibot", master=self.temp_disp)
        text_label1.grid(row=0, column=0)
        self.version_label1.grid(row=1, column=0)
        self.version_label2.grid(row=2, column=0)
        self.choice_lock = False
        self.temp_disp.mainloop()
        if self.version == self.TWO_D:
            self.control = SystemControl()
        elif self.version == self.MINIBOT:
            self.control = moveRobot()
            # self.minibot_con.start()
        # else:
        #     temp = Tk()
        #     temp.withdraw()
        #     tkMessageBox.showerror("Error", "Please choose a version.")
        # allows the player to choose a level from a spinbox (need to change to buttons in the future)
        self.temp_disp = Tk()
        self.temp_disp.title("Level Chooser")
        self.level_label = Label(self.temp_disp, text="Please choose your beginning level: " + str(self.level))
        self.level_label.grid(row=0, column=0, columnspan=3)
        # self.temp_box = Spinbox(self.temp_disp, from_=1, to=G.MAX_LEVEL)
        self.choice_lock = False
        self.temp_disp.mainloop()
        self.store_game_data()
        self.choice_flag = False
        self.make_grid()
        # Constructs the grid according to defined dimensions and displays it on the GUI
        self.root = Tk()
        self.root.title("WALL")
        self.level_disp_label = Label(self.root, text="Level " + str(self.level))
        self.level_disp_label.grid(row=0, column=1)
        frame = Frame(self.root)
        self.temp_image = self.outfile
        im = PhotoImage(file=self.temp_image, master=self.root)
        im_label = Label(frame, image=im)
        im_label.pack()
        step_label = Label(self.root, text="Time Step: " + str(self.control.time_step))
        step_label.grid(row=0, column=2)

        def update():
            """updates the grid according to the robot's current location/direction"""
            self.make_grid()
            step_label.config(text="Time Step: " + str(self.control.time_step))
            self.temp_image = self.outfile
            tempim = PhotoImage(file=self.temp_image, master=self.root)
            # changes image here
            im_label.config(image=tempim)
            # keep a reference so Tk does not garbage-collect the image
            im_label.image = tempim
            im_label.pack()
            # updates display every 1 second
            self.root.after(1000, update)

        def start():
            """runs the given file of rfid's"""
            # a4988.init()
            p = Parser()
            p.initializeMap(self.game_map_for_parser, self.OBS)
            # a4988.readRFID()
            # a4988.moveScannerDown(5000)
            # a4988.readRFID()
            codeblock = p.runCode(p.translateRFID(self.rfid_file))
            if "Error at Line" in codeblock:
                # parser reported an error: show it in a two-line popup
                s1, s2 = codeblock.split('\n')
                self.choice_flag = True
                self.temp_disp = Toplevel(self.root)
                w1 = Label(self.temp_disp, text=s1)
                w1.grid(row=0, column=0)
                w2 = Label(self.temp_disp, text=s2)
                w2.grid(row=1, column=0)
                self.temp_disp.grab_set()
                self.dead_flag = True
            elif self.version == self.TWO_D:
                if self.control.run(codeblock, self.OBS, self.dead_pirates):
                    self.choice_flag = True
                    self.temp_disp = Toplevel(self.root)
                    w = Label(self.temp_disp, text="Congrats! Goal reached!")
                    w.pack()
                    self.temp_disp.grab_set()
                    self.level += 1
                    self.level_disp_label.config(text="Level " + str(self.level))
                    if not self.level > G.MAX_LEVEL:
                        self.dead_pirates = []
                        self.control.dead_pirates = []
                        self.store_game_data()
                        self.control.time_step = 0
                        self.dead_flag = True
                    else:
                        self.choice_flag = True
                        self.temp_disp = Toplevel(self.root)
                        w = Label(self.temp_disp, text="All levels cleared")
                        w.pack()
                        self.temp_disp.grab_set()
                elif not self.control.reset_flag:
                    # goal missed (and not mid-reset): restore the level
                    self.choice_flag = True
                    self.temp_disp = Toplevel(self.root)
                    w = Label(self.temp_disp, text="Sorry, incorrect code. Please try again.")
                    w.pack()
                    self.temp_disp.grab_set()
                    self.dead_pirates = []
                    self.control.dead_pirates = []
                    self.control.reset()
                    self.control.time_step = 0
                    self.OBS = self.init_OBS
                    self.control.OBS = self.init_OBS
                    self.make_grid()
                    self.temp_image = self.outfile
                    tempim = PhotoImage(file=self.temp_image, master=self.root)
                    # changes image here
                    im_label.config(image=tempim)
                    im_label.image = tempim
                    im_label.pack()
                    self.dead_flag = True
            else:
                # minibot version: run() returns the generated movement script
                script = self.control.run(codeblock, self.OBS, self.dead_pirates)
                file_obj = open("output/minibot_script.txt", "a")
                file_obj.write("****************************EXECUTING****************************\n")
                file_obj.write(script)
                file_obj.write("********************************************************\n")
                if self.control.check_goal():
                    self.choice_flag = True
                    self.temp_disp = Toplevel(self.root)
                    w = Label(self.temp_disp, text="Congrats! Goal reached!")
                    w.pack()
                    self.temp_disp.grab_set()
                    self.level += 1
                    self.level_disp_label.config(text="Level " + str(self.level))
                    if not self.level > G.MAX_LEVEL:
                        self.dead_pirates = []
                        self.control.dead_pirates = []
                        self.store_game_data()
                        self.control.time_step = 0
                        self.dead_flag = True
                    else:
                        self.choice_flag = True
                        self.temp_disp = Toplevel(self.root)
                        w = Label(self.temp_disp, text="All levels cleared")
                        w.pack()
                        self.temp_disp.grab_set()
                elif not self.control.reset_flag:
                    self.choice_flag = True
                    self.temp_disp = Toplevel(self.root)
                    w = Label(self.temp_disp, text="Sorry, incorrect code. Please try again.")
                    w.pack()
                    self.temp_disp.grab_set()
                    reset_script = self.control.reset()
                    file_obj.write("****************************RESETTING****************************\n")
                    file_obj.write(reset_script)
                    file_obj.write("********************************************************\n")
                    # file_obj.close()
                    self.control.time_step = 0
                    self.OBS = self.init_OBS
                    self.control.OBS = self.init_OBS
                    self.dead_pirates = []
                    self.control.dead_pirates = []
                    self.make_grid()
                    self.temp_image = self.outfile
                    tempim = PhotoImage(file=self.temp_image, master=self.root)
                    # changes image here
                    im_label.config(image=tempim)
                    im_label.image = tempim
                    im_label.pack()
                    self.dead_flag = True

        def check_status():
            """checks every second whether the start button has been pressed"""
            if self.start_flag:
                if not self.control.reset_flag:
                    self.t.start()
                    self.start_flag = False
            self.root.after(1000, check_status)

        frame.grid(row=2, columnspan=4)
        update()
        check_status()
        self.root.mainloop()

    def make_grid(self):
        """divides the given background image into given number of blocks, saves the image to outfile.gif
        in the directory"""
        w, h = 600, 600
        data = np.zeros((h, w, 3), dtype=np.uint8)
        temp_im = Image.open(self.BACKGROUND).convert('RGB')
        # NOTE(review): scipy.misc.imresize/imsave were removed in scipy>=1.3;
        # this code presumably pins an older scipy — confirm the environment.
        data[:600, :600, :] = scipy.misc.imresize(temp_im, (600, 600))
        # NOTE(review): integer cell size relies on Python 2 division.
        block_length = 600 / self.BOUNDARY
        div_length = 2
        # draw light-gray grid lines between cells
        for i in range(0, self.BOUNDARY - 1):
            anchor = (i + 1) * block_length
            data[anchor - div_length:anchor + div_length, :, :] = [192, 192, 192]
            data[:, anchor - div_length:anchor + div_length, :] = [192, 192, 192]
        # hanging the target
        self.hang_square_object(data, block_length, self.target_file, self.GOAL_X, self.GOAL_Y)
        # hanging the obstacles
        for i in range(len(self.OBS)):
            self.hang_square_object(data, block_length, self.obstacle_file, self.OBS[i].location[0],
                                    self.OBS[i].location[1])
        # hanging the killed obstacles
        for i in range(len(self.dead_pirates)):
            self.hang_square_object(data, block_length, self.dead_pirates_file, self.dead_pirates[i][0],
                                    self.dead_pirates[i][1])
        # path added to the graph
        for i in range(len(self.OBS)):
            temp_obs = self.OBS[i]
            for j in range(len(temp_obs.path)-1):
                loc1 = temp_obs.path[j]
                loc2 = temp_obs.path[j+1]
                self.hang_path(data, block_length, loc1[0], loc1[1], loc2[0], loc2[1])
        # hanging robot
        self.hang_robot(block_length, data)
        scipy.misc.imsave(self.outfile, data)

    def hang_path(self, array, block_length, x1, y1, x2, y2):
        """draws the path segment between two adjacent cells of a pirate's
        patrol route, picking the arrow image for the travel direction"""
        if x1 == x2:
            # horizontal
            if y1 < y2:
                filename = self.path2_file
            else:
                y1 = y2
                filename = self.path1_file
            target = Image.open(filename).convert('RGB')
            startx = x1 * block_length + (block_length / 4) + (1 * block_length / 4)
            finx = x1 * block_length + (block_length / 4) + (1 * block_length / 4) + (block_length / 2 / 10)
            starty = y1 * block_length + (block_length / 4) + (2 * block_length / 4)
            finy = y1 * block_length + (block_length / 4) + (2 * block_length / 4) + (block_length / 2)
            array[startx:finx, starty:finy, :] = scipy.misc.imresize(target, (block_length / 2 / 10, block_length / 2))
        else:
            # vertical
            if x1 < x2:
                filename = self.path4_file
            else:
                x1 = x2
                filename = self.path3_file
            target = Image.open(filename).convert('RGB')
            startx = x1 * block_length + (3 * block_length / 4)
            finx = x1 * block_length + (3 * block_length / 4) + (block_length / 2)
            starty = y1 * block_length + (2 * block_length / 4)
            finy = y1 * block_length + (2 * block_length / 4) + (block_length / 2 / 10)
            array[startx:finx, starty:finy, :] = scipy.misc.imresize(target, (block_length / 2, block_length / 2 / 10))

    def hang_square_object(self, array, block_length, filename, x, y):
        """hangs the designated object on the GUI (either the target or the obstacle(s)):
        pastes the image, resized to half a cell, centered in cell (x, y)"""
        target = Image.open(filename).convert('RGB')
        startx = x * block_length + (block_length / 4)
        finx = x * block_length + (3 * block_length / 4)
        starty = y * block_length + (block_length / 4)
        finy = y * block_length + (3 * block_length / 4)
        array[startx:finx, starty:finy, :] = scipy.misc.imresize(target, (block_length / 2, block_length / 2))

    def hang_robot(self, block_length, array):
        """hangs the robot according to its current position, choosing the
        sprite that matches the robot's facing direction"""
        if self.control.direction == G.SOUTH:
            self.hang_square_object(array, block_length, self.bot0_file, self.control.robotX, self.control.robotY)
        elif self.control.direction == G.EAST:
            self.hang_square_object(array, block_length, self.bot1_file, self.control.robotX, self.control.robotY)
        elif self.control.direction == G.NORTH:
            self.hang_square_object(array, block_length, self.bot2_file, self.control.robotX, self.control.robotY)
        elif self.control.direction == G.WEST:
            self.hang_square_object(array, block_length, self.bot3_file, self.control.robotX, self.control.robotY)
# Module-level entry point: build and run the GUI on import/execution.
g = Gui()
g.make_GUI()
|
initialize.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from ..libraries import __version__ as version
from ..libraries.project_check import ProjectCheck
from ..libraries.tools import save_sysetting, get_setting
from ..libraries.messages import Messages
class Initialize(ProjectCheck):
    """
    Runs the platformio init command to start working with a new board.

    To initialize a new folder you need to know the board id and pass it
    as an argument to the class: Initialize(board_id)

    The code will run in a new thread to avoid blocking the execution
    of sublime text while platformio is working.
    """

    def __init__(self):
        super(Initialize, self).__init__()
        self.init_option = None

        # deviot output panel used to stream progress messages
        messages = Messages()
        messages.initial_text('_deviot_starting{0}', version)
        messages.create_panel()
        self.init(messages=messages)
        self.print = messages.print

    def add_board(self):
        """New Board

        Adds a new board to the environments of platformio.
        This new board will be stored in the platformio.ini
        file and will be used with the plugin.

        Arguments:
            board_id {str} -- name of the board to initialize

        Returns:
            bool -- True if the board was already initialized;
                    None after triggering a fresh initialization
                    (or when no board is selected)
        """
        self.check_board_selected()
        if(not self.board_id):
            return

        # already initialized? nothing to do
        envs = self.get_envs_initialized()
        if(envs and self.board_id in envs):
            return True

        cmd = ['init', '-b ', self.board_id]
        self.run_command(cmd)

        self.structurize_project()

    def nonblock_add_board(self):
        """New Thread Execution

        Starts a new thread to run the add_board method
        so the editor UI is not blocked.
        """
        from threading import Thread

        thread = Thread(target=self.add_board)
        thread.start()

    def after_complete(self):
        """At complete

        This method runs after completing a compilation or after
        uploading a sketch. You should only put a function or a
        method call here.
        """
        pio_untouch = get_setting('pio_untouch', False)

        if(pio_untouch):
            # remove lib_extra_dirs option
            self.add_option('lib_extra_dirs', wipe=True)
            # remove programmer flags
            self.programmer(wipe=True)
            # remove upload_speed
            self.add_option('upload_speed', wipe=True)

        # none last action
        save_sysetting('last_action', None)
|
rebalance.py | #!/usr/bin/env python3
from pyln.client import Plugin, Millisatoshi, RpcError
from threading import Thread, Lock
from datetime import timedelta
import time
import uuid
# Global plugin instance; RPC methods/options are registered on it below.
plugin = Plugin()
# Cooperative stop flag — presumably polled by long-running rebalance
# loops and set by a stop command; confirm against the rest of the file.
plugin.rebalance_stop = False
def setup_routing_fees(plugin, route, msatoshi):
    """Fill per-hop amount and CLTV delay into `route` (mutated in place).

    Walks the route backwards from the destination: each hop must forward
    the downstream amount plus its own routing fee, with the CLTV delay
    growing by each channel's delta.
    """
    cumulative_delay = plugin.cltv_final
    for hop in reversed(route):
        hop['msatoshi'] = msatoshi.millisatoshis
        hop['amount_msat'] = msatoshi
        hop['delay'] = cumulative_delay
        listing = plugin.rpc.listchannels(hop['channel'])
        edge = next(c for c in listing.get('channels') if c['destination'] == hop['id'])
        hop_fee = Millisatoshi(edge['base_fee_millisatoshi'])
        # BOLT #7 requires fee >= fee_base_msat + ceil(amount * ppm / 1e6);
        # the +10**6-1 // 10**6 is the integer round-up trick.
        hop_fee += (msatoshi * edge['fee_per_millionth'] + 10**6 - 1) // 10**6
        msatoshi += hop_fee
        cumulative_delay += edge['delay']
def get_channel(plugin, payload, peer_id, scid, check_state: bool = False):
    """Return our channel with `peer_id` matching short channel id `scid`.

    With check_state=True, additionally require the channel to be in
    CHANNELD_NORMAL and the peer to be connected, raising RpcError
    otherwise.
    """
    peer = plugin.rpc.listpeers(peer_id).get('peers')[0]
    channel = next(c for c in peer['channels'] if c.get('short_channel_id') == scid)
    if check_state:
        state = channel['state']
        if state != "CHANNELD_NORMAL":
            raise RpcError('rebalance', payload, {'message': 'Channel %s not in state CHANNELD_NORMAL, but: %s' % (scid, state)})
        if not peer['connected']:
            raise RpcError('rebalance', payload, {'message': 'Channel %s peer is not connected.' % scid})
    return channel
def amounts_from_scid(plugin, scid):
    """Return (our_msat, total_msat) for the funded channel with id `scid`."""
    funded = plugin.rpc.listfunds().get('channels')
    channel = next(c for c in funded if c.get('short_channel_id') == scid)
    ours = Millisatoshi(channel['our_amount_msat'])
    total = Millisatoshi(channel['amount_msat'])
    return ours, total
def peer_from_scid(plugin, short_channel_id, my_node_id, payload):
    """Return the node id on the far end of `short_channel_id`.

    Scans the channel's gossip entries for the direction whose source is
    our own node; raises RpcError if no such entry exists.
    """
    for edge in plugin.rpc.listchannels(short_channel_id).get('channels'):
        if edge['source'] == my_node_id:
            return edge['destination']
    raise RpcError("rebalance", payload, {'message': 'Cannot find peer for channel: ' + short_channel_id})
def find_worst_channel(route):
    """Return the intermediate hop that charges the highest fee.

    The fee of hop i is the msatoshi drop from hop i-1 to hop i. The
    first two and last hops are excluded; routes shorter than 4 hops
    have no eligible hop and yield None. Ties keep the earliest hop.
    """
    if len(route) < 4:
        return None
    worst_idx = 2
    worst_fee = route[1]['msatoshi'] - route[2]['msatoshi']
    for i in range(3, len(route) - 1):
        fee = route[i - 1]['msatoshi'] - route[i]['msatoshi']
        if fee > worst_fee:
            worst_fee = fee
            worst_idx = i
    return route[worst_idx]
def cleanup(plugin, label, payload, rpc_result, error=None):
    """Delete the rebalance invoice and normalize the outcome.

    Returns `rpc_result` on success. Rebalance-originated RpcErrors are
    unwrapped into a {'status': 'exception', ...} result; other RpcErrors
    are re-raised. (Non-RpcError `error` values are intentionally not
    raised here, matching the original control flow.)
    """
    try:
        plugin.rpc.delinvoice(label, 'unpaid')
    except RpcError as del_err:
        # Race: waitsendpay timed out but the invoice got paid meanwhile,
        # so deleting an 'unpaid' invoice fails — treat the run as success.
        if 'status is paid' in del_err.error.get('message', ""):
            return rpc_result
    if error is not None and isinstance(error, RpcError):
        # unwrap rebalance errors as 'normal' RPC result
        if error.method == "rebalance":
            return {"status": "exception",
                    "message": error.error.get('message', "error not given")}
        raise error
    return rpc_result
# This function calculates the optimal rebalance amount
# based on the selected channels capacity and state.
# It will return a value that brings at least one of the channels to balance.
# It will raise an error, when this isnt possible.
#
# EXAMPLE
# |------------------- out_total -------------|
# OUT -v => |-------- out_ours -------||-- out_theirs --| => +v
#
# IN +v <= |-- in_ours --||---------- in_theirs ---------| <= -v
# |--------- in_total --------------------------|
#
# CHEAP SOLUTION: take v_min from 50/50 values
# O* vo = out_ours - (out_total/2)
# I* vi = (in_total/2) - in_ours
# return min(vo, vi)
#
# ... and cover edge cases with exceeding in/out capacity or negative values.
def calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload):
    """Return the amount that brings at least one channel to a 50/50 split.

    vo is how far the outgoing channel sits above its half point, vi how far
    the incoming channel sits below its half point.  When one value exceeds
    the other channel's spare capacity (or is negative), fall back to the
    other value; when both are workable, take the cheaper one.

    Raises RpcError when no amount would improve the situation.
    """
    out_ours, out_total = int(out_ours), int(out_total)
    in_ours, in_total = int(in_ours), int(in_total)
    in_theirs = in_total - in_ours
    vo = int(out_ours - out_total / 2)
    vi = int(in_total / 2 - in_ours)
    vi_feasible = 0 < vi < out_ours
    vo_feasible = 0 < vo < in_theirs
    # One option exceeds the other channel's capacity -> use the other one.
    if vo > in_theirs and vi_feasible:
        return Millisatoshi(vi)
    if vi > out_ours and vo_feasible:
        return Millisatoshi(vo)
    # One channel is already past balance (negative) -> use the other one.
    if vo < 0 and vi_feasible:
        return Millisatoshi(vi)
    if vi < 0 and vo_feasible:
        return Millisatoshi(vo)
    # Both options possible -> take the one with least effort.
    if vo_feasible and vi_feasible:
        return Millisatoshi(min(vi, vo))
    raise RpcError("rebalance", payload, {'message': 'rebalancing these channels will make things worse'})
class NoRouteException(Exception):
    """Raised when the getroute strategy cannot find any (more) routes."""
def getroute_basic(plugin: Plugin, targetid, fromid, excludes, msatoshi: Millisatoshi):
    """Try all routes as getroute returns them, without special assumptions.

    Uses less CPU and does not filter any routes.
    """
    # Fix: the docstring used to sit *inside* the try block, where it was a
    # useless expression statement instead of the function's docstring.
    try:
        return plugin.rpc.getroute(targetid,
                                   fromid=fromid,
                                   exclude=excludes,
                                   msatoshi=msatoshi,
                                   maxhops=plugin.maxhops,
                                   riskfactor=10, cltv=9)
    except RpcError as e:
        # Code 205: could not find a route.  The basic strategy has nothing
        # left to vary, so signal the caller to give up.
        if e.method == "getroute" and e.error.get('code') == 205:
            raise NoRouteException
        raise e
def getroute_iterative(plugin: Plugin, targetid, fromid, excludes, msatoshi: Millisatoshi):
    """Search 'shorter and bigger pipes' first to raise the odds of success
    on a short timeout.  Can be useful for manual `rebalance`.
    """
    try:
        return plugin.rpc.getroute(targetid,
                                   fromid=fromid,
                                   exclude=excludes,
                                   msatoshi=msatoshi * plugin.msatfactoridx,
                                   maxhops=plugin.maxhopidx,
                                   riskfactor=10, cltv=9)
    except RpcError as err:
        # Anything other than 'could not find route' is not ours to handle.
        if err.method != "getroute" or err.error.get('code') != 205:
            raise err
        # No route: first look for smaller channels ...
        plugin.msatfactoridx -= 1
        if plugin.msatfactoridx < 1:
            # ... then allow longer routes and reset the capacity factor.
            plugin.maxhopidx += 1
            plugin.msatfactoridx = plugin.msatfactor
        # Abort once the maxhop limit is exhausted.
        if plugin.maxhops > 0 and plugin.maxhopidx > plugin.maxhops:
            raise NoRouteException
        raise err
def getroute_switch(method_name):
    """Map an option string to a getroute strategy (default: iterative)."""
    if method_name == "basic":
        return getroute_basic
    return getroute_iterative
@plugin.method("rebalance")
def rebalance(plugin, outgoing_scid, incoming_scid, msatoshi: Millisatoshi = None,
              retry_for: int = 60, maxfeepercent: float = 0.5,
              exemptfee: Millisatoshi = Millisatoshi(5000),
              getroute_method=None):
    """Rebalancing channel liquidity with circular payments.
    This tool helps to move some msatoshis between your channels.
    """
    # Parameters may arrive as strings over the RPC, normalize them first.
    if msatoshi:
        msatoshi = Millisatoshi(msatoshi)
    retry_for = int(retry_for)
    maxfeepercent = float(maxfeepercent)
    if getroute_method is None:
        getroute = plugin.getroute
    else:
        getroute = getroute_switch(getroute_method)
    exemptfee = Millisatoshi(exemptfee)
    # payload is echoed back in every RpcError we raise.
    payload = {
        "outgoing_scid": outgoing_scid,
        "incoming_scid": incoming_scid,
        "msatoshi": msatoshi,
        "retry_for": retry_for,
        "maxfeepercent": maxfeepercent,
        "exemptfee": exemptfee
    }
    # Resolve both scids to peers and sanity-check the channels.
    my_node_id = plugin.rpc.getinfo().get('id')
    outgoing_node_id = peer_from_scid(plugin, outgoing_scid, my_node_id, payload)
    incoming_node_id = peer_from_scid(plugin, incoming_scid, my_node_id, payload)
    get_channel(plugin, payload, outgoing_node_id, outgoing_scid, True)
    get_channel(plugin, payload, incoming_node_id, incoming_scid, True)
    out_ours, out_total = amounts_from_scid(plugin, outgoing_scid)
    in_ours, in_total = amounts_from_scid(plugin, incoming_scid)

    # If amount was not given, calculate a suitable 50/50 rebalance amount
    if msatoshi is None:
        msatoshi = calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload)
        plugin.log("Estimating optimal amount %s" % msatoshi)

    # Check the requested amount fits both selected channels' capacities
    if msatoshi > out_ours or msatoshi > in_total - in_ours:
        raise RpcError("rebalance", payload, {'message': 'Channel capacities too low'})

    plugin.log(f"starting rebalance out_scid:{outgoing_scid} in_scid:{incoming_scid} amount:{msatoshi}", 'debug')

    # First and last hops are our own channels; direction is derived from the
    # lexicographic node-id ordering used in channel announcements.
    route_out = {'id': outgoing_node_id, 'channel': outgoing_scid, 'direction': int(not my_node_id < outgoing_node_id)}
    route_in = {'id': my_node_id, 'channel': incoming_scid, 'direction': int(not incoming_node_id < my_node_id)}
    start_ts = int(time.time())
    # The circular payment pays our own invoice (expiry outlives retry_for).
    label = "Rebalance-" + str(uuid.uuid4())
    description = "%s to %s" % (outgoing_scid, incoming_scid)
    invoice = plugin.rpc.invoice(msatoshi, label, description, retry_for + 60)
    payment_hash = invoice['payment_hash']
    rpc_result = None
    excludes = [my_node_id]  # exclude all own channels to prevent shortcuts
    nodes = {}  # here we store erring node counts
    plugin.maxhopidx = 1  # start with short routes and increase
    plugin.msatfactoridx = plugin.msatfactor  # start with high capacity factor
    # and decrease to reduce WIRE_TEMPORARY failures because of imbalances

    # 'disable' maxhops filter if set to <= 0
    # I know this is ugly, but we don't ruin the rest of the code this way
    if plugin.maxhops <= 0:
        plugin.maxhopidx = 20

    # trace stats
    count = 0
    count_sendpay = 0
    time_getroute = 0
    time_sendpay = 0

    try:
        # Retry loop: search a route, check fees, try to pay, classify errors.
        while int(time.time()) - start_ts < retry_for and not plugin.rebalance_stop:
            count += 1
            try:
                time_start = time.time()
                r = getroute(plugin,
                             targetid=incoming_node_id,
                             fromid=outgoing_node_id,
                             excludes=excludes,
                             msatoshi=msatoshi)
                time_getroute += time.time() - time_start
            except NoRouteException:
                # no more chance for a successful getroute
                rpc_result = {'status': 'error', 'message': 'No suitable routes found'}
                return cleanup(plugin, label, payload, rpc_result)
            except RpcError as e:
                # getroute can be successful next time with different parameters
                if e.method == "getroute" and e.error.get('code') == 205:
                    continue
                else:
                    raise e
            # Wrap the found mid-route with our own out/in legs and set fees.
            route_mid = r['route']
            route = [route_out] + route_mid + [route_in]
            setup_routing_fees(plugin, route, msatoshi)
            fees = route[0]['amount_msat'] - msatoshi
            # check fee and exclude worst channel the next time
            # NOTE: the int(msat) casts are just a workaround for outdated pylightning versions
            if fees > exemptfee and int(fees) > int(msatoshi) * maxfeepercent / 100:
                worst_channel = find_worst_channel(route)
                if worst_channel is None:
                    raise RpcError("rebalance", payload, {'message': 'Insufficient fee'})
                excludes.append(worst_channel['channel'] + '/' + str(worst_channel['direction']))
                continue
            rpc_result = {"sent": msatoshi + fees, "received": msatoshi, "fee": fees, "hops": len(route),
                          "outgoing_scid": outgoing_scid, "incoming_scid": incoming_scid, "status": "complete",
                          "message": f"{msatoshi + fees} sent over {len(route)} hops to rebalance {msatoshi}"}
            plugin.log("Sending %s over %d hops to rebalance %s" % (msatoshi + fees, len(route), msatoshi), 'debug')
            # (reuses the name `r`; the getroute result is no longer needed)
            for r in route:
                plugin.log(" - %s %14s %s" % (r['id'], r['channel'], r['amount_msat']), 'debug')
            time_start = time.time()
            count_sendpay += 1
            try:
                # Pay our own invoice over the circular route; cap the wait on
                # the remaining overall retry budget.
                plugin.rpc.sendpay(route, payment_hash)
                running_for = int(time.time()) - start_ts
                result = plugin.rpc.waitsendpay(payment_hash, max(retry_for - running_for, 0))
                time_sendpay += time.time() - time_start
                if result.get('status') == "complete":
                    rpc_result["stats"] = f"running_for:{int(time.time()) - start_ts} count_getroute:{count} time_getroute:{time_getroute} time_getroute_avg:{time_getroute / count} count_sendpay:{count_sendpay} time_sendpay:{time_sendpay} time_sendpay_avg:{time_sendpay / count_sendpay}"
                    return cleanup(plugin, label, payload, rpc_result)
            except RpcError as e:
                time_sendpay += time.time() - time_start
                plugin.log(f"maxhops:{plugin.maxhopidx} msatfactor:{plugin.msatfactoridx} running_for:{int(time.time()) - start_ts} count_getroute:{count} time_getroute:{time_getroute} time_getroute_avg:{time_getroute / count} count_sendpay:{count_sendpay} time_sendpay:{time_sendpay} time_sendpay_avg:{time_sendpay / count_sendpay}", 'debug')
                # plugin.log(f"RpcError: {str(e)}", 'debug')
                # check if we ran into the `rpc.waitsendpay` timeout
                if e.method == "waitsendpay" and e.error.get('code') == 200:
                    raise RpcError("rebalance", payload, {'message': 'Timeout reached'})
                # check if we have problems with our own channels
                erring_node = e.error.get('data', {}).get('erring_node')
                erring_channel = e.error.get('data', {}).get('erring_channel')
                erring_direction = e.error.get('data', {}).get('erring_direction')
                if erring_channel == incoming_scid:
                    raise RpcError("rebalance", payload, {'message': 'Error with incoming channel'})
                if erring_channel == outgoing_scid:
                    raise RpcError("rebalance", payload, {'message': 'Error with outgoing channel'})
                # exclude other erroring channels
                if erring_channel is not None and erring_direction is not None:
                    excludes.append(erring_channel + '/' + str(erring_direction))
                # count and exclude nodes that produce a lot of errors
                if erring_node and plugin.erringnodes > 0:
                    if nodes.get(erring_node) is None:
                        nodes[erring_node] = 0
                    nodes[erring_node] += 1
                    if nodes[erring_node] >= plugin.erringnodes:
                        excludes.append(erring_node)
    except Exception as e:
        return cleanup(plugin, label, payload, rpc_result, e)
    rpc_result = {'status': 'error', 'message': 'Timeout reached'}
    return cleanup(plugin, label, payload, rpc_result)
def a_minus_b(a: Millisatoshi, b: Millisatoshi):
    """Subtract b from a, clamped at zero (Millisatoshi cannot be negative)."""
    if a > b:
        return a - b
    return Millisatoshi(0)
def must_send(liquidity):
    """Liquidity is too high: the amount we are forced to send."""
    return a_minus_b(liquidity["min"], liquidity["their"])
def should_send(liquidity):
    """Liquidity is a bit high: the amount it would be good to send."""
    return a_minus_b(liquidity["ideal"]["their"], liquidity["their"])
def could_send(liquidity):
    """Liquidity maybe a bit low, but we can still send some more if needed."""
    return a_minus_b(liquidity["our"], liquidity["min"])
def must_receive(liquidity):
    """Liquidity is too low: the amount we are forced to receive."""
    return a_minus_b(liquidity["min"], liquidity["our"])
def should_receive(liquidity):
    """Liquidity is a bit low: the amount it would be good to receive."""
    return a_minus_b(liquidity["ideal"]["our"], liquidity["our"])
def could_receive(liquidity):
    """Liquidity maybe a bit high, but we can still receive more if needed."""
    return a_minus_b(liquidity["their"], liquidity["min"])
def get_open_channels(plugin: Plugin):
    """Return every public channel that is in normal operating state."""
    return [ch
            for peer in plugin.rpc.listpeers()["peers"]
            for ch in peer["channels"]
            if ch["state"] == "CHANNELD_NORMAL" and not ch["private"]]
def check_liquidity_threshold(channels: list, threshold: Millisatoshi):
    """Check whether rebalances can succeed overall with this threshold."""
    ours = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    # Every channel needs up to `threshold`, capped at half its capacity.
    required = sum((min(threshold, ch["total_msat"] / 2) for ch in channels),
                   Millisatoshi(0))
    return required < ours and required < total - ours
def get_enough_liquidity_threshold(channels: list):
    """Binary-search the highest workable 'enough liquidity' threshold.

    The search space is (0, half of the biggest channel]; the final value is
    halved once more — presumably as a safety margin (matches original).
    """
    lo = Millisatoshi(0)
    hi = max(ch["total_msat"] for ch in channels) / 2
    while True:
        mid = (lo + hi) / 2
        if hi - lo < Millisatoshi("1sat"):
            break
        if check_liquidity_threshold(channels, mid):
            lo = mid
        else:
            hi = mid
    return mid / 2
def get_ideal_ratio(channels: list, enough_liquidity: Millisatoshi):
    # ideal liquidity ratio for big channels:
    # small channels should have a 50/50 liquidity ratio to be usable
    # and big channels can store the remaining liquidity above the threshold
    assert len(channels) > 0
    our = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    chs = list(channels)  # get a copy!
    # Peel off the smallest channels one by one, subtracting their share from
    # the running totals, until even the smallest remaining channel can hold
    # the threshold on its scarcer side at the current ratio.
    while len(chs) > 0:
        ratio = int(our) / int(total)
        smallest_channel = min(chs, key=lambda ch: ch["total_msat"])
        if smallest_channel["total_msat"] * min(ratio, 1 - ratio) > enough_liquidity:
            break
        # This small channel's share at the current ratio, clamped so it
        # keeps at least min_liquidity on each side.
        min_liquidity = min(smallest_channel["total_msat"] / 2, enough_liquidity)
        diff = smallest_channel["total_msat"] * ratio
        diff = max(diff, min_liquidity)
        diff = min(diff, smallest_channel["total_msat"] - min_liquidity)
        our -= diff
        total -= smallest_channel["total_msat"]
        chs.remove(smallest_channel)
    assert 0 <= ratio and ratio <= 1
    return ratio
def feeadjust_would_be_nice(plugin: Plugin):
    """Invoke the feeadjuster plugin's `feeadjust` method if it is loaded."""
    available = [c for c in plugin.rpc.help().get("help")
                 if c["command"].split()[0] == "feeadjust"]
    if len(available) != 1:
        plugin.log("The feeadjuster plugin would be useful here")
        return
    msg = plugin.rpc.feeadjust()
    plugin.log(f"Feeadjust succeeded: {msg}")
def get_max_amount(i: int, plugin: Plugin):
    """Cap the rebalance amount for retry round *i* (shrinks 4x per round)."""
    shrinking_cap = plugin.enough_liquidity / (4 ** (i + 1))
    return max(plugin.min_amount, shrinking_cap)
def get_max_fee(plugin: Plugin, msat: Millisatoshi):
    """Maximum fee we accept: our own fee for this amount, scaled by feeratio."""
    # TODO: sanity check
    own_fee = plugin.fee_base + msat * plugin.fee_ppm / 10**6
    return own_fee * plugin.feeratio
def get_chan(plugin: Plugin, scid: str):
    """Look up one of our channels by short_channel_id; None when unknown."""
    for peer in plugin.rpc.listpeers()["peers"]:
        # A peer may have multiple channel entries, e.g. one just closed
        # and another reopened — match by scid, not by peer.
        for chan in peer["channels"]:
            if chan.get("short_channel_id") == scid:
                return chan
def liquidity_info(channel, enough_liquidity: Millisatoshi, ideal_ratio: float):
    """Compute our/their/min/max/ideal liquidity figures for one channel."""
    total = channel["total_msat"]
    ours = channel["to_us_msat"]
    info = {
        "our": ours,
        "their": total - ours,
        "min": min(enough_liquidity, total / 2),
        "max": max(a_minus_b(total, enough_liquidity), total / 2),
        "ideal": {}
    }
    # Clamp the ideal split into the [min, max] corridor.
    info["ideal"]["our"] = min(max(total * ideal_ratio, info["min"]), info["max"])
    info["ideal"]["their"] = min(max(total * (1 - ideal_ratio), info["min"]), info["max"])
    return info
def wait_for(success, timeout: int = 60):
    """Poll *success* with exponential backoff (capped at 5s per sleep).

    Returns True as soon as success() is truthy, False on timeout.
    Taken and modified from pyln-testing/pyln/testing/utils.py.
    """
    deadline = time.time() + timeout
    interval = 0.25
    while not success():
        remaining = deadline - time.time()
        if remaining <= 0:
            return False
        time.sleep(min(interval, remaining))
        interval = min(interval * 2, 5)
    return True
def wait_for_htlcs(plugin, failed_channels: list, scids: list = None):
    """Wait until pending HTLCs are settled on the given (or all) channels.

    Channels already listed in failed_channels are skipped and force a False
    result; channels that time out are appended to failed_channels.
    Taken and modified from pyln-testing/pyln/testing/utils.py.
    """
    settled = True
    for p, peer in enumerate(plugin.rpc.listpeers()['peers']):
        for c, channel in enumerate(peer.get('channels', [])):
            scid = channel.get('short_channel_id')
            if scids is not None and scid not in scids:
                continue
            if scid in failed_channels:
                settled = False
                continue
            if 'htlcs' not in channel:
                continue
            # Re-query on every poll; p/c index back into the fresh listing.
            if not wait_for(lambda: len(plugin.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0):
                failed_channels.append(scid)
                plugin.log(f"Timeout while waiting for htlc settlement in channel {scid}")
                settled = False
    return settled
def maybe_rebalance_pairs(plugin: Plugin, ch1, ch2, failed_channels: list):
    # Try to rebalance from ch1 into ch2 repeatedly, shrinking the amount on
    # failure, until the pair is balanced, fails, or the user stops us.
    # Returns {"success": bool, "fee_spent": Millisatoshi}.
    scid1 = ch1["short_channel_id"]
    scid2 = ch2["short_channel_id"]
    result = {"success": False, "fee_spent": Millisatoshi(0)}
    if scid1 + ":" + scid2 in failed_channels:
        return result
    # check if HTLCs are settled
    if not wait_for_htlcs(plugin, failed_channels, [scid1, scid2]):
        return result
    i = 0
    while not plugin.rebalance_stop:
        liquidity1 = liquidity_info(ch1, plugin.enough_liquidity, plugin.ideal_ratio)
        liquidity2 = liquidity_info(ch2, plugin.enough_liquidity, plugin.ideal_ratio)
        # Pick the strongest justification: forced send, ideal send, or
        # forced receive — whichever allows the biggest amount.
        amount1 = min(must_send(liquidity1), could_receive(liquidity2))
        amount2 = min(should_send(liquidity1), should_receive(liquidity2))
        amount3 = min(could_send(liquidity1), must_receive(liquidity2))
        amount = max(amount1, amount2, amount3)
        if amount < plugin.min_amount:
            return result
        # Cap by the per-round maximum (shrinks as i grows after failures).
        amount = min(amount, get_max_amount(i, plugin))
        maxfee = get_max_fee(plugin, amount)
        plugin.log(f"Try to rebalance: {scid1} -> {scid2}; amount={amount}; maxfee={maxfee}")
        start_ts = time.time()
        try:
            res = rebalance(plugin, outgoing_scid=scid1, incoming_scid=scid2,
                            msatoshi=amount, retry_for=1200, maxfeepercent=0,
                            exemptfee=maxfee)
            if not res.get('status') == 'complete':
                raise Exception  # fall into exception handler below
        except Exception:
            failed_channels.append(scid1 + ":" + scid2)
            # rebalance failed, let's try with a smaller amount
            while (get_max_amount(i, plugin) >= amount and
                   get_max_amount(i, plugin) != get_max_amount(i + 1, plugin)):
                i += 1
            # Retry only if the next round's cap is actually smaller.
            if amount > get_max_amount(i, plugin):
                continue
            return result
        result["success"] = True
        result["fee_spent"] += res["fee"]
        htlc_start_ts = time.time()
        # wait for settlement
        htlc_success = wait_for_htlcs(plugin, failed_channels, [scid1, scid2])
        current_ts = time.time()
        res["elapsed_time"] = str(timedelta(seconds=current_ts - start_ts))[:-3]
        res["htlc_time"] = str(timedelta(seconds=current_ts - htlc_start_ts))[:-3]
        plugin.log(f"Rebalance succeeded: {res}")
        if not htlc_success:
            return result
        # Refresh the channel snapshots before the next iteration.
        ch1 = get_chan(plugin, scid1)
        assert ch1 is not None
        ch2 = get_chan(plugin, scid2)
        assert ch2 is not None
    return result
def maybe_rebalance_once(plugin: Plugin, failed_channels: list):
    """Try every ordered channel pair until one rebalance succeeds."""
    channels = get_open_channels(plugin)
    for out_ch in channels:
        for in_ch in channels:
            if out_ch == in_ch:
                continue
            result = maybe_rebalance_pairs(plugin, out_ch, in_ch, failed_channels)
            if result["success"] or plugin.rebalance_stop:
                return result
    return {"success": False, "fee_spent": Millisatoshi(0)}
def feeadjuster_toggle(plugin: Plugin, new_value: bool):
    """Switch the feeadjuster plugin on/off; return its previous state.

    When the plugin is not installed, pretend it was enabled (True).
    """
    matches = [c for c in plugin.rpc.help().get("help")
               if c["command"].split()[0] == "feeadjuster-toggle"]
    if len(matches) != 1:
        return True
    msg = plugin.rpc.feeadjuster_toggle(new_value)
    return msg["forward_event_subscription"]["previous"]
def rebalanceall_thread(plugin: Plugin):
    # Background worker for `rebalanceall`.  The mutex both prevents parallel
    # runs and lets `rebalancestop` wait for this thread to finish.
    if not plugin.mutex.acquire(blocking=False):
        return
    try:
        start_ts = time.time()
        # Disable the feeadjuster while we move liquidity around; its previous
        # state is restored afterwards.
        feeadjuster_state = feeadjuster_toggle(plugin, False)
        channels = get_open_channels(plugin)
        plugin.enough_liquidity = get_enough_liquidity_threshold(channels)
        plugin.ideal_ratio = get_ideal_ratio(channels, plugin.enough_liquidity)
        plugin.log(f"Automatic rebalance is running with enough liquidity threshold: {plugin.enough_liquidity}, "
                   f"ideal liquidity ratio: {plugin.ideal_ratio * 100:.2f}%, "
                   f"min rebalancable amount: {plugin.min_amount}, "
                   f"feeratio: {plugin.feeratio}")
        failed_channels = []
        success = 0
        fee_spent = Millisatoshi(0)
        # Keep rebalancing until a full pass yields no success or we are told
        # to stop.
        while not plugin.rebalance_stop:
            result = maybe_rebalance_once(plugin, failed_channels)
            if not result["success"]:
                break
            success += 1
            fee_spent += result["fee_spent"]
        feeadjust_would_be_nice(plugin)
        feeadjuster_toggle(plugin, feeadjuster_state)
        elapsed_time = timedelta(seconds=time.time() - start_ts)
        plugin.rebalanceall_msg = f"Automatic rebalance finished: {success} successful rebalance, {fee_spent} fee spent, it took {str(elapsed_time)[:-3]}"
        plugin.log(plugin.rebalanceall_msg)
    finally:
        plugin.mutex.release()
@plugin.method("rebalanceall")
def rebalanceall(plugin: Plugin, min_amount: Millisatoshi = Millisatoshi("50000sat"), feeratio: float = 0.5):
    """Rebalance all unbalanced channels if possible for a very low fee.
    Default minimum rebalancable amount is 50000sat. Default feeratio = 0.5, half of our node's default fee.
    To be economical, it tries to fix the liquidity cheaper than it can be ruined by transaction forwards.
    It may run for a long time (hours) in the background, but can be stopped with the rebalancestop method.
    """
    # Cheap sanity checks before spawning the background thread.
    if plugin.mutex.locked():
        return {"message": "Rebalance is already running, this may take a while. To stop it use the cli method 'rebalancestop'."}
    channels = get_open_channels(plugin)
    if len(channels) <= 1:
        return {"message": "Error: Not enough open channels to rebalance anything"}
    ours = sum(ch["to_us_msat"] for ch in channels)
    total = sum(ch["total_msat"] for ch in channels)
    min_amount = Millisatoshi(min_amount)
    if total - ours < min_amount or ours < min_amount:
        return {"message": "Error: Not enough liquidity to rebalance anything"}
    # Re-parse parameters to guarantee correct types on the plugin state.
    plugin.feeratio = float(feeratio)
    plugin.min_amount = min_amount
    # Hand the actual work to a background thread.
    worker = Thread(target=rebalanceall_thread, args=(plugin, ))
    worker.start()
    return {"message": f"Rebalance started with min rebalancable amount: {plugin.min_amount}, feeratio: {plugin.feeratio}"}
@plugin.method("rebalancestop")
def rebalancestop(plugin: Plugin):
    """It stops the ongoing rebalanceall.
    """
    # Not running: report the outcome of the last run, if there was one.
    if not plugin.mutex.locked():
        if plugin.rebalanceall_msg is None:
            return {"message": "No rebalance is running, nothing to stop."}
        return {"message": f"No rebalance is running, nothing to stop. "
                f"Last 'rebalanceall' gave: {plugin.rebalanceall_msg}"}
    # Signal the worker, then wait on the mutex until it has finished.
    plugin.rebalance_stop = True
    plugin.mutex.acquire(blocking=True)
    plugin.rebalance_stop = False
    plugin.mutex.release()
    return {"message": plugin.rebalanceall_msg}
def health_score(liquidity):
    """Score a channel's liquidity health on a 0..100 scale.

    The base score (50..100) measures distance from the ideal split; it is
    scaled down by a 0..1 coefficient measuring distance from the minimum.
    Degenerate inputs (any zero ideal/min) score 0.
    """
    ideal_our = int(liquidity["ideal"]["our"])
    ideal_their = int(liquidity["ideal"]["their"])
    minimum = int(liquidity["min"])
    if 0 in (ideal_our, ideal_their, minimum):
        return 0
    ours = int(liquidity["our"])
    theirs = int(liquidity["their"])
    # distance from ideal liquidity (between 50 and 100)
    base = min(ours / ideal_our, theirs / ideal_their) * 50 + 50
    # distance from minimal liquidity as a coefficient (between 0 and 1)
    coefficient = min(ours / minimum, theirs / minimum, 1)
    return base * coefficient
@plugin.method("rebalancereport")
def rebalancereport(plugin: Plugin):
    """Show information about rebalance
    """
    res = {}
    # Current configuration and run state.
    res["rebalanceall_is_running"] = plugin.mutex.locked()
    res["getroute_method"] = plugin.getroute.__name__
    res["maxhops_threshold"] = plugin.maxhops
    res["msatfactor"] = plugin.msatfactor
    res["erringnodes_threshold"] = plugin.erringnodes
    channels = get_open_channels(plugin)
    health_percent = 0.0
    if len(channels) > 1:
        enough_liquidity = get_enough_liquidity_threshold(channels)
        ideal_ratio = get_ideal_ratio(channels, enough_liquidity)
        res["enough_liquidity_threshold"] = enough_liquidity
        res["ideal_liquidity_ratio"] = f"{ideal_ratio * 100:.2f}%"
        # Capacity-weighted average of the per-channel health scores.
        for ch in channels:
            liquidity = liquidity_info(ch, enough_liquidity, ideal_ratio)
            health_percent += health_score(liquidity) * int(ch["total_msat"])
        health_percent /= int(sum(ch["total_msat"] for ch in channels))
    else:
        res["enough_liquidity_threshold"] = Millisatoshi(0)
        res["ideal_liquidity_ratio"] = "0%"
    res["liquidity_health"] = f"{health_percent:.2f}%"
    # Aggregate historic rebalances from our own paid "Rebalance-..." invoices.
    invoices = plugin.rpc.listinvoices()['invoices']
    rebalances = [i for i in invoices if i.get('status') == 'paid' and i.get('label').startswith("Rebalance")]
    total_fee = Millisatoshi(0)
    total_amount = Millisatoshi(0)
    res["total_successful_rebalances"] = len(rebalances)
    for r in rebalances:
        try:
            pay = plugin.rpc.listpays(r["bolt11"])["pays"][0]
            total_amount += pay["amount_msat"]
            total_fee += pay["amount_sent_msat"] - pay["amount_msat"]
        except Exception:
            # Payment data missing for this invoice: don't count it.
            res["total_successful_rebalances"] -= 1
    res["total_rebalanced_amount"] = total_amount
    res["total_rebalance_fee"] = total_fee
    if total_amount > Millisatoshi(0):
        res["average_rebalance_fee_ppm"] = round(total_fee / total_amount * 10**6, 2)
    else:
        res["average_rebalance_fee_ppm"] = 0
    return res
@plugin.init()
def init(options, configuration, plugin):
    """Read node config and plugin options, then initialize plugin state."""
    node_config = plugin.rpc.listconfigs()
    plugin.cltv_final = node_config.get("cltv-final")
    plugin.fee_base = Millisatoshi(node_config.get("fee-base"))
    plugin.fee_ppm = node_config.get("fee-per-satoshi")
    # Guards the rebalanceall worker thread.
    plugin.mutex = Lock()
    # Option strings are parsed into their real types here.
    plugin.maxhops = int(options.get("rebalance-maxhops"))
    plugin.msatfactor = float(options.get("rebalance-msatfactor"))
    plugin.erringnodes = int(options.get("rebalance-erringnodes"))
    plugin.getroute = getroute_switch(options.get("rebalance-getroute"))
    plugin.rebalanceall_msg = None
    plugin.log(f"Plugin rebalance initialized with {plugin.fee_base} base / {plugin.fee_ppm} ppm fee "
               f"cltv_final:{plugin.cltv_final} "
               f"maxhops:{plugin.maxhops} "
               f"msatfactor:{plugin.msatfactor} "
               f"erringnodes:{plugin.erringnodes} "
               f"getroute: {plugin.getroute.__name__}")
# Register the plugin's configuration options, then hand control to pyln.
plugin.add_option(
    "rebalance-getroute",
    "iterative",
    "Getroute method for route search can be 'basic' or 'iterative'."
    "'basic': Tries all routes sequentially. "
    "'iterative': Tries shorter and bigger routes first.",
    "string"
)
plugin.add_option(
    "rebalance-maxhops",
    "5",
    "Maximum number of hops for `getroute` call. Set to 0 to disable. "
    "Note: Two hops are added for own nodes input and output channel. "
    "Note: Routes with a 8 or more hops have less than 3% success rate.",
    "string"
)
plugin.add_option(
    "rebalance-msatfactor",
    "4",
    "Will instruct `getroute` call to use higher requested capacity first. "
    "Note: This will decrease to 1 when no routes can be found.",
    "string"
)
plugin.add_option(
    "rebalance-erringnodes",
    "5",
    "Exclude nodes from routing that raised N or more errors. "
    "Note: Use 0 to disable.",
    "string"
)
plugin.run()
|
games_cog.py | import asyncio
import json
import random
import re
import requests
from multiprocessing import Process, Queue
from typing import Union
import discord
from discord.ext import commands
from lib.connectX import Board as ConnectX
from lib.discord_interface import add_choices_message, wait_for_choice, remove_choices
from lib.emoji import extract_emoji
from lib.emotes import basic_emoji
from lib.minesweeper import Minesweeper
from lib.player import Player
class Games(commands.Cog):
"""Various fun games"""
def __init__(self, bot):
self.bot = bot
self.user_icon = {self.bot.user.id: "🔴"}
def user_icons(self, user1: discord.User, user2: discord.User):
"""Return currently set user icons or default if not set"""
if user1.id == user2.id:
return "🟡", "🔴"
if user1.id in self.user_icon:
yellow = self.user_icon[user1.id]
else:
yellow = "🟡"
if user2.id in self.user_icon:
red = self.user_icon[user2.id]
else:
red = "🔴"
return yellow, red
@commands.command(name="icon", aliases=["set"], help="Set any emoji as your icon")
async def set_icon(self, ctx, emote: str = ""):
"""Change user's icon to emoji"""
if len(emote) == 0:
await ctx.send("No emote specified")
await ctx.message.add_reaction(basic_emoji.get("Si"))
# Find every Discord emote
discord_emotes = re.findall(r"<:\w*:\d*>", ctx.message.content)
# Unicode emojis compatible by default
compatible = extract_emoji(ctx.message.content)
# Filter foreign emotes
for e in discord_emotes:
for known in self.bot.emojis:
if e == str(known):
compatible.append(e)
# Remove duplicates
compatible = list(set(compatible))
# If user specified compatible custom emoji
if len(compatible) == 1:
self.user_icon[ctx.author.id] = str(compatible[0])
await ctx.message.add_reaction("✅")
elif len(compatible) == 0:
await ctx.send("I can't use that emote " + basic_emoji.get("Sadge"))
else:
await ctx.send("Too many emotes specified " + basic_emoji.get("Pepega"))
@commands.command(name="connect1", aliases=["connec1", "connec"], help="Play a game of Connect 1")
async def connect1(self, ctx, arg1: Union[discord.user.User, str, None], arg2: Union[discord.user.User, str, None]):
"""Connect 1 against another human or AI"""
await self.connect_x(ctx, width=1, height=1, pieces=1, depth=25, arg1=arg1, arg2=arg2)
@commands.command(name="connect2", help="Play a game of Connect 2")
async def connect2(self, ctx, arg1: Union[discord.user.User, str, None], arg2: Union[discord.user.User, str, None]):
"""Connect 2 against another human or AI"""
await self.connect_x(ctx, width=2, height=3, pieces=2, depth=25, arg1=arg1, arg2=arg2)
@commands.command(name="connect3", help="Play a game of Connect 3")
async def connect3(self, ctx, arg1: Union[discord.user.User, str, None], arg2: Union[discord.user.User, str, None]):
"""Connect 3 against another human or AI"""
await self.connect_x(ctx, width=5, height=4, pieces=3, depth=25, arg1=arg1, arg2=arg2)
@commands.command(name="connect4", aliases=["connect", "connectX"], help="Play a game of Connect 4")
async def connect4(self, ctx, arg1: Union[discord.user.User, str, None], arg2: Union[discord.user.User, str, None]):
"""Connect 4 against another human or AI"""
await self.connect_x(ctx, width=7, height=6, pieces=4, depth=6, arg1=arg1, arg2=arg2)
@commands.command(name="connect5", help="Play a game of Connect 5")
async def connect5(self, ctx, arg1: Union[discord.user.User, str, None], arg2: Union[discord.user.User, str, None]):
"""Connect 5 against another human or AI"""
await self.connect_x(ctx, width=10, height=9, pieces=5, depth=5, arg1=arg1, arg2=arg2)
    async def connect_x(self, ctx, width: int, height: int, pieces: int, depth: int, arg1: Union[discord.user.User, str, None], arg2: Union[discord.user.User, str, None]):
        """ConnectX of variable size.

        arg1/arg2 may arrive in either order: a tagged user selects the
        opponent, a string selects the caller's icon emote.  depth is the
        AI search depth.
        """
        # Parsing input: the string argument is the emote, the other the user.
        if isinstance(arg1, str):
            user = arg2
            emote = arg1
        else:
            user = arg1
            emote = arg2
        # Tagging a user creates User (or Member) class
        if isinstance(user, discord.user.User) or isinstance(user, discord.member.Member):
            # If user is a bot -> bot is unlikely to be able to play connect4
            if user.bot:
                await ctx.send("I don't think {0} would play with you ".format(user.mention) + basic_emoji.get("forsenSmug"))
                return
            # Tagged user is a human
            ai = False
            # User tagged themselves (permitted)
            if user == ctx.message.author:
                await ctx.message.add_reaction(basic_emoji.get("Pepega"))
                await ctx.message.add_reaction(basic_emoji.get("Clap"))
        # No tag provided -> play against AI
        else:
            ai = True
            user = self.bot.user
        # Bot vs Bot: both "players" are client users (assumption based on the
        # isinstance check below — TODO confirm how emote becomes a ClientUser)
        bvb = False
        if isinstance(user, discord.user.ClientUser) and isinstance(emote, discord.user.ClientUser):
            bvb = True
        elif isinstance(emote, str) and len(emote) != 0:
            await self.set_icon(ctx, emote)
        # Game setup
        board = ConnectX(width, height, pieces)
        columns = [None for _ in range(10)]
        player1 = ctx.message.author
        player2 = user
        if bvb:
            player1 = emote
        player = Player(player1=player1, player2=player2, ai=ai)
        player.shuffle()
        # Message containing game
        yellow, red = self.user_icons(player1, player2)
        board_msg = await ctx.send(board.to_string(yellow, red) + "{0} on turn".format(player))
        # Add numbers on first turn
        reacts_added = False
        while not board.game_over():
            # If it's AI's turn
            # (note: `and` binds tighter — reads as (on_turn()==2 and ai) or bvb)
            if player.on_turn() == 2 and ai or bvb:
                # Update displayed board
                yellow, red = self.user_icons(player1, player2)
                await board_msg.edit(content=board.to_string(yellow, red) + basic_emoji.get("docSpin") + " {0} on turn".format(player))
                # Run AI as new process (CPU heavy)
                queue = Queue()
                p = Process(target=board.get_ai_move_mp, args=(queue, 1, player.on_turn(), depth))
                p.start()
                p.join()
                column = queue.get()
            # If it's human's turn
            else:
                # Update displayed board
                yellow, red = self.user_icons(player1, player2)
                await board_msg.edit(content=board.to_string(yellow, red) + "{0} on turn".format(player))
                # Add numbers if not already present
                if not reacts_added:
                    reacts_added = True
                    try:
                        await board_msg.clear_reactions()
                    except discord.Forbidden:
                        await ctx.send("I am missing permission to manage messages (cannot remove reactions) " + basic_emoji.get("forsenT"))
                    except discord.HTTPException:
                        pass
                    columns = await add_choices_message(board_msg, width, cancellable=True)
                # Wait for human to choose a column (1-based -> 0-based)
                column = await wait_for_choice(self.bot, player.get_user_on_turn(), board_msg, columns, cancellable=True) - 1
                # No column chosen or player forfeited
                if column < 0:
                    yellow, red = self.user_icons(player1, player2)
                    status = "forfeited" if column == -1 else "timed out"
                    await board_msg.edit(content=board.to_string(yellow, red) + "{0} {1}".format(player, status))
                    await remove_choices(board_msg)
                    return
            # Drop piece down the selected column
            board.drop_piece(column, player.on_turn())
            # If it filled up the column, invalidate that column (can't be played again)
            if not board.column_not_full(column):
                columns[column] = "placeholder"
            player.next()
        # Game ended -> display result
        yellow, red = self.user_icons(player1, player2)
        if board.winner is not None:
            await board_msg.edit(content=board.to_string(yellow, red) + "{0} won!".format(player[board.winner]))
        else:
            await board_msg.edit(content=board.to_string(yellow, red) + "It's a draw!")
        await remove_choices(board_msg)
@commands.command(name="minesweeper", aliases=["mines"], help="Generate a minefield")
async def minesweeper(self, ctx, bombs: int = 25):
    """Send a 10x10 spoiler-tag minefield containing `bombs` bombs (1-99)."""
    # Reject bomb counts the 10x10 grid can't sensibly hold.
    if not 1 <= bombs <= 99:
        reply = "That's too many bombs." if bombs > 99 else "That wouldn't be minesweeper, just floorsweeper."
        await ctx.send(reply)
        return
    minefield = Minesweeper(width=10, height=10, bombs=bombs)
    await ctx.send(minefield.to_string(spoiler=True))
@commands.command(name="quiz", aliases=["trivia"], help="I heard that you like Trivia Quiz...")
@commands.cooldown(1, 30, commands.BucketType.user)
async def quiz(self, ctx, arg: int = 1):
    """Trivia quiz: ask `arg` (clamped to 1-10) multiple-choice questions from opentdb.com.

    The caller answers each question by reacting 🇦/🇧/🇨/🇩 within 25 seconds.
    After the last question (when more than one was asked) a score summary
    with a random conclusion line is posted.
    """
    # Opentdb returns HTML-escaped text (&quot;, &#039;, &amp;, ...);
    # html.unescape restores the literal characters in one pass instead of
    # the previous broken chain of per-entity replace() calls.
    import html
    # Clamp the requested question count to [1, 10].
    if arg > 10:
        toomuch = await ctx.send("**What the hell? Way too many questions. Actually - 10 questions should be enough.**")
        await asyncio.sleep(3)
        await toomuch.delete()
        arg = 10
    if arg < 1:
        impossible_arg = await ctx.send("Not possible, how about 3 questions instead?")
        await asyncio.sleep(3)
        await impossible_arg.delete()
        arg = 3
    i = 0
    score = 0
    timeout_count = 0
    letter_emojis = ["🇦", "🇧", "🇨", "🇩"]
    # Quiz cycle -> fetch next question and answers
    while i < arg:
        quiz = requests.get("https://opentdb.com/api.php?amount=1&type=multiple").json()
        question = html.unescape(quiz["results"][0]["question"])
        question_msg = await ctx.send("**" + question + "**")
        correct = html.unescape(quiz["results"][0]["correct_answer"])
        # Three incorrect answers + the correct one, shuffled.
        quiz_list = [html.unescape(w) for w in quiz["results"][0]["incorrect_answers"]]
        quiz_list.append(correct)
        random.shuffle(quiz_list)
        # Emoji corresponding to the correct answer's shuffled position.
        answer = letter_emojis[quiz_list.index(correct)]
        answer_msg = await ctx.send(" | ".join(quiz_list))
        for emoji in letter_emojis:
            await answer_msg.add_reaction(emoji)
        # Waiting for the user's reaction.
        # NOTE: any emoji from the caller is accepted (and counts as wrong
        # unless it is the right letter) — preserved from the original.
        def check(reaction, user):
            return str(reaction.emoji) and user == ctx.author
        try:
            reaction, user = await self.bot.wait_for("reaction_add", timeout=25, check=check)
            if str(reaction.emoji) == answer:
                status_corr = await ctx.send("Yes, ** " + correct + " **is correct.")
                await status_corr.add_reaction("👍")
                await asyncio.sleep(3)
                await status_corr.delete()
                score = score + 1
            else:
                status_icon = await ctx.send("No, I don't think so. **" + correct + " **is the right answer.")
                await status_icon.add_reaction(basic_emoji.get("Sadge"))
                await asyncio.sleep(5)
                await status_icon.delete()
        except asyncio.TimeoutError:
            to = await ctx.send("You ran out of time!")
            await to.add_reaction(basic_emoji.get("Pepega"))
            await asyncio.sleep(3)
            await to.delete()
            timeout_count += 1
        # Delete the question and repeat the cycle
        i += 1
        await question_msg.delete()
        await answer_msg.delete()
    # User AFK: every single question timed out
    if timeout_count == arg and arg > 1:
        afk = await ctx.send("Timed out.")
        await afk.add_reaction(basic_emoji.get("Si"))
        return
    # Conclusion based on the fraction of correct answers
    if i == arg and arg > 1:
        count = score / arg
        if count > 0.7:
            conclusion_list = ["That's a lot of knowledge.", "Smart one, are not you?", "PhDr. Milan Beneš would be proud.", "Well met!", "Never doubt the god gamer!", "That was pretty good.", "EZ4ANTS"]
            conclusion = random.choice(conclusion_list)
        elif count > 0.5:
            conclusion_list = ["Not Great, Not Terrible", "That was... pretty average, I guess?", "Nice try nonetheless.", "Enough points to pass my exam."]
            conclusion = random.choice(conclusion_list)
        elif count > 0.3:
            conclusion_list = ["I can tell that this is not your lucky day, is it?", "Never lucky man ...", "Better luck next time!", "Pretty underwhelming."]
            conclusion = random.choice(conclusion_list)
        elif count > 0.1:
            conclusion_list = ["MAN VA FAN.", "Terrible...", "Blame it on the black star.", "Just unlucky, right?", "Next time, you should try harder."]
            conclusion = random.choice(conclusion_list)
        else:
            conclusion_list = ["You are trolling, right?", "Apparently you have got more chromosomes than I thought.", "Is this some kind of twisted joke?", "A total waste of time.", "ZULOL"]
            conclusion = random.choice(conclusion_list)
        await ctx.send("**You have answered " + str(score) + " out of " + str(arg) + " questions correctly. " + conclusion + "**")
@commands.command(name="hangman", aliases=["hm"], help="Hangman, word guessing minigame (tolerance of 7 mistakes)")
@commands.cooldown(1, 30, commands.BucketType.user)
async def hangman(self, ctx):
    """Play hangman: guess a random English word by reacting with 🇦-🇿 emojis.

    The word comes from random-word-api.herokuapp.com; the game tolerates
    7 wrong guesses and times out after 5 minutes without a guess.
    """
    # Get random word
    try:
        response = requests.get("https://random-word-api.herokuapp.com/word?number=1")
        response.raise_for_status()
        word = response.json()[0]
        word = word.upper()
    except requests.HTTPError:
        await ctx.send(f"Bad response (status code {response.status_code}) from https://random-word-api.herokuapp.com).")
        return
    except (json.JSONDecodeError, IndexError):
        await ctx.send("No word returned from random-word-api.")
        return
    # Only A-Z can be guessed via regional-indicator emojis, so bail on anything else
    if not word.isalpha():
        await ctx.send("Random word contained non-alphabetical character(s), try again.")
        return
    # Hangman itself: one ASCII/emoji drawing per mistake count (0..7)
    hangman_stages = [
        "\n\n\n\n\n\n",
        "\n|\n|\n|\n|\n|\n|",
        "_-_-_-_-_-_-_-_-_-_-_-_-\n|\n|\n|\n|\n|\n|",
        "_-_-_-_-_-_-_-_-_-_-_-_-\n| 🧢\n|\n|\n|\n|\n|",
        "_-_-_-_-_-_-_-_-_-_-_-_-\n| 🧢\n| 😟\n|\n|\n|\n|",
        "_-_-_-_-_-_-_-_-_-_-_-_-\n| 🧢\n| 😟\n| 👕\n|\n|\n|",
        "_-_-_-_-_-_-_-_-_-_-_-_-\n| 🧢\n| 😟\n| 👕\n| 🩳\n|\n|",
        "_-_-_-_-_-_-_-_-_-_-_-_-\n| 🧢\n| 💀\n| 👕\n| 🩳\n| 👞👞\n|"
    ]
    # Word displayed as underscores wrapped in backticks, e.g. `____`
    hidden_word = f"`{len(word) * '_'}`"
    hangman_msg = await ctx.send(f"{hangman_stages[0]}\n**React here ⬇️, this word has {len(word)} letters**")
    hint = ""
    # Game variables
    mistakes = 0
    mistake_threshold = len(hangman_stages) - 1
    guessed_letters = set()
    timeout = 300
    # Watch for reaction made by user who called the command on bot's response message, only respond to 🇦-🇿 emojis
    # (127397 is the offset between 'A' and the 🇦 regional indicator)
    def check(reaction, user):
        return str(reaction.emoji) in [chr(i + 127397) for i in range(ord("A"), ord("Z") + 1)] and user == ctx.author and reaction.message == hangman_msg
    # Game loop
    while "_" in hidden_word and mistakes < mistake_threshold:
        try:
            reaction, _ = await self.bot.wait_for("reaction_add", timeout=timeout, check=check)
        except asyncio.TimeoutError:
            break
        # User reacted: map the regional-indicator emoji back to a plain A-Z letter
        letter = chr(ord(str(reaction.emoji)) - 127397)
        # New letter
        if letter not in guessed_letters:
            guessed_letters.add(letter)
            hint = ""
            # Correct letter
            if letter in word:
                # Find all occurences of letter (indexes in string)
                letter_occurrences = [pos for pos, char in enumerate(word) if char == letter]
                # Reveal them (+1/+2 offsets skip hidden_word's leading backtick)
                for letter_index in letter_occurrences:
                    hidden_word = hidden_word[:letter_index + 1] + letter + hidden_word[letter_index + 2:]
            # Incorrect letter
            else:
                mistakes += 1
        # Same letter guessed again
        else:
            hint = f" | Letter `{letter}` has already been guessed before."
        # Update message
        await hangman_msg.edit(content=f"{hangman_stages[mistakes]}\n{hidden_word}{hint}")
    # Game loop ended: win, loss, or timeout
    if "_" not in hidden_word and mistakes < mistake_threshold:
        hint = f" | You win, the word is: `{word}`, you guessed wrong {mistakes} times."
    elif mistakes >= mistake_threshold:
        hint = f" | Pepek Jr. died! The word was: `{word}`, you guessed wrong {mistakes} times."
    else:
        hint = f" | No guess made in {timeout} seconds (timed out). The word was: `{word}`."
        await ctx.message.add_reaction(basic_emoji.get("Si"))
    await hangman_msg.edit(content=f"{hangman_stages[mistakes]}\n{hidden_word}{hint}")
def setup(bot):
    """discord.py extension entry point: register the Games cog on the bot."""
    bot.add_cog(Games(bot))
import json
import random
import re
import string
import threading
import time
from multiprocessing.dummy import Pool
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
# Two-node cluster: one shard, replicas 1 and 2. Each node gets two small
# tmpfs JBOD disks (40M each) plus a larger "external" disk (200M), matching
# the storage policies declared in configs/config.d/storage_configuration.xml.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
                             main_configs=['configs/logs_config.xml', 'configs/config.d/storage_configuration.xml',
                                           'configs/config.d/cluster.xml'],
                             with_zookeeper=True,
                             stay_alive=True,
                             tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
                             macros={"shard": 0, "replica": 1})
node2 = cluster.add_instance('node2',
                             main_configs=['configs/logs_config.xml', 'configs/config.d/storage_configuration.xml',
                                           'configs/config.d/cluster.xml'],
                             with_zookeeper=True,
                             stay_alive=True,
                             tmpfs=['/jbod1:size=40M', '/jbod2:size=40M', '/external:size=200M'],
                             macros={"shard": 0, "replica": 2})
@pytest.fixture(scope="module")
def start_cluster():
    """Start the ClickHouse cluster once per module and shut it down afterwards."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def test_system_tables(start_cluster):
    """system.disks and system.storage_policies must reflect the configured disks/policies exactly."""
    # Expected contents of system.disks (values come from the storage config).
    expected_disks_data = [
        {
            "name": "default",
            "path": "/var/lib/clickhouse/",
            "keep_free_space": '1024',
        },
        {
            "name": "jbod1",
            "path": "/jbod1/",
            "keep_free_space": '0',
        },
        {
            "name": "jbod2",
            "path": "/jbod2/",
            "keep_free_space": '10485760',
        },
        {
            "name": "external",
            "path": "/external/",
            "keep_free_space": '0',
        }
    ]
    click_disk_data = json.loads(node1.query("SELECT name, path, keep_free_space FROM system.disks FORMAT JSON"))[
        "data"]
    # Compare order-independently (sorted by disk name).
    assert sorted(click_disk_data, key=lambda x: x["name"]) == sorted(expected_disks_data, key=lambda x: x["name"])
    # Expected rows of system.storage_policies, one row per (policy, volume).
    expected_policies_data = [
        {
            "policy_name": "small_jbod_with_external",
            "volume_name": "main",
            "volume_priority": "1",
            "disks": ["jbod1"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "small_jbod_with_external",
            "volume_name": "external",
            "volume_priority": "2",
            "disks": ["external"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "small_jbod_with_external_no_merges",
            "volume_name": "main",
            "volume_priority": "1",
            "disks": ["jbod1"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "small_jbod_with_external_no_merges",
            "volume_name": "external",
            "volume_priority": "2",
            "disks": ["external"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 1,
        },
        {
            "policy_name": "one_more_small_jbod_with_external",
            "volume_name": "m",
            "volume_priority": "1",
            "disks": ["jbod1"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "one_more_small_jbod_with_external",
            "volume_name": "e",
            "volume_priority": "2",
            "disks": ["external"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "jbods_with_external",
            "volume_name": "main",
            "volume_priority": "1",
            "disks": ["jbod1", "jbod2"],
            "volume_type": "JBOD",
            "max_data_part_size": "10485760",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "jbods_with_external",
            "volume_name": "external",
            "volume_priority": "2",
            "disks": ["external"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "moving_jbod_with_external",
            "volume_name": "main",
            "volume_priority": "1",
            "disks": ["jbod1"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.7,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "moving_jbod_with_external",
            "volume_name": "external",
            "volume_priority": "2",
            "disks": ["external"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.7,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "default_disk_with_external",
            "volume_name": "small",
            "volume_priority": "1",
            "disks": ["default"],
            "volume_type": "JBOD",
            "max_data_part_size": "2097152",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "default_disk_with_external",
            "volume_name": "big",
            "volume_priority": "2",
            "disks": ["external"],
            "volume_type": "JBOD",
            "max_data_part_size": "20971520",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "special_warning_policy",
            "volume_name": "special_warning_zero_volume",
            "volume_priority": "1",
            "disks": ["default"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "special_warning_policy",
            "volume_name": "special_warning_default_volume",
            "volume_priority": "2",
            "disks": ["external"],
            "volume_type": "JBOD",
            "max_data_part_size": "0",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "special_warning_policy",
            "volume_name": "special_warning_small_volume",
            "volume_priority": "3",
            "disks": ["jbod1"],
            "volume_type": "JBOD",
            "max_data_part_size": "1024",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
        {
            "policy_name": "special_warning_policy",
            "volume_name": "special_warning_big_volume",
            "volume_priority": "4",
            "disks": ["jbod2"],
            "volume_type": "JBOD",
            "max_data_part_size": "1024000000",
            "move_factor": 0.1,
            "prefer_not_to_merge": 0,
        },
    ]
    clickhouse_policies_data = \
        json.loads(node1.query("SELECT * FROM system.storage_policies WHERE policy_name != 'default' FORMAT JSON"))[
            "data"]

    # Sort both sides by the (policy, volume, priority) triple before comparing.
    def key(x):
        return (x["policy_name"], x["volume_name"], x["volume_priority"])

    assert sorted(clickhouse_policies_data, key=key) == sorted(expected_policies_data, key=key)
def test_query_parser(start_cluster):
    """References to unknown policies/disks/volumes and malformed MOVE targets must be rejected."""
    try:
        # Unknown policy name -> CREATE fails
        with pytest.raises(QueryRuntimeException):
            node1.query("""
                CREATE TABLE IF NOT EXISTS table_with_absent_policy (
                    d UInt64
                ) ENGINE = MergeTree()
                ORDER BY d
                SETTINGS storage_policy='very_exciting_policy'
            """)
        # A disk name is not a valid policy name -> CREATE fails
        with pytest.raises(QueryRuntimeException):
            node1.query("""
                CREATE TABLE IF NOT EXISTS table_with_absent_policy (
                    d UInt64
                ) ENGINE = MergeTree()
                ORDER BY d
                SETTINGS storage_policy='jbod1'
            """)
        node1.query("""
            CREATE TABLE IF NOT EXISTS table_with_normal_policy (
                d UInt64
            ) ENGINE = MergeTree()
            ORDER BY d
            SETTINGS storage_policy='default'
        """)
        node1.query("INSERT INTO table_with_normal_policy VALUES (5)")
        # MOVE to a volume/disk that doesn't exist in the table's policy
        with pytest.raises(QueryRuntimeException):
            node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION tuple() TO VOLUME 'some_volume'")
        with pytest.raises(QueryRuntimeException):
            node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION tuple() TO DISK 'some_volume'")
        # Nonexistent part / malformed partition id
        with pytest.raises(QueryRuntimeException):
            node1.query("ALTER TABLE table_with_normal_policy MOVE PART 'xxxxx' TO DISK 'jbod1'")
        with pytest.raises(QueryRuntimeException):
            node1.query("ALTER TABLE table_with_normal_policy MOVE PARTITION 'yyyy' TO DISK 'jbod1'")
        # Switching to a policy that doesn't contain the current disks
        with pytest.raises(QueryRuntimeException):
            node1.query(
                "ALTER TABLE table_with_normal_policy MODIFY SETTING storage_policy='moving_jbod_with_external'")
    finally:
        node1.query("DROP TABLE IF EXISTS table_with_normal_policy SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("test_alter_policy", "MergeTree()", id="mt"),
    pytest.param("replicated_test_alter_policy", "ReplicatedMergeTree('/clickhouse/test_alter_policy', '1')", id="replicated"),
])
def test_alter_policy(start_cluster, name, engine):
    """ALTER MODIFY SETTING storage_policy: the switch to 'jbods_with_external' succeeds while
    the other two are rejected and leave the setting untouched — presumably the new policy
    must contain all of the table's current disks (TODO confirm against server docs)."""
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                d UInt64
            ) ENGINE = {engine}
            ORDER BY d
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))
        assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format(
            name=name)) == "small_jbod_with_external\n"
        # Rejected change: policy stays as-is
        with pytest.raises(QueryRuntimeException):
            node1.query(
                """ALTER TABLE {name} MODIFY SETTING storage_policy='one_more_small_jbod_with_external'""".format(
                    name=name))
        assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format(
            name=name)) == "small_jbod_with_external\n"
        # Accepted change
        node1.query_with_retry("""ALTER TABLE {name} MODIFY SETTING storage_policy='jbods_with_external'""".format(name=name))
        assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format(
            name=name)) == "jbods_with_external\n"
        # Changing back is also rejected
        with pytest.raises(QueryRuntimeException):
            node1.query(
                """ALTER TABLE {name} MODIFY SETTING storage_policy='small_jbod_with_external'""".format(name=name))
        assert node1.query("""SELECT storage_policy FROM system.tables WHERE name = '{name}'""".format(
            name=name)) == "jbods_with_external\n"
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
def get_random_string(length):
    """Return a random string of `length` characters drawn from A-Z and 0-9."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=length))
def get_used_disks_for_table(node, table_name):
    """Return a tuple of disk names holding the table's active parts, oldest-modified first."""
    sql = ("select disk_name from system.parts where table == '{}' "
           "and active=1 order by modification_time".format(table_name))
    return tuple(node.query(sql).strip().split('\n'))
def get_used_parts_for_table(node, table_name):
    """Return the names of the table's active parts, oldest-modified first."""
    sql = "SELECT name FROM system.parts WHERE table = '{}' AND active = 1 ORDER BY modification_time"
    result = node.query(sql.format(table_name))
    return result.splitlines()
def test_no_warning_about_zero_max_data_part_size(start_cluster):
    """Only the volume with a suspiciously small max_data_part_size (1024 bytes) should
    produce a startup warning; zero/default/large limits must stay silent."""
    def get_log(node):
        # Full server log from inside the container
        return node.exec_in_container(["bash", "-c", "cat /var/log/clickhouse-server/clickhouse-server.log"])

    for node in (node1, node2):
        node.query("""
            CREATE TABLE IF NOT EXISTS default.test_warning_table (
                s String
            ) ENGINE = MergeTree
            ORDER BY tuple()
            SETTINGS storage_policy='small_jbod_with_external'
        """)
        node.query("DROP TABLE IF EXISTS default.test_warning_table SYNC")
        log = get_log(node)
        assert not re.search("Warning.*Volume.*special_warning_zero_volume", log)
        assert not re.search("Warning.*Volume.*special_warning_default_volume", log)
        # max_data_part_size=1024 bytes is tiny -> warning expected
        assert re.search("Warning.*Volume.*special_warning_small_volume", log)
        assert not re.search("Warning.*Volume.*special_warning_big_volume", log)
@pytest.mark.parametrize("name,engine", [
    pytest.param("mt_on_jbod", "MergeTree()", id="mt"),
    pytest.param("replicated_mt_on_jbod", "ReplicatedMergeTree('/clickhouse/replicated_mt_on_jbod', '1')", id="replicated"),
])
def test_round_robin(start_cluster, name, engine):
    """Consecutive inserts into a two-disk JBOD volume alternate between the disks."""
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                d UInt64
            ) ENGINE = {engine}
            ORDER BY d
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))
        # first should go to the jbod1
        node1.query_with_retry("insert into {} select * from numbers(10000)".format(name))
        used_disk = get_used_disks_for_table(node1, name)
        assert len(used_disk) == 1, 'More than one disk used for single insert'
        node1.query_with_retry("insert into {} select * from numbers(10000, 10000)".format(name))
        used_disks = get_used_disks_for_table(node1, name)
        assert len(used_disks) == 2, 'Two disks should be used for two parts'
        assert used_disks[0] != used_disks[1], "Should write to different disks"
        node1.query_with_retry("insert into {} select * from numbers(20000, 10000)".format(name))
        used_disks = get_used_disks_for_table(node1, name)
        # jbod1 -> jbod2 -> jbod1 -> jbod2 ... etc
        assert len(used_disks) == 3
        assert used_disks[0] != used_disks[1]
        assert used_disks[2] == used_disks[0]
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("mt_with_huge_part", "MergeTree()", id="mt"),
    pytest.param("replicated_mt_with_huge_part", "ReplicatedMergeTree('/clickhouse/replicated_mt_with_huge_part', '1')", id="replicated"),
])
def test_max_data_part_size(start_cluster, name, engine):
    """A part at the volume's max_data_part_size limit (10MB) must be written to the next
    volume in the policy ('external') instead of the JBOD volume."""
    try:
        # Sanity-check the configured limit first
        assert int(*node1.query("""SELECT max_data_part_size FROM system.storage_policies WHERE policy_name = 'jbods_with_external' AND volume_name = 'main'""").splitlines()) == 10*1024*1024
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                s1 String
            ) ENGINE = {engine}
            ORDER BY tuple()
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))
        data = []  # 10MB in total
        for i in range(10):
            data.append(get_random_string(1024 * 1024))  # 1MB row
        node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        assert len(used_disks) == 1
        assert used_disks[0] == 'external'
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("mt_with_overflow", "MergeTree()", id="mt"),
    pytest.param("replicated_mt_with_overflow", "ReplicatedMergeTree('/clickhouse/replicated_mt_with_overflow', '1')", id="replicated"),
])
def test_jbod_overflow(start_cluster, name, engine):
    """When the JBOD disk fills up, new parts spill to 'external'; a subsequent
    OPTIMIZE FINAL produces merged parts on 'external' as well."""
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                s1 String
            ) ENGINE = {engine}
            ORDER BY tuple()
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name, engine=engine))
        node1.query(f"SYSTEM STOP MERGES {name}")
        # small jbod size is 40MB, so lets insert 5MB batch 7 times
        for i in range(7):
            data = []  # 5MB in total
            for i in range(5):
                data.append(get_random_string(1024 * 1024))  # 1MB row
            node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        assert used_disks == tuple('jbod1' for _ in used_disks)
        # should go to the external disk (jbod is overflown)
        data = []  # 10MB in total
        for i in range(10):
            data.append(get_random_string(1024 * 1024))  # 1MB row
        node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        assert used_disks[-1] == 'external'
        node1.query(f"SYSTEM START MERGES {name}")
        time.sleep(1)
        node1.query_with_retry("OPTIMIZE TABLE {} FINAL".format(name))
        time.sleep(2)
        # level >= 1 selects merged (non-initial) parts only
        disks_for_merges = tuple(node1.query(
            "SELECT disk_name FROM system.parts WHERE table == '{}' AND level >= 1 and active = 1 ORDER BY modification_time".format(
                name)).strip().split('\n'))
        assert disks_for_merges == tuple('external' for _ in disks_for_merges)
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("moving_mt", "MergeTree()", id="mt"),
    pytest.param("moving_replicated_mt", "ReplicatedMergeTree('/clickhouse/moving_replicated_mt', '1')", id="replicated"),
])
def test_background_move(start_cluster, name, engine):
    """With move_factor=0.7 the background mover evicts the oldest parts from jbod1 to
    'external' without any explicit ALTER."""
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                s1 String
            ) ENGINE = {engine}
            ORDER BY tuple()
            SETTINGS storage_policy='moving_jbod_with_external'
        """.format(name=name, engine=engine))
        # Merges are stopped so the part set stays stable while we watch moves
        node1.query(f"SYSTEM STOP MERGES {name}")
        for i in range(5):
            data = []  # 5MB in total
            for i in range(5):
                data.append(get_random_string(1024 * 1024))  # 1MB row
            # small jbod size is 40MB, so lets insert 5MB batch 5 times
            node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        # Poll until the mover has drained jbod1 down to <= 2 parts (or we give up)
        retry = 20
        i = 0
        while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry:
            time.sleep(0.5)
            used_disks = get_used_disks_for_table(node1, name)
            i += 1
        assert sum(1 for x in used_disks if x == 'jbod1') <= 2
        # first (oldest) part was moved to external
        assert used_disks[0] == 'external'
        path = node1.query(
            "SELECT path_on_disk FROM system.part_log WHERE table = '{}' AND event_type='MovePart' ORDER BY event_time LIMIT 1".format(
                name))
        # first (oldest) part was moved to external
        assert path.startswith("/external")
        node1.query(f"SYSTEM START MERGES {name}")
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("stopped_moving_mt", "MergeTree()", id="mt"),
    pytest.param("stopped_moving_replicated_mt", "ReplicatedMergeTree('/clickhouse/stopped_moving_replicated_mt', '1')", id="replicated"),
])
def test_start_stop_moves(start_cluster, name, engine):
    """SYSTEM STOP/START MOVES (global and per-table) must block and re-enable
    both manual ALTER MOVE and background part moves."""
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                s1 String
            ) ENGINE = {engine}
            ORDER BY tuple()
            SETTINGS storage_policy='moving_jbod_with_external'
        """.format(name=name, engine=engine))
        node1.query_with_retry("INSERT INTO {} VALUES ('HELLO')".format(name))
        node1.query_with_retry("INSERT INTO {} VALUES ('WORLD')".format(name))
        used_disks = get_used_disks_for_table(node1, name)
        assert all(d == "jbod1" for d in used_disks), "All writes shoud go to jbods"
        first_part = node1.query(
            "SELECT name FROM system.parts WHERE table = '{}' and active = 1 ORDER BY modification_time LIMIT 1".format(
                name)).strip()
        node1.query("SYSTEM STOP MOVES")
        # Manual move is rejected while moves are globally stopped
        with pytest.raises(QueryRuntimeException):
            node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part))
        used_disks = get_used_disks_for_table(node1, name)
        assert all(d == "jbod1" for d in used_disks), "Blocked moves doesn't actually move something"
        node1.query("SYSTEM START MOVES")
        # Now the same manual move succeeds
        node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part))
        disk = node1.query(
            "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name,
                                                                                                          first_part)).strip()
        assert disk == "external"
        node1.query_with_retry("TRUNCATE TABLE {}".format(name))
        # Second half: per-table STOP MOVES must also block background moves
        node1.query("SYSTEM STOP MOVES {}".format(name))
        node1.query("SYSTEM STOP MERGES {}".format(name))
        for i in range(5):
            data = []  # 5MB in total
            for i in range(5):
                data.append(get_random_string(1024 * 1024))  # 1MB row
            # jbod size is 40MB, so lets insert 5MB batch 7 times
            node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))
        used_disks = get_used_disks_for_table(node1, name)
        retry = 5
        i = 0
        while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry:
            time.sleep(0.1)
            used_disks = get_used_disks_for_table(node1, name)
            i += 1
        # first (oldest) part doesn't move anywhere
        assert used_disks[0] == 'jbod1'
        node1.query("SYSTEM START MOVES {}".format(name))
        # wait sometime until background backoff finishes
        retry = 30
        i = 0
        while not sum(1 for x in used_disks if x == 'jbod1') <= 2 and i < retry:
            time.sleep(1)
            used_disks = get_used_disks_for_table(node1, name)
            i += 1
        node1.query("SYSTEM START MERGES {}".format(name))
        assert sum(1 for x in used_disks if x == 'jbod1') <= 2
        # first (oldest) part moved to external
        assert used_disks[0] == 'external'
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
def get_path_for_part_from_part_log(node, table, part_name):
    """Return the most recent path_on_disk recorded in system.part_log for the given part."""
    node.query("SYSTEM FLUSH LOGS")  # ensure part_log is up to date
    sql = ("SELECT path_on_disk FROM system.part_log WHERE table = '{}' "
           "and part_name = '{}' ORDER BY event_time DESC LIMIT 1")
    return node.query(sql.format(table, part_name)).strip()
def get_paths_for_partition_from_part_log(node, table, partition_id):
    """Return path_on_disk values from system.part_log for a partition, newest event first."""
    node.query("SYSTEM FLUSH LOGS")  # ensure part_log is up to date
    sql = ("SELECT path_on_disk FROM system.part_log WHERE table = '{}' "
           "and partition_id = '{}' ORDER BY event_time DESC")
    return node.query(sql.format(table, partition_id)).strip().split('\n')
@pytest.mark.parametrize("name,engine", [
    pytest.param("altering_mt", "MergeTree()", id="mt"),
    # ("altering_replicated_mt","ReplicatedMergeTree('/clickhouse/altering_replicated_mt', '1')",),
    # SYSTEM STOP MERGES doesn't disable merges assignments
])
def test_alter_move(start_cluster, name, engine):
    """ALTER TABLE ... MOVE PART/PARTITION to a DISK or VOLUME relocates the data and
    the new location shows up in both system.parts and system.part_log."""
    try:
        node1.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                EventDate Date,
                number UInt64
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY toYYYYMM(EventDate)
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))
        node1.query("SYSTEM STOP MERGES {}".format(name))  # to avoid conflicts
        node1.query("INSERT INTO {} VALUES(toDate('2019-03-15'), 65)".format(name))
        node1.query("INSERT INTO {} VALUES(toDate('2019-03-16'), 66)".format(name))
        node1.query("INSERT INTO {} VALUES(toDate('2019-04-10'), 42)".format(name))
        node1.query("INSERT INTO {} VALUES(toDate('2019-04-11'), 43)".format(name))
        used_disks = get_used_disks_for_table(node1, name)
        assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods"
        first_part = node1.query(
            "SELECT name FROM system.parts WHERE table = '{}' and active = 1 ORDER BY modification_time LIMIT 1".format(
                name)).strip()
        time.sleep(1)
        # Move one part to the 'external' volume and verify
        node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, first_part))
        disk = node1.query(
            "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name,
                                                                                                          first_part)).strip()
        assert disk == 'external'
        assert get_path_for_part_from_part_log(node1, name, first_part).startswith("/external")
        time.sleep(1)
        # Move the same part back to a specific disk
        node1.query("ALTER TABLE {} MOVE PART '{}' TO DISK 'jbod1'".format(name, first_part))
        disk = node1.query(
            "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name,
                                                                                                          first_part)).strip()
        assert disk == 'jbod1'
        assert get_path_for_part_from_part_log(node1, name, first_part).startswith("/jbod1")
        time.sleep(1)
        # Move a whole partition (two parts) to a volume...
        node1.query("ALTER TABLE {} MOVE PARTITION 201904 TO VOLUME 'external'".format(name))
        disks = node1.query(
            "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201904' and active = 1".format(
                name)).strip().split('\n')
        assert len(disks) == 2
        assert all(d == "external" for d in disks)
        assert all(
            path.startswith("/external") for path in get_paths_for_partition_from_part_log(node1, name, '201904')[:2])
        time.sleep(1)
        # ...and then to a specific disk
        node1.query("ALTER TABLE {} MOVE PARTITION 201904 TO DISK 'jbod2'".format(name))
        disks = node1.query(
            "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201904' and active = 1".format(
                name)).strip().split('\n')
        assert len(disks) == 2
        assert all(d == "jbod2" for d in disks)
        assert all(
            path.startswith("/jbod2") for path in get_paths_for_partition_from_part_log(node1, name, '201904')[:2])
        # No rows lost along the way
        assert node1.query("SELECT COUNT() FROM {}".format(name)) == "4\n"
    finally:
        node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("volume_or_disk", [
    "DISK",
    "VOLUME"
])
def test_alter_move_half_of_partition(start_cluster, volume_or_disk):
    """Moving a partition must succeed even when one of its parts already sits on the target."""
    name = "alter_move_half_of_partition"
    engine = "MergeTree()"
    try:
        node1.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                EventDate Date,
                number UInt64
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY toYYYYMM(EventDate)
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))
        node1.query("SYSTEM STOP MERGES {}".format(name))
        node1.query("INSERT INTO {} VALUES(toDate('2019-03-15'), 65)".format(name))
        node1.query("INSERT INTO {} VALUES(toDate('2019-03-16'), 42)".format(name))
        used_disks = get_used_disks_for_table(node1, name)
        assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods"
        time.sleep(1)
        parts = node1.query("SELECT name FROM system.parts WHERE table = '{}' and active = 1".format(name)).splitlines()
        assert len(parts) == 2
        # Pre-move one of the two parts to external
        node1.query("ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(name, parts[0]))
        disks = node1.query(
            "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(name,
                                                                                                          parts[0])).splitlines()
        assert disks == ["external"]
        time.sleep(1)
        # Now move the whole partition; the already-moved part must not break it
        node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name,
                                                                                                 volume_or_disk=volume_or_disk))
        disks = node1.query(
            "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201903' and active = 1".format(
                name)).splitlines()
        assert disks == ["external"] * 2
        assert node1.query("SELECT COUNT() FROM {}".format(name)) == "2\n"
    finally:
        node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("volume_or_disk", [
    "DISK",
    "VOLUME"
])
def test_alter_double_move_partition(start_cluster, volume_or_disk):
    """Moving a partition to a target it already fully occupies must raise an error."""
    name = "alter_double_move_partition"
    engine = "MergeTree()"
    try:
        node1.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                EventDate Date,
                number UInt64
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY toYYYYMM(EventDate)
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))
        node1.query("SYSTEM STOP MERGES {}".format(name))
        node1.query("INSERT INTO {} VALUES(toDate('2019-03-15'), 65)".format(name))
        node1.query("INSERT INTO {} VALUES(toDate('2019-03-16'), 42)".format(name))
        used_disks = get_used_disks_for_table(node1, name)
        assert all(d.startswith("jbod") for d in used_disks), "All writes should go to jbods"
        time.sleep(1)
        # First move succeeds
        node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name,
                                                                                                 volume_or_disk=volume_or_disk))
        disks = node1.query(
            "SELECT disk_name FROM system.parts WHERE table = '{}' and partition = '201903' and active = 1".format(
                name)).splitlines()
        assert disks == ["external"] * 2
        assert node1.query("SELECT COUNT() FROM {}".format(name)) == "2\n"
        time.sleep(1)
        # Second identical move must fail: nothing left to move
        with pytest.raises(QueryRuntimeException):
            node1.query("ALTER TABLE {} MOVE PARTITION 201903 TO {volume_or_disk} 'external'".format(name,
                                                                                                     volume_or_disk=volume_or_disk))
    finally:
        node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
def produce_alter_move(node, name):
    """Issue one random ALTER TABLE ... MOVE PART/PARTITION against *name* on *node*.

    Picks a random part (or one of the two known partitions 201903/201904), a
    random destination kind (DISK/VOLUME) and a random destination name.
    Failures of the final ALTER are ignored because concurrent callers race
    over the same parts.

    BUG FIX: the body previously ignored the ``node`` parameter and always
    queried the module-level ``node1``; it now uses the parameter (all current
    callers pass ``node1``, so behavior is unchanged for them).
    """
    move_type = random.choice(["PART", "PARTITION"])
    if move_type == "PART":
        # system.parts may be briefly unreadable while concurrent DDL runs;
        # retry a few times before giving up.
        for _ in range(10):
            try:
                parts = node.query(
                    "SELECT name from system.parts where table = '{}' and active = 1".format(name)).strip().split('\n')
                break
            except QueryRuntimeException:
                pass
        else:
            raise Exception("Cannot select from system.parts")

        move_part = random.choice(["'" + part + "'" for part in parts])
    else:
        move_part = random.choice([201903, 201904])

    move_disk = random.choice(["DISK", "VOLUME"])
    if move_disk == "DISK":
        move_volume = random.choice(["'external'", "'jbod1'", "'jbod2'"])
    else:
        move_volume = random.choice(["'main'", "'external'"])
    try:
        node.query("ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format(
            name, mt=move_type, mp=move_part, md=move_disk, mv=move_volume))
    except QueryRuntimeException:
        # Expected under concurrency: the part may already have been moved,
        # merged away or dropped by another thread.
        pass
@pytest.mark.parametrize("name,engine", [
    pytest.param("concurrently_altering_mt", "MergeTree()", id="mt"),
    pytest.param("concurrently_altering_replicated_mt",
                 "ReplicatedMergeTree('/clickhouse/concurrently_altering_replicated_mt', '1')", id="replicated"),
])
def test_concurrent_alter_move(start_cluster, name, engine):
    # Stress test: concurrent INSERTs, random part/partition MOVEs, mutations
    # and OPTIMIZE FINAL must neither crash the server nor lose rows.
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                EventDate Date,
                number UInt64
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY toYYYYMM(EventDate)
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))

        # Unique values so the final COUNT() check is exact (500 = 5 workers x 100 rows).
        values = list({random.randint(1, 1000000) for _ in range(0, 1000)})

        def insert(num):
            for i in range(num):
                day = random.randint(11, 30)
                value = values.pop()
                month = '0' + str(random.choice([3, 4]))
                node1.query_with_retry("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))

        def alter_move(num):
            for i in range(num):
                produce_alter_move(node1, name)

        def alter_update(num):
            for i in range(num):
                node1.query("ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name))

        def optimize_table(num):
            for i in range(num):
                node1.query_with_retry("OPTIMIZE TABLE {} FINAL".format(name))

        p = Pool(15)
        tasks = []
        for i in range(5):
            tasks.append(p.apply_async(insert, (100,)))
            tasks.append(p.apply_async(alter_move, (100,)))
            tasks.append(p.apply_async(alter_update, (100,)))
            tasks.append(p.apply_async(optimize_table, (100,)))

        for task in tasks:
            task.get(timeout=240)

        assert node1.query("SELECT 1") == "1\n"  # server is still alive
        # No rows may have been lost by the concurrent moves/mutations.
        assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
    finally:
        node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("concurrently_dropping_mt", "MergeTree()", id="mt"),
    pytest.param("concurrently_dropping_replicated_mt",
                 "ReplicatedMergeTree('/clickhouse/concurrently_dropping_replicated_mt', '1')", id="replicated"),
])
def test_concurrent_alter_move_and_drop(start_cluster, name, engine):
    # Stress test: random MOVEs racing with DROP/DETACH PARTITION must not
    # crash the server. No row-count invariant is checked at the end because
    # partitions are dropped concurrently.
    try:
        node1.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                EventDate Date,
                number UInt64
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY toYYYYMM(EventDate)
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))

        # Unique values for the inserted rows.
        values = list({random.randint(1, 1000000) for _ in range(0, 1000)})

        def insert(num):
            for i in range(num):
                day = random.randint(11, 30)
                value = values.pop()
                month = '0' + str(random.choice([3, 4]))
                node1.query_with_retry("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))

        def alter_move(num):
            for i in range(num):
                produce_alter_move(node1, name)

        def alter_drop(num):
            for i in range(num):
                partition = random.choice([201903, 201904])
                drach = random.choice(["drop", "detach"])
                node1.query("ALTER TABLE {} {} PARTITION {}".format(name, drach, partition))

        # Seed the table before the concurrent phase starts.
        insert(100)
        p = Pool(15)
        tasks = []
        for i in range(5):
            tasks.append(p.apply_async(insert, (100,)))
            tasks.append(p.apply_async(alter_move, (100,)))
            tasks.append(p.apply_async(alter_drop, (100,)))

        for task in tasks:
            task.get(timeout=120)

        assert node1.query("SELECT 1") == "1\n"  # server survived
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("detach_attach_mt", "MergeTree()", id="mt"),
    pytest.param("replicated_detach_attach_mt", "ReplicatedMergeTree('/clickhouse/replicated_detach_attach_mt', '1')", id="replicated"),
])
def test_detach_attach(start_cluster, name, engine):
    """Detached parts must remember their disk and reattach with all rows intact."""
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                s1 String
            ) ENGINE = {engine}
            ORDER BY tuple()
            SETTINGS storage_policy='moving_jbod_with_external'
        """.format(name=name, engine=engine))

        # Five 1MB rows -- 5MB in total.
        rows = [get_random_string(1024 * 1024) for _ in range(5)]
        values_clause = ','.join(["('" + row + "')" for row in rows])
        node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, values_clause))

        node1.query("ALTER TABLE {} DETACH PARTITION tuple()".format(name))
        assert node1.query("SELECT count() FROM {}".format(name)).strip() == "0"
        assert node1.query("SELECT disk FROM system.detached_parts WHERE table = '{}'".format(name)).strip() == "jbod1"

        node1.query("ALTER TABLE {} ATTACH PARTITION tuple()".format(name))
        assert node1.query("SELECT count() FROM {}".format(name)).strip() == "5"
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("mutating_mt", "MergeTree()", id="mt"),
    pytest.param("replicated_mutating_mt", "ReplicatedMergeTree('/clickhouse/replicated_mutating_mt', '1')", id="replicated"),
])
def test_mutate_to_another_disk(start_cluster, name, engine):
    # Mutations on a multi-disk table must succeed; if the first mutation
    # fails, compacting with OPTIMIZE FINAL (which can relocate data) and
    # retrying the mutation must work.
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                s1 String
            ) ENGINE = {engine}
            ORDER BY tuple()
            SETTINGS storage_policy='moving_jbod_with_external'
        """.format(name=name, engine=engine))

        for i in range(5):
            data = []  # 5MB in total
            for i in range(5):
                data.append(get_random_string(1024 * 1024))  # 1MB row
            node1.query_with_retry("INSERT INTO {} VALUES {}".format(name, ','.join(["('" + x + "')" for x in data])))

        node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name))

        # Wait up to ~10s for the asynchronous mutation to finish.
        retry = 20
        while node1.query("SELECT * FROM system.mutations WHERE is_done = 0") != "" and retry > 0:
            retry -= 1
            time.sleep(0.5)

        if node1.query("SELECT latest_fail_reason FROM system.mutations WHERE table = '{}'".format(name)) == "":
            # 25 rows total; every row must now end with the appended 'x'.
            assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n"
        else:  # mutation failed, let's try on another disk
            print("Mutation failed")
            node1.query_with_retry("OPTIMIZE TABLE {} FINAL".format(name))
            node1.query("ALTER TABLE {} UPDATE s1 = concat(s1, 'x') WHERE 1".format(name))
            retry = 20
            while node1.query("SELECT * FROM system.mutations WHERE is_done = 0") != "" and retry > 0:
                retry -= 1
                time.sleep(0.5)

            assert node1.query("SELECT sum(endsWith(s1, 'x')) FROM {}".format(name)) == "25\n"
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
@pytest.mark.parametrize("name,engine", [
    pytest.param("alter_modifying_mt", "MergeTree()", id="mt"),
    pytest.param("replicated_alter_modifying_mt", "ReplicatedMergeTree('/clickhouse/replicated_alter_modifying_mt', '1')", id="replicated"),
])
def test_concurrent_alter_modify(start_cluster, name, engine):
    # Stress test: random part MOVEs racing with ALTER MODIFY COLUMN
    # (UInt64 <-> String) must neither crash the server nor lose rows.
    try:
        node1.query_with_retry("""
            CREATE TABLE IF NOT EXISTS {name} (
                EventDate Date,
                number UInt64
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY toYYYYMM(EventDate)
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name, engine=engine))

        # Unique values for the inserted rows.
        values = list({random.randint(1, 1000000) for _ in range(0, 1000)})

        def insert(num):
            for i in range(num):
                day = random.randint(11, 30)
                value = values.pop()
                month = '0' + str(random.choice([3, 4]))
                node1.query_with_retry("INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(name, m=month, d=day, v=value))

        def alter_move(num):
            for i in range(num):
                produce_alter_move(node1, name)

        def alter_modify(num):
            for i in range(num):
                column_type = random.choice(["UInt64", "String"])
                try:
                    node1.query("ALTER TABLE {} MODIFY COLUMN number {}".format(name, column_type))
                except:
                    # Replicated tables may legitimately reject concurrent
                    # metadata changes; for plain MergeTree a failure is real.
                    if "Replicated" not in engine:
                        raise

        insert(100)

        assert node1.query("SELECT COUNT() FROM {}".format(name)) == "100\n"

        p = Pool(50)
        tasks = []
        for i in range(5):
            tasks.append(p.apply_async(alter_move, (100,)))
            tasks.append(p.apply_async(alter_modify, (100,)))

        for task in tasks:
            task.get(timeout=120)

        assert node1.query("SELECT 1") == "1\n"  # server survived
        # No rows may be lost by the concurrent type changes.
        assert node1.query("SELECT COUNT() FROM {}".format(name)) == "100\n"
    finally:
        node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
def test_simple_replication_and_moves(start_cluster):
    """Replication must keep working while parts are moved between disks.

    Phase 1: concurrent inserts and OPTIMIZE FINAL on both replicas, then a
    sync and a row-count check. Phase 2: with merges stopped, fresh inserts
    stay on 'jbod1' while previously moved data lives on 'external'.

    BUG FIX: the four result checks were previously bare comparison
    expressions whose values were discarded, so they never failed; they are
    now real assertions.
    """
    try:
        for i, node in enumerate([node1, node2]):
            node.query_with_retry("""
                CREATE TABLE IF NOT EXISTS replicated_table_for_moves (
                    s1 String
                ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_moves', '{}')
                ORDER BY tuple()
                SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2
            """.format(i + 1))

        def insert(num):
            for i in range(num):
                node = random.choice([node1, node2])
                data = []  # 1MB in total
                for i in range(2):
                    data.append(get_random_string(512 * 1024))  # 500KB value
                node.query_with_retry("INSERT INTO replicated_table_for_moves VALUES {}".format(
                    ','.join(["('" + x + "')" for x in data])))

        def optimize(num):
            for i in range(num):
                node = random.choice([node1, node2])
                node.query_with_retry("OPTIMIZE TABLE replicated_table_for_moves FINAL")

        p = Pool(60)
        tasks = []
        tasks.append(p.apply_async(insert, (20,)))
        tasks.append(p.apply_async(optimize, (20,)))

        for task in tasks:
            task.get(timeout=60)

        node1.query_with_retry("SYSTEM SYNC REPLICA ON CLUSTER test_cluster replicated_table_for_moves", timeout=5)

        # 20 inserts x 2 rows each, replicated to both nodes.
        assert node1.query("SELECT COUNT() FROM replicated_table_for_moves") == "40\n"
        assert node2.query("SELECT COUNT() FROM replicated_table_for_moves") == "40\n"

        data = []  # 1MB in total
        for i in range(2):
            data.append(get_random_string(512 * 1024))  # 500KB value

        time.sleep(3)  # wait until old parts will be deleted
        node1.query("SYSTEM STOP MERGES")
        node2.query("SYSTEM STOP MERGES")

        node1.query_with_retry(
            "INSERT INTO replicated_table_for_moves VALUES {}".format(','.join(["('" + x + "')" for x in data])))
        node2.query_with_retry(
            "INSERT INTO replicated_table_for_moves VALUES {}".format(','.join(["('" + x + "')" for x in data])))

        time.sleep(3)  # nothing was moved

        disks1 = get_used_disks_for_table(node1, "replicated_table_for_moves")
        disks2 = get_used_disks_for_table(node2, "replicated_table_for_moves")

        node2.query("SYSTEM START MERGES ON CLUSTER test_cluster")

        # New inserts land on jbod1; older, moved data resides on external.
        assert set(disks1) == set(["jbod1", "external"])
        assert set(disks2) == set(["jbod1", "external"])
    finally:
        for node in [node1, node2]:
            node.query("DROP TABLE IF EXISTS replicated_table_for_moves SYNC")
def test_download_appropriate_disk(start_cluster):
    # When a replica fetches ~50MB of parts, it must choose a disk with
    # enough free space ('external'), not merely the first disk of the policy.
    try:
        for i, node in enumerate([node1, node2]):
            node.query_with_retry("""
                CREATE TABLE IF NOT EXISTS replicated_table_for_download (
                    s1 String
                ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_download', '{}')
                ORDER BY tuple()
                SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2
            """.format(i + 1))

        data = []
        for i in range(50):
            data.append(get_random_string(1024 * 1024))  # 1MB value

        node1.query_with_retry(
            "INSERT INTO replicated_table_for_download VALUES {}".format(','.join(["('" + x + "')" for x in data])))

        # Replication may lag behind the insert; retry the sync a few times.
        for _ in range(10):
            try:
                print("Syncing replica")
                node2.query_with_retry("SYSTEM SYNC REPLICA replicated_table_for_download")
                break
            except:
                time.sleep(0.5)

        disks2 = get_used_disks_for_table(node2, "replicated_table_for_download")

        assert set(disks2) == set(["external"])
    finally:
        for node in [node1, node2]:
            node.query_with_retry("DROP TABLE IF EXISTS replicated_table_for_download SYNC")
def test_rename(start_cluster):
    """RENAME TABLE must work for multi-disk tables -- both within a database
    and across databases -- leaving the data readable only under the new name."""
    try:
        node1.query("""
            CREATE TABLE IF NOT EXISTS default.renaming_table (
                s String
            ) ENGINE = MergeTree
            ORDER BY tuple()
            SETTINGS storage_policy='small_jbod_with_external'
        """)

        # Five inserts of ten 1MB rows each -- enough to spill onto a second disk.
        for _ in range(5):
            payload = [get_random_string(1024 * 1024) for _ in range(10)]
            node1.query("INSERT INTO renaming_table VALUES {}".format(','.join(["('" + x + "')" for x in payload])))

        disks = get_used_disks_for_table(node1, "renaming_table")
        assert len(disks) > 1
        assert node1.query("SELECT COUNT() FROM default.renaming_table") == "50\n"

        # Rename within the same database: the old name must stop resolving.
        node1.query("RENAME TABLE default.renaming_table TO default.renaming_table1")
        assert node1.query("SELECT COUNT() FROM default.renaming_table1") == "50\n"
        with pytest.raises(QueryRuntimeException):
            node1.query("SELECT COUNT() FROM default.renaming_table")

        # Rename into another database.
        node1.query("CREATE DATABASE IF NOT EXISTS test")
        node1.query("RENAME TABLE default.renaming_table1 TO test.renaming_table2")
        assert node1.query("SELECT COUNT() FROM test.renaming_table2") == "50\n"
        with pytest.raises(QueryRuntimeException):
            node1.query("SELECT COUNT() FROM default.renaming_table1")
    finally:
        node1.query("DROP TABLE IF EXISTS default.renaming_table SYNC")
        node1.query("DROP TABLE IF EXISTS default.renaming_table1 SYNC")
        node1.query("DROP TABLE IF EXISTS test.renaming_table2 SYNC")
def test_freeze(start_cluster):
    # FREEZE PARTITION must create backup files under shadow/ on every disk
    # that holds parts of the partition.
    try:
        node1.query("""
            CREATE TABLE IF NOT EXISTS default.freezing_table (
                d Date,
                s String
            ) ENGINE = MergeTree
            ORDER BY tuple()
            PARTITION BY toYYYYMM(d)
            SETTINGS storage_policy='small_jbod_with_external'
        """)

        for _ in range(5):
            data = []
            dates = []
            for i in range(10):
                data.append(get_random_string(1024 * 1024))  # 1MB value
                dates.append("toDate('2019-03-05')")
            node1.query("INSERT INTO freezing_table VALUES {}".format(
                ','.join(["(" + d + ", '" + s + "')" for d, s in zip(dates, data)])))

        disks = get_used_disks_for_table(node1, "freezing_table")
        assert len(disks) > 1  # the 50MB spilled onto the second disk
        assert node1.query("SELECT COUNT() FROM default.freezing_table") == "50\n"

        node1.query("ALTER TABLE freezing_table FREEZE PARTITION 201903")
        # check shadow files (backups) exists
        # grep '.*' exits non-zero when find prints nothing; presumably
        # exec_in_container raises on a non-zero exit code, which makes these
        # two lines the actual assertions -- TODO confirm.
        node1.exec_in_container(["bash", "-c", "find /jbod1/shadow -name '*.mrk2' | grep '.*'"])
        node1.exec_in_container(["bash", "-c", "find /external/shadow -name '*.mrk2' | grep '.*'"])
    finally:
        node1.query("DROP TABLE IF EXISTS default.freezing_table SYNC")
        node1.exec_in_container(["rm", "-rf", "/jbod1/shadow", "/external/shadow"])
def test_kill_while_insert(start_cluster):
    """A part MOVE must not block behind a long-running SELECT, and the moved
    data must survive a hard (kill) server restart.

    Minor cleanup: removed the unused ``dates`` list that was never populated
    or read in this test.
    """
    try:
        name = "test_kill_while_insert"

        node1.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                s String
            ) ENGINE = MergeTree
            ORDER BY tuple()
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name))

        data = []
        for i in range(10):
            data.append(get_random_string(1024 * 1024))  # 1MB value
        node1.query("INSERT INTO {name} VALUES {}".format(','.join(["('" + s + "')" for s in data]), name=name))

        disks = get_used_disks_for_table(node1, name)
        assert set(disks) == {"jbod1"}

        def ignore_exceptions(f, *args):
            # The long SELECT is expected to die when the server is killed.
            try:
                f(*args)
            except:
                """(っಠ‿ಠ)っ"""

        start_time = time.time()
        long_select = threading.Thread(target=ignore_exceptions, args=(node1.query, "SELECT sleep(3) FROM {name}".format(name=name)))
        long_select.start()

        time.sleep(0.5)

        node1.query("ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'external'".format(name=name))
        # The MOVE must finish while the SELECT is still sleeping, i.e. moves
        # do not wait for running queries.
        assert time.time() - start_time < 2

        # Hard kill right after the move: the relocated data must be intact.
        node1.restart_clickhouse(kill=True)

        try:
            long_select.join()
        except:
            """"""

        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["10"]
    finally:
        try:
            node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
        except:
            """ClickHouse may be inactive at this moment and we don't want to mask a meaningful exception."""
def test_move_while_merge(start_cluster):
    """MOVE PART must be rejected while the part participates in a running
    merge, and repeated MOVE attempts of a merged-away part must keep failing.

    BUG FIX: the ``threading.Thread`` object previously shadowed the
    ``optimize`` function it was built from; the thread now has its own name.
    """
    try:
        name = "test_move_while_merge"

        node1.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                n Int64
            ) ENGINE = MergeTree
            ORDER BY sleep(2)
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name))

        node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
        node1.query("INSERT INTO {name} VALUES (2)".format(name=name))

        parts = get_used_parts_for_table(node1, name)
        assert len(parts) == 2

        def optimize():
            node1.query("OPTIMIZE TABLE {name}".format(name=name))

        optimize_thread = threading.Thread(target=optimize)
        optimize_thread.start()

        time.sleep(0.5)

        # The merge (slowed by ORDER BY sleep(2)) is still running, so moving
        # one of its source parts must be rejected.
        with pytest.raises(QueryRuntimeException):
            node1.query("ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))

        exiting = False
        no_exception = {}

        def alter():
            # Keep retrying the MOVE; record if it ever unexpectedly succeeds.
            while not exiting:
                try:
                    node1.query(
                        "ALTER TABLE {name} MOVE PART '{part}' TO DISK 'external'".format(name=name, part=parts[0]))
                    no_exception['missing'] = 'exception'
                    break
                except QueryRuntimeException:
                    """"""

        alter_thread = threading.Thread(target=alter)
        alter_thread.start()

        optimize_thread.join()
        time.sleep(0.5)

        exiting = True
        alter_thread.join()
        # After the merge the source part is no longer active, so every MOVE
        # attempt must have raised.
        assert len(no_exception) == 0

        assert node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == ["2"]
    finally:
        node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
def test_move_across_policies_does_not_work(start_cluster):
    # Parts must never migrate between tables with different storage policies:
    # ATTACH/REPLACE PARTITION FROM and MOVE ... TO TABLE across policies
    # must all be rejected, leaving the source data untouched.
    try:
        name = "test_move_across_policies_does_not_work"

        node1.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                n Int64
            ) ENGINE = MergeTree
            ORDER BY tuple()
            SETTINGS storage_policy='jbods_with_external'
        """.format(name=name))

        node1.query("""
            CREATE TABLE IF NOT EXISTS {name}2 (
                n Int64
            ) ENGINE = MergeTree
            ORDER BY tuple()
            SETTINGS storage_policy='small_jbod_with_external'
        """.format(name=name))

        node1.query("""INSERT INTO {name} VALUES (1)""".format(name=name))
        # Best-effort: the part may already be on jbod2.
        try:
            node1.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO DISK 'jbod2'""".format(name=name))
        except QueryRuntimeException:
            """All parts of partition 'all' are already on disk 'jbod2'."""

        with pytest.raises(QueryRuntimeException, match='.*because disk does not belong to storage policy.*'):
            node1.query("""ALTER TABLE {name}2 ATTACH PARTITION tuple() FROM {name}""".format(name=name))

        with pytest.raises(QueryRuntimeException, match='.*because disk does not belong to storage policy.*'):
            node1.query("""ALTER TABLE {name}2 REPLACE PARTITION tuple() FROM {name}""".format(name=name))

        with pytest.raises(QueryRuntimeException, match='.*should have the same storage policy of source table.*'):
            node1.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO TABLE {name}2""".format(name=name))

        # The original row is untouched by all the failed operations.
        assert node1.query("""SELECT * FROM {name}""".format(name=name)).splitlines() == ["1"]
    finally:
        node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
        node1.query(f"DROP TABLE IF EXISTS {name}2 SYNC")
def _insert_merge_execute(node, name, policy, parts, cmds, parts_before_cmds, parts_after_cmds):
    """Insert *parts* single-row parts, OPTIMIZE, run *cmds*, OPTIMIZE again.

    Asserts the active part count equals *parts_before_cmds* after the first
    OPTIMIZE and *parts_after_cmds* after the commands + second OPTIMIZE.
    The ``TTL now()-1`` clause pushes all parts to the 'external' volume
    immediately on insert.

    Cleanup: the list of active parts no longer overwrites the ``parts``
    count parameter (it previously shadowed it after the first OPTIMIZE).
    """
    try:
        node.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                n Int64
            ) ENGINE = MergeTree
            ORDER BY tuple()
            PARTITION BY tuple()
            TTL now()-1 TO VOLUME 'external'
            SETTINGS storage_policy='{policy}'
        """.format(name=name, policy=policy))

        for i in range(parts):
            node.query("""INSERT INTO {name} VALUES ({n})""".format(name=name, n=i))
        disks = get_used_disks_for_table(node, name)
        assert set(disks) == {"external"}

        node.query("""OPTIMIZE TABLE {name}""".format(name=name))
        active_parts = get_used_parts_for_table(node, name)
        assert len(active_parts) == parts_before_cmds

        for cmd in cmds:
            node.query(cmd)

        node.query("""OPTIMIZE TABLE {name}""".format(name=name))
        active_parts = get_used_parts_for_table(node, name)
        assert len(active_parts) == parts_after_cmds
    finally:
        node.query(f"DROP TABLE IF EXISTS {name} SYNC")
def _check_merges_are_working(node, storage_policy, volume, shall_work):
    """Check whether background merges on *volume* of *storage_policy* are
    enabled (``shall_work=True``) or disabled (``shall_work=False``).

    Creates 24 one-row parts, moves them onto the volume, runs OPTIMIZE FINAL
    and checks whether the parts were merged into one.

    BUG FIX: the final check was
    ``assert len(parts) == 1 if shall_work else created_parts``, which Python
    parses as ``assert ((len(parts) == 1) if shall_work else created_parts)``
    -- when ``shall_work`` is False it asserted the truthy integer 24 and
    always passed. The expected count is now parenthesized and actually
    compared.
    """
    try:
        name = "_check_merges_are_working_{storage_policy}_{volume}".format(storage_policy=storage_policy, volume=volume)

        node.query("""
            CREATE TABLE IF NOT EXISTS {name} (
                n Int64
            ) ENGINE = MergeTree
            ORDER BY tuple()
            PARTITION BY tuple()
            SETTINGS storage_policy='{storage_policy}'
        """.format(name=name, storage_policy=storage_policy))

        created_parts = 24

        for i in range(created_parts):
            node.query("""INSERT INTO {name} VALUES ({n})""".format(name=name, n=i))
            try:
                node.query("""ALTER TABLE {name} MOVE PARTITION tuple() TO VOLUME '{volume}' """.format(name=name, volume=volume))
            except:
                """Ignore 'nothing to move'."""

        # Every disk actually used must belong to the target volume.
        expected_disks = set(node.query("""
            SELECT disks FROM system.storage_policies ARRAY JOIN disks WHERE volume_name = '{volume_name}'
        """.format(volume_name=volume)).splitlines())
        disks = get_used_disks_for_table(node, name)
        assert set(disks) <= expected_disks

        node.query("""OPTIMIZE TABLE {name} FINAL""".format(name=name))
        parts = get_used_parts_for_table(node, name)
        assert len(parts) == (1 if shall_work else created_parts)
    finally:
        node.query(f"DROP TABLE IF EXISTS {name} SYNC")
def _get_prefer_not_to_merge_for_storage_policy(node, storage_policy):
return list(map(int, node.query("SELECT prefer_not_to_merge FROM system.storage_policies WHERE policy_name = '{}' ORDER BY volume_priority".format(storage_policy)).splitlines()))
def test_simple_merge_tree_merges_are_disabled(start_cluster):
    # Volume 'external' of this policy is configured to avoid merges, so the
    # helper is asked to verify that merges do NOT happen (shall_work=False).
    _check_merges_are_working(node1, "small_jbod_with_external_no_merges", "external", False)
def test_no_merges_in_configuration_allow_from_query_without_reload(start_cluster):
    # Config disables merges on the 'external' volume; SYSTEM START MERGES ON
    # VOLUME must re-enable them at runtime even without SYSTEM RELOAD CONFIG.
    try:
        name = "test_no_merges_in_configuration_allow_from_query_without_reload"
        policy = "small_jbod_with_external_no_merges"
        # Restart drops any runtime overrides so we start from the config state.
        node1.restart_clickhouse(kill=True)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
        _check_merges_are_working(node1, policy, "external", False)

        _insert_merge_execute(node1, name, policy, 2, [
            "SYSTEM START MERGES ON VOLUME {}.external".format(policy)
        ], 2, 1)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
        _check_merges_are_working(node1, policy, "external", True)
    finally:
        # Restore the disabled state for subsequent tests.
        node1.query("SYSTEM STOP MERGES ON VOLUME {}.external".format(policy))
def test_no_merges_in_configuration_allow_from_query_with_reload(start_cluster):
    # Like the "without_reload" variant, but additionally runs SYSTEM RELOAD
    # CONFIG after enabling merges: the runtime override must survive a
    # config reload.
    try:
        name = "test_no_merges_in_configuration_allow_from_query_with_reload"
        policy = "small_jbod_with_external_no_merges"
        # Restart drops any runtime overrides so we start from the config state.
        node1.restart_clickhouse(kill=True)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
        _check_merges_are_working(node1, policy, "external", False)

        _insert_merge_execute(node1, name, policy, 2, [
            "SYSTEM START MERGES ON VOLUME {}.external".format(policy),
            "SYSTEM RELOAD CONFIG"
        ], 2, 1)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
        _check_merges_are_working(node1, policy, "external", True)
    finally:
        # Restore the disabled state for subsequent tests.
        node1.query("SYSTEM STOP MERGES ON VOLUME {}.external".format(policy))
def test_no_merges_in_configuration_allow_from_query_with_reload_on_cluster(start_cluster):
    """Like test_no_merges_in_configuration_allow_from_query_with_reload, but
    the SYSTEM statements are issued ON CLUSTER.

    BUG FIX: the test table name previously duplicated the non-cluster
    test's name ("..._with_reload"), risking collisions between the two
    tests; it now carries the "_on_cluster" suffix.
    """
    try:
        name = "test_no_merges_in_configuration_allow_from_query_with_reload_on_cluster"
        policy = "small_jbod_with_external_no_merges"
        # Restart drops any runtime overrides so we start from the config state.
        node1.restart_clickhouse(kill=True)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
        _check_merges_are_working(node1, policy, "external", False)

        _insert_merge_execute(node1, name, policy, 2, [
            "SYSTEM START MERGES ON CLUSTER test_cluster ON VOLUME {}.external".format(policy),
            "SYSTEM RELOAD CONFIG ON CLUSTER test_cluster"
        ], 2, 1)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
        _check_merges_are_working(node1, policy, "external", True)
    finally:
        # Restore the disabled state for subsequent tests.
        node1.query("SYSTEM STOP MERGES ON CLUSTER test_cluster ON VOLUME {}.external".format(policy))
def test_yes_merges_in_configuration_disallow_from_query_without_reload(start_cluster):
    # Config allows merges on 'external'; SYSTEM STOP MERGES ON VOLUME must
    # disable them at runtime without a SYSTEM RELOAD CONFIG.
    try:
        # NOTE(review): the table name says "allow" while this test is the
        # "disallow" direction -- looks like a copy-paste leftover; confirm.
        name = "test_yes_merges_in_configuration_allow_from_query_without_reload"
        policy = "small_jbod_with_external"
        # Restart drops any runtime overrides so we start from the config state.
        node1.restart_clickhouse(kill=True)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
        _check_merges_are_working(node1, policy, "external", True)

        # The extra INSERT lands after merges were stopped, so the second
        # OPTIMIZE must leave two parts.
        _insert_merge_execute(node1, name, policy, 2, [
            "SYSTEM STOP MERGES ON VOLUME {}.external".format(policy),
            "INSERT INTO {name} VALUES (2)".format(name=name)
        ], 1, 2)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
        _check_merges_are_working(node1, policy, "external", False)
    finally:
        # Restore the enabled state for subsequent tests.
        node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy))
def test_yes_merges_in_configuration_disallow_from_query_with_reload(start_cluster):
    # Like the "without_reload" variant, but additionally runs SYSTEM RELOAD
    # CONFIG after stopping merges: the runtime override must survive a
    # config reload.
    try:
        # NOTE(review): the table name says "allow" while this test is the
        # "disallow" direction -- looks like a copy-paste leftover; confirm.
        name = "test_yes_merges_in_configuration_allow_from_query_with_reload"
        policy = "small_jbod_with_external"
        # Restart drops any runtime overrides so we start from the config state.
        node1.restart_clickhouse(kill=True)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 0]
        _check_merges_are_working(node1, policy, "external", True)

        # The extra INSERT lands after merges were stopped, so the second
        # OPTIMIZE must leave two parts.
        _insert_merge_execute(node1, name, policy, 2, [
            "SYSTEM STOP MERGES ON VOLUME {}.external".format(policy),
            "INSERT INTO {name} VALUES (2)".format(name=name),
            "SYSTEM RELOAD CONFIG"
        ], 1, 2)
        assert _get_prefer_not_to_merge_for_storage_policy(node1, policy) == [0, 1]
        _check_merges_are_working(node1, policy, "external", False)
    finally:
        # Restore the enabled state for subsequent tests.
        node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy))
|
predict.py | #
# Copyright (c) 2018, Salesforce, Inc.
# The Board of Trustees of the Leland Stanford Junior University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import os
from pprint import pformat
from collections import defaultdict
import copy
import shutil
# multiprocessing with CUDA
from torch.multiprocessing import Process, set_start_method
try:
set_start_method('spawn')
except RuntimeError:
pass
import torch
import pickle
from . import models
from .tasks.registry import get_tasks
from .util import set_seed, load_config_json, make_data_loader, log_model_size, init_devices, \
have_multilingual, combine_folders_on_disk, split_folder_on_disk, get_part_path
from .validate import generate_with_model, calculate_and_reduce_metrics
from .calibrate import ConfidenceEstimator
from .arguments import check_and_update_generation_args
logger = logging.getLogger(__name__)
def get_all_splits(args):
    """Load the dataset split selected by ``args.evaluate`` for every task.

    Returns a list with one entry per task; each entry is itself a list of
    datasets, because a task evaluated per-language separately yields one
    dataset per language.
    """
    splits = []
    # Broadcast a single language spec across all tasks.
    if len(args.pred_languages) == 1 and len(args.tasks) > 1:
        args.pred_languages *= len(args.tasks)
    for i, task in enumerate(args.tasks):
        task_languages = args.pred_languages[i]
        logger.info(f'Loading {task}')
        kwargs = {'train': None, 'validation': None, 'test': None}
        if args.evaluate == 'train':
            del kwargs['train']  # deleting keys means use the default file name
        elif args.evaluate == 'valid':
            kwargs['validation'] = args.pred_set_name
        elif args.evaluate == 'test':
            del kwargs['test']
        else:
            raise ValueError('Split used for prediction should be either train, valid or test')

        kwargs.update({'skip_cache': args.skip_cache, 'subsample': args.subsample,
                       'cached_path': os.path.join(args.cache, task.name), 'all_dirs': task_languages,
                       'almond_lang_as_question': args.almond_lang_as_question})
        kwargs['separate_eval'] = args.separate_eval
        task_splits = task.get_splits(root=args.data, lower=args.lower, **kwargs)
        if not isinstance(task_splits, list):
            task_splits = [task_splits]
        task_split_processed = []
        for split in task_splits:
            # Exactly one of train/eval/test carries data here; aux is never
            # used for prediction.
            assert (split.eval or split.test or split.train) and not split.aux
            if split.train:
                split = split.train
            elif split.eval:
                split = split.eval
            else:
                split = split.test
            task_split_processed.append(split)
        splits.append(task_split_processed)
    return splits
def prepare_data_iterators(args, val_sets, numericalizer, device):
    """Build ``(task, language, data_loader, original_order)`` tuples.

    When a multilingual task is evaluated with ``args.separate_eval``, one
    loader is created per language; otherwise a single loader covers the
    whole validation set.
    """
    logger.info(f'Preparing data iterators')
    # Broadcast a single batch size across all tasks.
    if len(args.val_batch_size) == 1 and len(val_sets) > 1:
        args.val_batch_size *= len(val_sets)
    iters = []
    task_index = 0
    for task, bs, val_set in zip(args.tasks, args.val_batch_size, val_sets):
        task_iter = []
        task_languages = args.pred_languages[task_index]
        if task_languages is not None and args.separate_eval:
            task_languages = task_languages.split('+')
            # get_all_splits produced one dataset per language in this case.
            assert len(task_languages) == len(val_set)
            for index, set_ in enumerate(val_set):
                loader, original_order = make_data_loader(set_, numericalizer, bs, device, train=False, return_original_order=True)
                task_iter.append((task, task_languages[index], loader, original_order))
        # single language task or no separate eval
        else:
            loader, original_order = make_data_loader(val_set[0], numericalizer, bs, device, train=False, return_original_order=True)
            task_iter.append((task, task_languages, loader, original_order))

        iters.extend(task_iter)
        task_index += 1

    return iters
def run(args, device):
    """Run prediction for all tasks/languages configured in *args* on *device*.

    Loads the model from ``args.path``, iterates over the prepared data
    loaders, writes one predictions TSV per task (or per task+language) into
    ``args.eval_dir/args.evaluate`` and -- when gold answers exist -- a
    ``.results.json`` with metrics; finally logs the aggregated decaScore.
    """
    Model = getattr(models, args.model)
    model, _ = Model.from_pretrained(args.path,
                                     model_checkpoint_file=args.checkpoint_name,
                                     args=args,
                                     device=device,
                                     tasks=args.tasks,
                                     )
    # Decoder start token follows the first prediction language.
    if args.pred_languages[0] is not None:
        model.set_decoder_start_token_id(args.pred_languages[0].split('+')[0])
    else:
        # use English as default
        model.set_decoder_start_token_id('en')
    val_sets = get_all_splits(args)
    model.add_new_vocab_from_data(args.tasks)
    iters = prepare_data_iterators(args, val_sets, model.numericalizer, device)

    log_model_size(logger, model, args.model)
    model.to(device)

    decaScore = []
    task_scores = defaultdict(list)
    model.eval()

    eval_dir = os.path.join(args.eval_dir, args.evaluate)
    os.makedirs(eval_dir, exist_ok=True)

    with torch.no_grad():
        for task, language, it, original_order in iters:
            logger.info(task.name)
            # single language task
            if language is None:
                prediction_file_name = os.path.join(eval_dir, task.name + '.tsv')
                results_file_name = os.path.join(eval_dir, task.name + '.results.json')
            # multi language task
            else:
                prediction_file_name = os.path.join(eval_dir, task.name + '_{}.tsv'.format(language))
                results_file_name = os.path.join(eval_dir, task.name + '_{}.results.json'.format(language))
            # Refuse to clobber existing outputs unless --overwrite was given.
            if os.path.exists(prediction_file_name):
                if args.overwrite:
                    logger.warning(f'{prediction_file_name} already exists -- overwriting **')
                else:
                    raise OSError(f'{prediction_file_name} already exists')
            if os.path.exists(results_file_name):
                if args.overwrite:
                    logger.warning(f'{results_file_name} already exists -- overwriting **')
                else:
                    raise OSError(f'{results_file_name} already exists')

            if args.calibrator_path is not None:
                confidence_estimator = ConfidenceEstimator.load(args.calibrator_path)
                logger.info('Loading confidence estimator "%s" from %s', confidence_estimator.name, args.calibrator_path)
            else:
                confidence_estimator = None

            with torch.cuda.amp.autocast(enabled=args.mixed_precision):
                generation_output = generate_with_model(model, it, model.numericalizer, task, args,
                                                        original_order=original_order,
                                                        output_confidence_features=args.save_confidence_features,
                                                        confidence_estimator=confidence_estimator,
                                                        disable_progbar=False)

            if args.save_confidence_features:
                with open(args.confidence_feature_path, 'wb') as f:
                    pickle.dump(generation_output.confidence_features, f, protocol=4)

            # write into file
            # TODO change to jsonl format
            # Mode is 'x' (fail if exists) unless --overwrite; the existence
            # check above already handled the error message.
            with open(prediction_file_name, 'w' + ('' if args.overwrite else 'x')) as prediction_file:
                for i in range(len(generation_output.example_ids)):
                    line = generation_output.example_ids[i] + '\t' + '\t'.join(generation_output.predictions[i])  # all outputs separated by '\t'
                    if args.calibrator_path is not None:
                        line += '\t' + str(generation_output.confidence_scores[i])
                    prediction_file.write(line + '\n')

            if len(generation_output.answers) > 0:
                metrics_to_compute = task.metrics
                if args.main_metric_only:
                    metrics_to_compute = [metrics_to_compute[0]]
                metrics = calculate_and_reduce_metrics(generation_output.predictions, generation_output.answers, metrics_to_compute, args)

                # NOTE(review): 'w+' (unlike the 'x' used above) does NOT fail
                # when the file exists; this relies on the earlier existence
                # check -- consider 'x' for consistency.
                with open(results_file_name, 'w' + ('' if args.overwrite else '+')) as results_file:
                    results_file.write(json.dumps(metrics) + '\n')

                if not args.silent:
                    for i, (c, p, a) in enumerate(zip(generation_output.contexts, generation_output.predictions, generation_output.answers)):
                        log_string = f'\nContext {i+1}: {c}\nPrediction {i + 1} ({len(p)} outputs): {p}\nAnswer {i + 1}: {a}\n'
                        if args.calibrator_path is not None:
                            log_string += f'Confidence {i+1} : {generation_output.confidence_scores[i]:.3f}\n'
                        logger.info(log_string)

                logger.info(metrics)

                # (example_count, main_metric) pairs, later combined into a
                # weighted average per task.
                task_scores[task].append((len(generation_output.answers), metrics[task.metrics[0]]))

    # Weighted average of each task's main metric, weighted by example count.
    for task in task_scores.keys():
        decaScore.append(sum([length * score for length, score in task_scores[task]]) / sum([length for length, score in task_scores[task]]))

    logger.info(f'Evaluated Tasks:\n')
    for i, task in enumerate(args.tasks):
        logger.info(f'{task.name}: {decaScore[i]}')
    logger.info(f'-------------------')
    logger.info(f'DecaScore: {sum(decaScore)}\n')
    logger.info(f'\nSummary: | {sum(decaScore)} | {" | ".join([str(x) for x in decaScore])} |\n')
def parse_argv(parser):
    """Register all command-line options for the prediction/evaluation script.

    Only declares arguments on ``parser``; the parsed values are consumed by
    ``main()``. Generation hyperparameters accept lists so several settings can
    be evaluated in one invocation.
    """
    parser.add_argument('--path', type=str, required=True, help='Folder to load the model from')
    parser.add_argument('--evaluate', type=str, required=True, choices=['train', 'valid', 'test'],
                        help='Which dataset to do predictions for (train, valid or test)')
    parser.add_argument('--pred_set_name', default='eval', type=str, help='Name of dataset to run prediction for; will be ignored if --evaluate is test')
    parser.add_argument('--tasks',
                        default=['almond', 'squad', 'iwslt.en.de', 'cnn_dailymail', 'multinli.in.out', 'sst', 'srl',
                                 'zre', 'woz.en', 'wikisql', 'schema'], dest='task_names', nargs='+')
    parser.add_argument('--devices', default=None, nargs='+', type=int,
                        help='a list of devices that can be used for prediction. By default, all devices will be used.')
    parser.add_argument('--seed', default=123, type=int, help='Random seed.')
    parser.add_argument('--data', default='.data/', type=str, help='where to load data from.')
    parser.add_argument('--embeddings', default='.embeddings/', type=str, help='where to save embeddings.')
    parser.add_argument('--checkpoint_name', default='best.pth',
                        help='Checkpoint file to use (relative to --path, defaults to best.pth)')
    parser.add_argument('--overwrite', action='store_true', help='whether to overwrite previously written predictions')
    parser.add_argument('--silent', action='store_true', help='whether to print predictions to stdout')
    parser.add_argument('--skip_cache', action='store_true',
                        help='whether to use existing cached splits or generate new ones')
    parser.add_argument('--eval_dir', type=str, required=True, help='use this directory to store eval results')
    parser.add_argument('--cache', default='.cache', type=str, help='where to save cached files')
    parser.add_argument('--subsample', default=20000000, type=int, help='subsample the eval/test datasets (experimental)')
    parser.add_argument('--pred_languages', type=str, nargs='+',
                        help='used to specify dataset languages used during prediction for multilingual tasks; '
                             'multiple languages for each task should be concatenated with +')
    parser.add_argument('--separate_eval', action='store_true',
                        help='evaluate on each language eval set separately')
    parser.add_argument('--main_metric_only', action='store_true', help='If True, we only calculate the deca score metric for each task.')
    # If not None, these values will override the values saved in the trained model's config file
    parser.add_argument('--val_batch_size', nargs='+', default=None, type=int,
                        help='Batch size for validation corresponding to tasks in val tasks')
    parser.add_argument("--reduce_metrics", type=str, default='max', choices=['max'], help='How to calculate the metric when there are multiple outputs per input.')
    # These are generation hyperparameters. Each one can be a list of values in which case, we generate `num_outputs` outputs for each set of hyperparameters.
    parser.add_argument("--num_outputs", type=int, nargs='+', default=[1], help='number of sequences to output per input')
    parser.add_argument("--temperature", type=float, nargs='+', default=[0.0],
                        help="temperature of 0 implies greedy sampling")
    parser.add_argument("--repetition_penalty", type=float, nargs='+', default=[1.0],
                        help="primarily useful for CTRL model; in that case, use 1.2")
    parser.add_argument("--top_k", type=int, nargs='+', default=[0], help='0 disables top-k filtering')
    parser.add_argument("--top_p", type=float, nargs='+', default=[1.0], help='1.0 disables top-p filtering')
    parser.add_argument("--num_beams", type=int, nargs='+', default=[1], help='1 disables beam search')
    parser.add_argument("--num_beam_groups", type=int, nargs='+', default=[1], help='1 disables diverse beam search')
    parser.add_argument("--diversity_penalty", type=float, nargs='+', default=[0.0], help='0 disables diverse beam search')
    parser.add_argument("--no_repeat_ngram_size", type=int, nargs='+', default=[0], help='ngrams of this size cannot be repeated in the output. 0 disables it.')
    parser.add_argument('--max_output_length', default=150, type=int, help='maximum output length for generation')
    # These are used for confidence calibration
    parser.add_argument('--calibrator_path', type=str, default=None, help='If provided, will be used to output confidence scores for each prediction.')
    parser.add_argument('--save_confidence_features', action='store_true', help='If set, confidence features for each prediction will be saved to --confidence_feature_path.')
    parser.add_argument("--confidence_feature_path", type=str, default=None, help='A .pkl file to save confidence features in.')
    parser.add_argument("--mc_dropout", action='store_true', help='Monte Carlo dropout')
    parser.add_argument("--mc_dropout_num", type=int, default=0, help='Number of samples to use for Monte Carlo dropout')
    parser.add_argument("--mixed_precision", action='store_true', help='If True, will use mixed precision for prediction. '
                        'This reduces memory consumption and is especially faster on GPUs like NVIDIA V100 and T4. May slightly change the generated output.')
def adjust_multilingual_eval(args):
    """Validate and normalize ``args.pred_languages`` against ``args.task_names``.

    Ensures multilingual tasks have prediction languages, pads single-language
    tasks with None, and enforces the single-language restriction for mbart
    models. Mutates ``args`` in place; raises ValueError on inconsistent input.
    """
    if (have_multilingual(args.task_names) and args.pred_languages is None) or (
            args.pred_languages and len(args.task_names) != len(args.pred_languages)):
        # Fixed: the two concatenated literals previously rendered as
        # "...multilingual taskUse None..." (missing separator).
        raise ValueError('You have to define prediction languages when you have a multilingual task. '
                         'Use None for single language tasks. Also provide languages in the same order you provided the tasks.')
    if args.pred_languages is None:
        # No languages given and no multilingual task: one None per task.
        args.pred_languages = [None for _ in range(len(args.task_names))]
    if 'mbart' in args.pretrained_model:
        if args.pred_languages[0] and len(args.pred_languages[0].split('+')) != 1:
            raise ValueError('For now we only support single language prediction with mbart models')
    # preserve backward compatibility for single language tasks
    for i, task_name in enumerate(args.task_names):
        if 'multilingual' in task_name and args.pred_languages[i] is None:
            raise ValueError('You have to define prediction languages for this multilingual task: {}'.format(task_name))
        elif 'multilingual' not in task_name and args.pred_languages[i] is not None:
            logger.warning('prediction languages should be empty for single language tasks')
            args.pred_languages[i] = None
def set_default_values(args):
    """Fill in argument defaults that are derived from other arguments.

    If --confidence_feature_path was not given, default it to
    'confidence_features.pkl' inside the model folder (--path).
    """
    if args.confidence_feature_path is not None:
        return
    args.confidence_feature_path = os.path.join(args.path, 'confidence_features.pkl')
def main(args):
    """Entry point: prepare arguments, then run prediction on one or more devices.

    With multiple devices, the data folder is split on disk, one child process
    runs prediction per device, and the per-device eval outputs are recombined.
    """
    load_config_json(args)
    check_and_update_generation_args(args)
    adjust_multilingual_eval(args)
    set_default_values(args)
    set_seed(args)
    args.tasks = list(get_tasks(args.task_names, args).values())
    logger.info(f'Arguments:\n{pformat(vars(args))}')
    logger.info(f'Loading from {args.best_checkpoint}')
    devices = init_devices(args)
    if args.devices is not None:
        # --devices selects a subset (by index) of the detected devices.
        devices = [devices[i] for i in args.devices]
    if len(devices) > 1:
        # Independent multi-GPU generation
        all_processes = []
        all_data_folders = split_folder_on_disk(args.data, len(devices))
        for device_id in range(len(devices)):
            # Each child gets its own shallow copy of args with its own
            # data shard and eval output directory.
            copy_args = copy.copy(args)
            copy_args.data = all_data_folders[device_id]
            copy_args.eval_dir = get_part_path(args.eval_dir, device_id)
            p = Process(target=run, args=(copy_args, devices[device_id]))
            all_processes.append(p)
            p.start()
        for p in all_processes:
            p.join()
        # Remove the temporary data shards, then merge the per-device results.
        for folder in all_data_folders:
            shutil.rmtree(folder)
        combine_folders_on_disk(args.eval_dir, len(devices), line_group_size=1, delete=True)
    else:
        run(args, devices[0])
|
core.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from unittest import mock
import doctest
import multiprocessing
import os
import pickle # type: ignore
import re
import signal
import sqlalchemy
import subprocess
import tempfile
from tempfile import NamedTemporaryFile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from numpy.testing import assert_array_almost_equal
from time import sleep
from airflow import configuration, jobs, models, DAG, utils, macros, settings, exceptions
from airflow.bin import cli
from airflow.configuration import AirflowConfigException, run_command
from airflow.exceptions import AirflowException
from airflow.executors import SequentialExecutor
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.models import Variable, TaskInstance, BaseOperator, Connection, TaskFail
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from pendulum import utcnow
import six
# Number of bundled example DAGs the DagBag is expected to load (see
# test_import_examples); bump when an example DAG is added or removed.
NUM_EXAMPLE_DAGS = 19
# Used as a dag_folder that contains no DAG files.
DEV_NULL = '/dev/null'
# Folder with the test-only DAG definitions shipped next to this file.
TEST_DAG_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
# Fixed execution date used throughout these tests, plus derived formats.
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
class OperatorSubclass(BaseOperator):
    """
    An operator to test template substitution
    """
    # Fields listed here are rendered by the templating engine before execute().
    template_fields = ['some_templated_field']

    def __init__(self, some_templated_field, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.some_templated_field = some_templated_field

    def execute(self, *args, **kwargs):
        # Fixed: previously declared as execute(*args, **kwargs), which silently
        # swallowed `self` into *args. Intentionally a no-op; tests replace or
        # inspect the rendered template field instead.
        pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
    def setUp(self):
        """Load test config, the example DAGs, and build a fresh test DAG."""
        configuration.conf.load_test_config()
        # DEV_NULL contains no DAG files, so only the bundled examples load.
        self.dagbag = models.DagBag(
            dag_folder=DEV_NULL, include_examples=True)
        self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        self.dag = DAG(TEST_DAG_ID, default_args=self.args)
        # Handy references into the example_bash_operator DAG.
        self.dag_bash = self.dagbag.dags['example_bash_operator']
        self.runme_0 = self.dag_bash.get_task('runme_0')
        self.run_after_loop = self.dag_bash.get_task('run_after_loop')
        self.run_this_last = self.dag_bash.get_task('run_this_last')
    def tearDown(self):
        """Delete DB rows created under TEST_DAG_ID (skipped on Kubernetes CI)."""
        if os.environ.get('KUBERNETES_VERSION') is None:
            session = Session()
            session.query(models.TaskInstance).filter_by(
                dag_id=TEST_DAG_ID).delete()
            session.query(TaskFail).filter_by(
                dag_id=TEST_DAG_ID).delete()
            session.commit()
            session.close()
    def test_schedule_dag_no_previous_runs(self):
        """
        Tests scheduling a dag with no previous runs
        """
        dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
        dag.add_task(models.BaseOperator(
            task_id="faketastic",
            owner='Also fake',
            start_date=datetime(2015, 1, 2, 0, 0)))
        dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
        self.assertIsNotNone(dag_run)
        self.assertEqual(dag.dag_id, dag_run.dag_id)
        self.assertIsNotNone(dag_run.run_id)
        self.assertNotEqual('', dag_run.run_id)
        # With no prior runs, the first run lands on the task's start_date.
        self.assertEqual(
            datetime(2015, 1, 2, 0, 0),
            dag_run.execution_date,
            msg='dag_run.execution_date did not match expectation: {0}'
                .format(dag_run.execution_date)
        )
        self.assertEqual(State.RUNNING, dag_run.state)
        self.assertFalse(dag_run.external_trigger)
        dag.clear()
    def test_schedule_dag_relativedelta(self):
        """
        Tests scheduling a dag with a relativedelta schedule_interval
        """
        delta = relativedelta(hours=+1)
        dag = DAG(TEST_DAG_ID + 'test_schedule_dag_relativedelta',
                  schedule_interval=delta)
        dag.add_task(models.BaseOperator(
            task_id="faketastic",
            owner='Also fake',
            start_date=datetime(2015, 1, 2, 0, 0)))
        dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
        self.assertIsNotNone(dag_run)
        self.assertEqual(dag.dag_id, dag_run.dag_id)
        self.assertIsNotNone(dag_run.run_id)
        self.assertNotEqual('', dag_run.run_id)
        # First run is at the task start_date.
        self.assertEqual(
            datetime(2015, 1, 2, 0, 0),
            dag_run.execution_date,
            msg='dag_run.execution_date did not match expectation: {0}'
                .format(dag_run.execution_date)
        )
        self.assertEqual(State.RUNNING, dag_run.state)
        self.assertFalse(dag_run.external_trigger)
        dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
        self.assertIsNotNone(dag_run2)
        self.assertEqual(dag.dag_id, dag_run2.dag_id)
        self.assertIsNotNone(dag_run2.run_id)
        self.assertNotEqual('', dag_run2.run_id)
        # Second run must be exactly one relativedelta interval later.
        self.assertEqual(
            datetime(2015, 1, 2, 0, 0) + delta,
            dag_run2.execution_date,
            msg='dag_run2.execution_date did not match expectation: {0}'
                .format(dag_run2.execution_date)
        )
        self.assertEqual(State.RUNNING, dag_run2.state)
        self.assertFalse(dag_run2.external_trigger)
        dag.clear()
    def test_schedule_dag_fake_scheduled_previous(self):
        """
        Test scheduling a dag where there is a prior DagRun
        which has the same run_id as the next run should have
        """
        delta = timedelta(hours=1)
        dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
                  schedule_interval=delta,
                  start_date=DEFAULT_DATE)
        dag.add_task(models.BaseOperator(
            task_id="faketastic",
            owner='Also fake',
            start_date=DEFAULT_DATE))
        scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
        # Pre-create an externally-triggered run occupying the first slot.
        dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
                          execution_date=DEFAULT_DATE,
                          state=State.SUCCESS,
                          external_trigger=True)
        dag_run = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dag_run)
        self.assertEqual(dag.dag_id, dag_run.dag_id)
        self.assertIsNotNone(dag_run.run_id)
        self.assertNotEqual('', dag_run.run_id)
        # The scheduler should skip past the occupied slot to start_date + delta.
        self.assertEqual(
            DEFAULT_DATE + delta,
            dag_run.execution_date,
            msg='dag_run.execution_date did not match expectation: {0}'
                .format(dag_run.execution_date)
        )
        self.assertEqual(State.RUNNING, dag_run.state)
        self.assertFalse(dag_run.external_trigger)
        # NOTE(review): unlike sibling tests, no dag.clear() here — confirm
        # whether cleanup is intentionally omitted.
    def test_schedule_dag_once(self):
        """
        Tests scheduling a dag scheduled for @once - should be scheduled the first time
        it is called, and not scheduled the second.
        """
        dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
        dag.schedule_interval = '@once'
        dag.add_task(models.BaseOperator(
            task_id="faketastic",
            owner='Also fake',
            start_date=datetime(2015, 1, 2, 0, 0)))
        dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
        dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
        # Only the first attempt may create a run for an @once DAG.
        self.assertIsNotNone(dag_run)
        self.assertIsNone(dag_run2)
        dag.clear()
    def test_fractional_seconds(self):
        """
        Tests if fractional seconds are stored in the database
        """
        dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
        dag.schedule_interval = '@once'
        dag.add_task(models.BaseOperator(
            task_id="faketastic",
            owner='Also fake',
            start_date=datetime(2015, 1, 2, 0, 0)))
        # utcnow() carries microseconds; they must survive the DB round trip.
        start_date = timezone.utcnow()
        run = dag.create_dagrun(
            run_id='test_' + start_date.isoformat(),
            execution_date=start_date,
            start_date=start_date,
            state=State.RUNNING,
            external_trigger=False
        )
        run.refresh_from_db()
        self.assertEqual(start_date, run.execution_date,
                         "dag run execution_date loses precision")
        self.assertEqual(start_date, run.start_date,
                         "dag run start_date loses precision ")
    def test_schedule_dag_start_end_dates(self):
        """
        Tests that an attempt to schedule a task after the Dag's end_date
        does not succeed.
        """
        delta = timedelta(hours=1)
        runs = 3
        start_date = DEFAULT_DATE
        # end_date is chosen so exactly `runs` intervals fit inside the window.
        end_date = start_date + (runs - 1) * delta
        dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
                  start_date=start_date,
                  end_date=end_date,
                  schedule_interval=delta)
        dag.add_task(models.BaseOperator(task_id='faketastic',
                                         owner='Also fake'))
        # Create and schedule the dag runs
        dag_runs = []
        scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
        for i in range(runs):
            dag_runs.append(scheduler.create_dag_run(dag))
        # One more attempt past end_date must yield nothing.
        additional_dag_run = scheduler.create_dag_run(dag)
        for dag_run in dag_runs:
            self.assertIsNotNone(dag_run)
        self.assertIsNone(additional_dag_run)
    def test_schedule_dag_no_end_date_up_to_today_only(self):
        """
        Tests that a Dag created without an end_date can only be scheduled up
        to and including the current datetime.
        For example, if today is 2016-01-01 and we are scheduling from a
        start_date of 2015-01-01, only jobs up to, but not including
        2016-01-01 should be scheduled.
        """
        session = settings.Session()
        delta = timedelta(days=1)
        now = utcnow()
        start_date = now.subtract(weeks=1)
        runs = (now - start_date).days
        dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
                  start_date=start_date,
                  schedule_interval=delta)
        dag.add_task(models.BaseOperator(task_id='faketastic',
                                         owner='Also fake'))
        dag_runs = []
        scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
        for i in range(runs):
            dag_run = scheduler.create_dag_run(dag)
            dag_runs.append(dag_run)
            # Mark the DagRun as complete
            dag_run.state = State.SUCCESS
            session.merge(dag_run)
            session.commit()
        # Attempt to schedule an additional dag run (for 2016-01-01)
        additional_dag_run = scheduler.create_dag_run(dag)
        for dag_run in dag_runs:
            self.assertIsNotNone(dag_run)
        # The run that would land in the future must not be created.
        self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
    def test_rich_comparison_ops(self):
        """DAG equality, ordering and hashing follow _comps plus the exact type."""
        class DAGsubclass(DAG):
            pass
        dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
        dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
        dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
        dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
        dag_subclass_diff_name = DAGsubclass(
            TEST_DAG_ID + '2', default_args=self.args)
        # Align last_loaded so only the intended attribute differs per case.
        for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
            d.last_loaded = self.dag.last_loaded
        # test identity equality
        self.assertEqual(self.dag, self.dag)
        # test dag (in)equality based on _comps
        self.assertEqual(dag_eq, self.dag)
        self.assertNotEqual(dag_diff_name, self.dag)
        self.assertNotEqual(dag_diff_load_time, self.dag)
        # test dag inequality based on type even if _comps happen to match
        self.assertNotEqual(dag_subclass, self.dag)
        # a dag should equal an unpickled version of itself
        d = pickle.dumps(self.dag)
        self.assertEqual(pickle.loads(d), self.dag)
        # dags are ordered based on dag_id no matter what the type is
        self.assertLess(self.dag, dag_diff_name)
        self.assertGreater(self.dag, dag_diff_load_time)
        self.assertLess(self.dag, dag_subclass_diff_name)
        # greater than should have been created automatically by functools
        self.assertGreater(dag_diff_name, self.dag)
        # hashes are non-random and match equality
        self.assertEqual(hash(self.dag), hash(self.dag))
        self.assertEqual(hash(dag_eq), hash(self.dag))
        self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
        self.assertNotEqual(hash(dag_subclass), hash(self.dag))
    def test_check_operators(self):
        """CheckOperator and ValueCheckOperator run against a scratch sqlite table."""
        conn_id = "sqlite_default"
        captainHook = BaseHook.get_hook(conn_id=conn_id)
        captainHook.run("CREATE TABLE operator_test_table (a, b)")
        captainHook.run("insert into operator_test_table values (1,2)")
        t = CheckOperator(
            task_id='check',
            sql="select count(*) from operator_test_table",
            conn_id=conn_id,
            dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        # 100 is within 10% tolerance of pass_value 95, so this passes.
        t = ValueCheckOperator(
            task_id='value_check',
            pass_value=95,
            tolerance=0.1,
            conn_id=conn_id,
            sql="SELECT 100",
            dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        captainHook.run("drop table operator_test_table")
    def test_clear_api(self):
        """Smoke-test task.clear() and TaskInstance.are_dependents_done()."""
        task = self.dag_bash.tasks[0]
        task.clear(
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
            upstream=True, downstream=True)
        ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.are_dependents_done()
    def test_illegal_args(self):
        """
        Tests that Operators reject illegal arguments
        """
        # Unknown kwargs currently warn (PendingDeprecationWarning) rather than raise.
        with warnings.catch_warnings(record=True) as w:
            BashOperator(
                task_id='test_illegal_args',
                bash_command='echo success',
                dag=self.dag,
                illegal_argument_1234='hello?')
            self.assertTrue(
                issubclass(w[0].category, PendingDeprecationWarning))
            self.assertIn(
                ('Invalid arguments were passed to BashOperator '
                 '(task_id: test_illegal_args).'),
                w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_bash_operator_multi_byte_output(self):
        """A command emitting a multi-byte character (\\u2600) must not break logging."""
        t = BashOperator(
            task_id='test_multi_byte_bash_operator',
            bash_command="echo \u2600",
            dag=self.dag,
            output_encoding='utf-8')
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_bash_operator_kill(self):
        """After an execution timeout, the bash subprocess must be gone."""
        import psutil
        # A sleep duration embedding our pid makes the process uniquely
        # identifiable in the process table below.
        sleep_time = "100%d" % os.getpid()
        t = BashOperator(
            task_id='test_bash_operator_kill',
            execution_timeout=timedelta(seconds=1),
            bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
            dag=self.dag)
        self.assertRaises(
            exceptions.AirflowTaskTimeout,
            t.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        sleep(2)
        pid = -1
        for proc in psutil.process_iter():
            if proc.cmdline() == ['sleep', sleep_time]:
                pid = proc.pid
        # Finding the process means the operator leaked it: clean up and fail.
        if pid != -1:
            os.kill(pid, signal.SIGTERM)
            self.fail("BashOperator's subprocess still running after stopping on timeout!")
    def test_on_failure_callback(self):
        """on_failure_callback fires and receives the AirflowException in context."""
        # Annoying workaround for nonlocal not existing in python 2
        data = {'called': False}
        def check_failure(context, test_case=self):
            data['called'] = True
            error = context.get('exception')
            test_case.assertIsInstance(error, AirflowException)
        t = BashOperator(
            task_id='check_on_failure_callback',
            bash_command="exit 1",
            dag=self.dag,
            on_failure_callback=check_failure)
        self.assertRaises(
            exceptions.AirflowException,
            t.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        self.assertTrue(data['called'])
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
    def test_sqlite(self):
        """SqliteOperator executes DDL against the default sqlite connection."""
        import airflow.operators.sqlite_operator
        t = airflow.operators.sqlite_operator.SqliteOperator(
            task_id='time_sqlite',
            sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
            dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_timeout(self):
        """A callable exceeding execution_timeout raises AirflowTaskTimeout."""
        t = PythonOperator(
            task_id='test_timeout',
            execution_timeout=timedelta(seconds=1),
            # Sleeps longer than the 1-second timeout on purpose.
            python_callable=lambda: sleep(5),
            dag=self.dag)
        self.assertRaises(
            exceptions.AirflowTaskTimeout,
            t.run,
            start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_python_op(self):
        """templates_dict values are rendered before the callable runs."""
        def test_py_op(templates_dict, ds, **kwargs):
            # '{{ ds }}' must have been rendered to the same ds passed in context.
            if not templates_dict['ds'] == ds:
                raise Exception("failure")
        t = PythonOperator(
            task_id='test_py_op',
            provide_context=True,
            python_callable=test_py_op,
            templates_dict={'ds': "{{ ds }}"},
            dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_complex_template(self):
        """Templating recurses into nested containers (dicts holding lists)."""
        def verify_templated_field(context):
            self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
                             context['ds'])
        t = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field={
                'foo': '123',
                'bar': ['baz', '{{ ds }}']
            },
            dag=self.dag)
        # Replace execute so the assertion runs with the rendered task in context.
        t.execute = verify_templated_field
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_template_with_variable(self):
        """
        Test the availability of variables in templates
        """
        val = {
            'test_value': 'a test value'
        }
        Variable.set("a_variable", val['test_value'])
        def verify_templated_field(context):
            self.assertEqual(context['ti'].task.some_templated_field,
                             val['test_value'])
        # '{{ var.value.a_variable }}' should render to the stored string.
        t = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field='{{ var.value.a_variable }}',
            dag=self.dag)
        t.execute = verify_templated_field
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_template_with_json_variable(self):
        """
        Test the availability of variables (serialized as JSON) in templates
        """
        val = {
            'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
        }
        Variable.set("a_variable", val['test_value'], serialize_json=True)
        def verify_templated_field(context):
            self.assertEqual(context['ti'].task.some_templated_field,
                             val['test_value']['obj']['v2'])
        # var.json deserializes the variable so nested attributes resolve.
        t = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field='{{ var.json.a_variable.obj.v2 }}',
            dag=self.dag)
        t.execute = verify_templated_field
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_template_with_json_variable_as_value(self):
        """
        Test the availability of variables (serialized as JSON) in templates, but
        accessed as a value
        """
        val = {
            'test_value': {'foo': 'bar'}
        }
        Variable.set("a_variable", val['test_value'], serialize_json=True)
        def verify_templated_field(context):
            # var.value yields the raw JSON string, not the deserialized dict.
            self.assertEqual(context['ti'].task.some_templated_field,
                             '{"foo": "bar"}')
        t = OperatorSubclass(
            task_id='test_complex_template',
            some_templated_field='{{ var.value.a_variable }}',
            dag=self.dag)
        t.execute = verify_templated_field
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    def test_template_non_bool(self):
        """
        Test templates can handle objects with no sense of truthiness
        """
        class NonBoolObject:
            def __len__(self):
                return NotImplemented
            def __bool__(self):
                return NotImplemented
        t = OperatorSubclass(
            task_id='test_bad_template_obj',
            some_templated_field=NonBoolObject(),
            dag=self.dag)
        # Must not raise even though bool()/len() on the field are unusable.
        t.resolve_template_files()
    def test_task_get_template(self):
        """The template context exposes all the date macros in expected formats."""
        TI = models.TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        ti.dag = self.dag_bash
        ti.run(ignore_ti_state=True)
        context = ti.get_template_context()
        # DEFAULT DATE is 2015-01-01
        self.assertEqual(context['ds'], '2015-01-01')
        self.assertEqual(context['ds_nodash'], '20150101')
        # next_ds is 2015-01-02 as the dag interval is daily
        self.assertEqual(context['next_ds'], '2015-01-02')
        self.assertEqual(context['next_ds_nodash'], '20150102')
        # prev_ds is 2014-12-31 as the dag interval is daily
        self.assertEqual(context['prev_ds'], '2014-12-31')
        self.assertEqual(context['prev_ds_nodash'], '20141231')
        self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
        self.assertEqual(context['ts_nodash'], '20150101T000000')
        self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
        self.assertEqual(context['yesterday_ds'], '2014-12-31')
        self.assertEqual(context['yesterday_ds_nodash'], '20141231')
        self.assertEqual(context['tomorrow_ds'], '2015-01-02')
        self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
    def test_import_examples(self):
        # Guard: every bundled example DAG parses; update NUM_EXAMPLE_DAGS
        # when examples are added or removed.
        self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
    def test_local_task_job(self):
        """A LocalTaskJob wrapping a task instance runs to completion."""
        TI = models.TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        job.run()
    def test_raw_job(self):
        """Running a TaskInstance directly (no job wrapper) succeeds."""
        TI = models.TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        ti.dag = self.dag_bash
        ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
    def test_variable_set_get_round_trip_json(self):
        """A dict Variable round-trips when (de)serialized as JSON."""
        value = {"a": 17, "b": 47}
        Variable.set("tested_var_set_id", value, serialize_json=True)
        self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
    def test_get_non_existing_var_should_return_default(self):
        """Missing key with a default supplied returns the default."""
        default_value = "some default val"
        self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
                                                     default_var=default_value))
    def test_get_non_existing_var_should_raise_key_error(self):
        """Missing key with no default raises KeyError."""
        with self.assertRaises(KeyError):
            Variable.get("thisIdDoesNotExist")
def test_get_non_existing_var_with_none_default_should_return_none(self):
self.assertIsNone(Variable.get("thisIdDoesNotExist", default_var=None))
    def test_get_non_existing_var_should_not_deserialize_json_default(self):
        """The default is returned as-is, even when deserialize_json is requested."""
        default_value = "}{ this is a non JSON default }{"
        self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
                                                     default_var=default_value,
                                                     deserialize_json=True))
    def test_variable_setdefault_round_trip(self):
        """setdefault on a missing key stores the value."""
        key = "tested_var_setdefault_1_id"
        value = "Monday morning breakfast in Paris"
        Variable.setdefault(key, value)
        self.assertEqual(value, Variable.get(key))
    def test_variable_setdefault_round_trip_json(self):
        """setdefault with JSON (de)serialization stores and returns the dict."""
        key = "tested_var_setdefault_2_id"
        # NOTE(review): "Hapiness" is a typo but used consistently as a key.
        value = {"city": 'Paris', "Hapiness": True}
        Variable.setdefault(key, value, deserialize_json=True)
        self.assertEqual(value, Variable.get(key, deserialize_json=True))
    def test_variable_setdefault_existing_json(self):
        """setdefault on an existing key returns the stored value unchanged."""
        key = "tested_var_setdefault_2_id"
        value = {"city": 'Paris', "Hapiness": True}
        Variable.set(key, value, serialize_json=True)
        val = Variable.setdefault(key, value, deserialize_json=True)
        # Check the returned value, and the stored value are handled correctly.
        self.assertEqual(value, val)
        self.assertEqual(value, Variable.get(key, deserialize_json=True))
    def test_variable_delete(self):
        """delete removes a stored Variable; deleting a missing key is a no-op."""
        key = "tested_var_delete"
        value = "to be deleted"
        # No-op if the variable doesn't exist
        Variable.delete(key)
        with self.assertRaises(KeyError):
            Variable.get(key)
        # Set the variable
        Variable.set(key, value)
        self.assertEqual(value, Variable.get(key))
        # Delete the variable
        Variable.delete(key)
        with self.assertRaises(KeyError):
            Variable.get(key)
    def test_parameterized_config_gen(self):
        """Generating config from the default template substitutes placeholders."""
        cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
        # making sure some basic building blocks are present:
        self.assertIn("[core]", cfg)
        self.assertIn("dags_folder", cfg)
        self.assertIn("sql_alchemy_conn", cfg)
        self.assertIn("fernet_key", cfg)
        # making sure replacement actually happened
        self.assertNotIn("{AIRFLOW_HOME}", cfg)
        self.assertNotIn("{FERNET_KEY}", cfg)
    def test_config_use_original_when_original_and_fallback_are_present(self):
        """A plain option wins over its *_CMD fallback when both are set."""
        self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
        self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
        FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
        configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
        FALLBACK_FERNET_KEY = configuration.conf.get(
            "core",
            "FERNET_KEY"
        )
        self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
        # restore the conf back to the original state
        configuration.conf.remove_option("core", "FERNET_KEY_CMD")
    def test_config_throw_error_when_original_and_fallback_is_absent(self):
        """With neither the option nor its *_CMD fallback set, get() raises."""
        self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
        self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
        FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
        configuration.conf.remove_option("core", "FERNET_KEY")
        with self.assertRaises(AirflowConfigException) as cm:
            configuration.conf.get("core", "FERNET_KEY")
        exception = str(cm.exception)
        message = "section/key [core/fernet_key] not found in config"
        self.assertEqual(message, exception)
        # restore the conf back to the original state
        configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
        self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
    def test_config_override_original_when_non_empty_envvar_is_provided(self):
        """An AIRFLOW__SECTION__KEY env var overrides the config-file value."""
        key = "AIRFLOW__CORE__FERNET_KEY"
        value = "some value"
        self.assertNotIn(key, os.environ)
        os.environ[key] = value
        FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
        self.assertEqual(value, FERNET_KEY)
        # restore the envvar back to the original state
        del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
    def test_terminate_task(self):
        """If a task instance's db state get deleted, it should fail"""
        TI = models.TaskInstance
        dag = self.dagbag.dags.get('test_utils')
        task = dag.task_dict.get('sleeps_forever')
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        job = jobs.LocalTaskJob(
            task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        # Running task instance asynchronously
        p = multiprocessing.Process(target=job.run)
        p.start()
        sleep(5)
        # Dispose the engine's connection pool so this parent process gets
        # fresh DB connections after the fork, then open a new session.
        settings.engine.dispose()
        session = settings.Session()
        ti.refresh_from_db(session=session)
        # making sure it's actually running
        self.assertEqual(State.RUNNING, ti.state)
        ti = session.query(TI).filter_by(
            dag_id=task.dag_id,
            task_id=task.task_id,
            execution_date=DEFAULT_DATE
        ).one()
        # deleting the instance should result in a failure
        session.delete(ti)
        session.commit()
        # waiting for the async task to finish
        p.join()
        # making sure that the task ended up as failed
        ti.refresh_from_db(session=session)
        self.assertEqual(State.FAILED, ti.state)
        session.close()
    def test_task_fail_duration(self):
        """If a task fails, the duration should be recorded in TaskFail"""
        # 'pass_sleepy' has no timeout and should succeed; 'fail_sleepy'
        # sleeps past its 3s execution_timeout and is expected to fail.
        p = BashOperator(
            task_id='pass_sleepy',
            bash_command='sleep 3',
            dag=self.dag)
        f = BashOperator(
            task_id='fail_sleepy',
            bash_command='sleep 5',
            execution_timeout=timedelta(seconds=3),
            retry_delay=timedelta(seconds=0),
            dag=self.dag)
        session = settings.Session()
        try:
            p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:
            pass
        try:
            f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
        except Exception:
            pass
        p_fails = session.query(TaskFail).filter_by(
            task_id='pass_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()
        f_fails = session.query(TaskFail).filter_by(
            task_id='fail_sleepy',
            dag_id=self.dag.dag_id,
            execution_date=DEFAULT_DATE).all()
        # Only the timed-out task should have a TaskFail row, and its
        # recorded duration must cover at least the 3s timeout window.
        self.assertEqual(0, len(p_fails))
        self.assertEqual(1, len(f_fails))
        self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_run_command(self):
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)), '\u1000foo')
self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date, utc_now)
def test_trigger_dagrun_with_str_execution_date(self):
utc_now_str = timezone.utcnow().isoformat()
self.assertIsInstance(utc_now_str, six.string_types)
run_id = 'trig__' + utc_now_str
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now_str,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date.isoformat(), utc_now_str)
def test_trigger_dagrun_with_templated_execution_date(self):
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
execution_date='{{ execution_date }}',
dag=self.dag)
self.assertTrue(isinstance(task.execution_date, six.string_types))
self.assertEqual(task.execution_date, '{{ execution_date }}')
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.render_templates()
self.assertEqual(timezone.parse(task.execution_date), DEFAULT_DATE)
    def test_externally_triggered_dagrun(self):
        """next_ds/prev_ds collapse to the run date for external triggers."""
        TI = models.TaskInstance
        # Create the dagrun between two "scheduled" execution dates of the DAG
        EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
        EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
        EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')
        dag = DAG(
            TEST_DAG_ID,
            default_args=self.args,
            schedule_interval=timedelta(weeks=1),
            start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_externally_triggered_dag_context',
                             dag=dag)
        dag.create_dagrun(run_id=models.DagRun.id_for_date(EXECUTION_DATE),
                          execution_date=EXECUTION_DATE,
                          state=State.RUNNING,
                          external_trigger=True)
        task.run(
            start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)
        ti = TI(task=task, execution_date=EXECUTION_DATE)
        context = ti.get_template_context()
        # next_ds/prev_ds should be the execution date for manually triggered runs
        self.assertEqual(context['next_ds'], EXECUTION_DS)
        self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)
        self.assertEqual(context['prev_ds'], EXECUTION_DS)
        self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class CliTests(unittest.TestCase):
TEST_USER1_EMAIL = 'test-user1@example.com'
TEST_USER2_EMAIL = 'test-user2@example.com'
    @classmethod
    def setUpClass(cls):
        # Wipe Pool/Variable tables once up front so the first test of the
        # class starts from a predictable database state.
        super(CliTests, cls).setUpClass()
        cls._cleanup()
    def setUp(self):
        """Build the Flask app, CLI parser, dagbag, and ORM session per test."""
        super().setUp()
        configuration.load_test_config()
        # The appbuilder (FAB security manager) is needed by the user/role
        # management tests below.
        from airflow.www import app as application
        self.app, self.appbuilder = application.create_app(session=Session, testing=True)
        self.app.config['TESTING'] = True
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
        settings.configure_orm()
        self.session = Session
    def tearDown(self):
        """Remove pools/variables plus any users and roles a test created."""
        self._cleanup(session=self.session)
        for email in [self.TEST_USER1_EMAIL, self.TEST_USER2_EMAIL]:
            test_user = self.appbuilder.sm.find_user(email=email)
            if test_user:
                self.appbuilder.sm.del_register_user(test_user)
        for role_name in ['FakeTeamA', 'FakeTeamB']:
            if self.appbuilder.sm.find_role(role_name):
                self.appbuilder.sm.delete_role(role_name)
        super().tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test'
])
cli.users(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
args = self.parser.parse_args([
'users', '-d', '--username', 'test3',
])
cli.users(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'users', '-c', '--username', 'user{}'.format(i), '--lastname',
'doe', '--firstname', 'jon',
'--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
'--use_random_password'
])
cli.users(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.users(self.parser.parse_args(['users', '-l']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
    def test_cli_import_users(self):
        """`users -i` creates users; re-importing replaces their roles."""
        def assertUserInRoles(email, roles):
            for role in roles:
                self.assertTrue(self._does_user_belong_to_role(email, role))

        def assertUserNotInRoles(email, roles):
            for role in roles:
                self.assertFalse(self._does_user_belong_to_role(email, role))
        assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
        assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
        users = [
            {
                "username": "imported_user1", "lastname": "doe1",
                "firstname": "jon", "email": self.TEST_USER1_EMAIL,
                "roles": ["Admin", "Op"]
            },
            {
                "username": "imported_user2", "lastname": "doe2",
                "firstname": "jon", "email": self.TEST_USER2_EMAIL,
                "roles": ["Public"]
            }
        ]
        self._import_users_from_file(users)
        assertUserInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
        assertUserInRoles(self.TEST_USER2_EMAIL, ['Public'])
        # Re-importing the same users with different roles must overwrite
        # the previous assignments, not merge them.
        users = [
            {
                "username": "imported_user1", "lastname": "doe1",
                "firstname": "jon", "email": self.TEST_USER1_EMAIL,
                "roles": ["Public"]
            },
            {
                "username": "imported_user2", "lastname": "doe2",
                "firstname": "jon", "email": self.TEST_USER2_EMAIL,
                "roles": ["Admin"]
            }
        ]
        self._import_users_from_file(users)
        assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
        assertUserInRoles(self.TEST_USER1_EMAIL, ['Public'])
        assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
        assertUserInRoles(self.TEST_USER2_EMAIL, ['Admin'])
    def test_cli_export_users(self):
        """`users -e` must produce a file that `users -i` accepts unchanged."""
        user1 = {"username": "imported_user1", "lastname": "doe1",
                 "firstname": "jon", "email": self.TEST_USER1_EMAIL,
                 "roles": ["Public"]}
        user2 = {"username": "imported_user2", "lastname": "doe2",
                 "firstname": "jon", "email": self.TEST_USER2_EMAIL,
                 "roles": ["Admin"]}
        self._import_users_from_file([user1, user2])
        users_filename = self._export_users_to_file()
        with open(users_filename, mode='r') as file:
            retrieved_users = json.loads(file.read())
        os.remove(users_filename)
        # ensure that an export can be imported
        self._import_users_from_file(retrieved_users)

        def find_by_username(username):
            # Locate the exported record; fail the test if the export
            # dropped the user.
            matches = [u for u in retrieved_users
                       if u['username'] == username]
            if not matches:
                self.fail("Couldn't find user with username {}".format(username))
            else:
                matches[0].pop('id')  # this key not required for import
                return matches[0]
        self.assertEqual(find_by_username('imported_user1'), user1)
        self.assertEqual(find_by_username('imported_user2'), user2)
def _import_users_from_file(self, user_list):
json_file_content = json.dumps(user_list)
f = NamedTemporaryFile(delete=False)
try:
f.write(json_file_content.encode())
f.flush()
args = self.parser.parse_args([
'users', '-i', f.name
])
cli.users(args)
finally:
os.remove(f.name)
def _export_users_to_file(self):
f = NamedTemporaryFile(delete=False)
args = self.parser.parse_args([
'users', '-e', f.name
])
cli.users(args)
return f.name
def _does_user_belong_to_role(self, email, rolename):
user = self.appbuilder.sm.find_user(email=email)
role = self.appbuilder.sm.find_role(rolename)
if user and role:
return role in user.roles
return False
def test_cli_add_user_role(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should not yet be a member of role 'Op'"
)
args = self.parser.parse_args([
'users', '--add-role', '--username', 'test4', '--role', 'Op'
])
cli.users(args)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should have been added to role 'Op'"
)
def test_cli_remove_user_role(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been created with role 'Viewer'"
)
args = self.parser.parse_args([
'users', '--remove-role', '--username', 'test4', '--role', 'Viewer'
])
cli.users(args)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been removed from role 'Viewer'"
)
    @mock.patch("airflow.bin.cli.DagBag")
    def test_cli_sync_perm(self, dagbag_mock):
        """`sync_perm` syncs roles once plus per-dag perms for each dag."""
        self.expect_dagbag_contains([
            DAG('has_access_control',
                access_control={
                    'Public': {'can_dag_read'}
                }),
            DAG('no_access_control')
        ], dagbag_mock)
        self.appbuilder.sm = mock.Mock()
        args = self.parser.parse_args([
            'sync_perm'
        ])
        cli.sync_perm(args)
        assert self.appbuilder.sm.sync_roles.call_count == 1
        # One sync_perm_for_dag call per dag in the bag: the dag with an
        # access_control dict passes it through, the other passes None.
        self.assertEqual(2,
                         len(self.appbuilder.sm.sync_perm_for_dag.mock_calls))
        self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
            'has_access_control',
            {'Public': {'can_dag_read'}}
        )
        self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
            'no_access_control',
            None,
        )
def expect_dagbag_contains(self, dags, dagbag_mock):
dagbag = mock.Mock()
dagbag.dags = {dag.dag_id: dag for dag in dags}
dagbag_mock.return_value = dagbag
def test_cli_create_roles(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', '--create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_create_roles_is_reentrant(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', '--create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles(args)
cli.roles(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_list_roles(self):
self.appbuilder.sm.add_role('FakeTeamA')
self.appbuilder.sm.add_role('FakeTeamB')
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.roles(self.parser.parse_args(['roles', '-l']))
stdout = mock_stdout.getvalue()
self.assertIn('FakeTeamA', stdout)
self.assertIn('FakeTeamB', stdout)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
def test_cli_list_jobs(self):
args = self.parser.parse_args(['list_jobs'])
cli.list_jobs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(['list_jobs', '--dag_id',
'example_bash_operator',
'--state', 'success',
'--limit', '100'])
cli.list_jobs(args)
@mock.patch("airflow.bin.cli.db.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with()
@mock.patch("airflow.bin.cli.db.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with()
    def test_cli_connections_list(self):
        """`connections --list` shows defaults and rejects incompatible flags."""
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(['connections', '--list']))
            stdout = mock_stdout.getvalue()
            # Every other line of the printed table is a data row; grab the
            # first two quoted fields (conn_id, conn_type) from each row.
            conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
                     for ii, line in enumerate(stdout.split('\n'))
                     if ii % 2 == 1]
            conns = [conn for conn in conns if len(conn) > 0]
        # Assert that some of the connections are present in the output as
        # expected:
        self.assertIn(['aws_default', 'aws'], conns)
        self.assertIn(['hive_cli_default', 'hive_cli'], conns)
        self.assertIn(['emr_default', 'emr'], conns)
        self.assertIn(['mssql_default', 'mssql'], conns)
        self.assertIn(['mysql_default', 'mysql'], conns)
        self.assertIn(['postgres_default', 'postgres'], conns)
        self.assertIn(['wasb_default', 'wasb'], conns)
        self.assertIn(['segment_default', 'segment'], conns)
        # Attempt to list connections with invalid cli args
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(
                ['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
                 '--conn_type=fake-type', '--conn_host=fake_host',
                 '--conn_login=fake_login', '--conn_password=fake_password',
                 '--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
            stdout = mock_stdout.getvalue()
        # Check list attempt stdout
        lines = [l for l in stdout.split('\n') if len(l) > 0]
        self.assertListEqual(lines, [
            ("\tThe following args are not compatible with the " +
             "--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
             "'conn_type', 'conn_host', 'conn_login', " +
             "'conn_password', 'conn_schema', 'conn_port']"),
        ])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to add connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Add connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
# Attempt to delete a non-existing connnection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator',
'-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as file:
json.dump(pool_config_input, file)
# Import json
try:
cli.pool(self.parser.parse_args(['pool', '-i', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool -i pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool(self.parser.parse_args(['pool', '-e', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool -e pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as file:
pool_config_output = json.load(file)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Set a dict
cli.variables(self.parser.parse_args([
'variables', '-s', 'dict', '{"foo": "oops"}']))
# Set a list
cli.variables(self.parser.parse_args([
'variables', '-s', 'list', '["oops"]']))
# Set str
cli.variables(self.parser.parse_args([
'variables', '-s', 'str', 'hello string']))
# Set int
cli.variables(self.parser.parse_args([
'variables', '-s', 'int', '42']))
# Set float
cli.variables(self.parser.parse_args([
'variables', '-s', 'float', '42.0']))
# Set true
cli.variables(self.parser.parse_args([
'variables', '-s', 'true', 'true']))
# Set false
cli.variables(self.parser.parse_args([
'variables', '-s', 'false', 'false']))
# Set none
cli.variables(self.parser.parse_args([
'variables', '-s', 'null', 'null']))
# Export and then import
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables3.json']))
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables3.json']))
# Assert value
self.assertEqual({'foo': 'oops'}, models.Variable.get('dict', deserialize_json=True))
self.assertEqual(['oops'], models.Variable.get('list', deserialize_json=True))
self.assertEqual('hello string', models.Variable.get('str')) # cannot json.loads(str)
self.assertEqual(42, models.Variable.get('int', deserialize_json=True))
self.assertEqual(42.0, models.Variable.get('float', deserialize_json=True))
self.assertEqual(True, models.Variable.get('true', deserialize_json=True))
self.assertEqual(False, models.Variable.get('false', deserialize_json=True))
self.assertEqual(None, models.Variable.get('null', deserialize_json=True))
os.remove('variables1.json')
os.remove('variables2.json')
os.remove('variables3.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
    # Patch get_num_workers_running to 0 so the webserver thinks gunicorn died,
    # forcing the master-timeout shutdown path.
    @mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
    def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        """The webserver must exit with code 1 when its gunicorn master vanishes."""
        # Shorten timeout so that this test doesn't take too long.
        configuration.conf.set("webserver", "web_server_master_timeout", "10")
        args = self.parser.parse_args(['webserver'])
        with self.assertRaises(SystemExit) as e:
            cli.webserver(args)
        self.assertEqual(e.exception.code, 1)
class FakeWebHDFSHook:
    """Minimal stand-in for WebHDFSHook used by the tests below."""

    def __init__(self, conn_id):
        self.conn_id = conn_id

    def get_conn(self):
        # Tests only inspect the value, so the "connection" is the id itself.
        return self.conn_id

    def check_for_path(self, hdfs_path):
        # Echo the queried path back; callers treat truthy as "exists".
        return hdfs_path
class FakeSnakeBiteClientException(Exception):
    """Raised by FakeSnakeBiteClient for paths it does not know about."""
class FakeSnakeBiteClient:
    """In-memory fake of the snakebite HDFS client used by sensor tests.

    Only ``ls`` is implemented; it returns canned listings for a fixed set
    of test paths and raises FakeSnakeBiteClientException for anything else.
    The repeated stat dictionaries of the original are factored into the
    ``_file``/``_directory`` helpers (values preserved exactly).
    """

    # Stat fields shared by every fake file entry.
    _FILE_STAT = {
        'group': 'supergroup',
        'permission': 420,
        'file_type': 'f',
        'access_time': 1481122343796,
        'block_replication': 3,
        'modification_time': 1481122343862,
        'blocksize': 134217728,
        'owner': 'hdfs',
    }
    # Stat fields shared by every fake directory entry.
    _DIR_STAT = {
        'group': 'supergroup',
        'permission': 493,
        'file_type': 'd',
        'access_time': 0,
        'block_replication': 0,
        'modification_time': 1481132141540,
        'length': 0,
        'blocksize': 0,
        'owner': 'hdfs',
    }

    def __init__(self):
        self.started = True

    @classmethod
    def _file(cls, path, length=0):
        """Build a fake file stat dict for *path* with the given byte length."""
        entry = dict(cls._FILE_STAT)
        entry['length'] = length
        entry['path'] = path
        return entry

    @classmethod
    def _directory(cls, path):
        """Build a fake directory stat dict for *path*."""
        entry = dict(cls._DIR_STAT)
        entry['path'] = path
        return entry

    def ls(self, path, include_toplevel=False):
        """
        the fake snakebite client
        :param path: the array of path to test
        :param include_toplevel: to return the toplevel directory info
        :return: a list for path for the matching queries
        """
        target = path[0]
        if target == '/datadirectory/empty_directory':
            if not include_toplevel:
                return []
            return [self._directory('/datadirectory/empty_directory')]
        if target == '/datadirectory/datafile':
            return [self._file('/datadirectory/datafile')]
        if target == '/datadirectory/not_empty_directory':
            entries = []
            if include_toplevel:
                # NOTE(review): the original fixture listed this toplevel dir
                # under path '/datadirectory/empty_directory'; preserved as-is
                # so existing assertions keep passing.
                entries.append(self._directory('/datadirectory/empty_directory'))
            entries.append(self._file('/datadirectory/not_empty_directory/test_file'))
            return entries
        if target == '/datadirectory/regex_dir':
            names = ['test1file', 'test2file', 'test3file',
                     'copying_file_1.txt._COPYING_', 'copying_file_3.txt.sftp']
            return [self._file('/datadirectory/regex_dir/' + name, length=12582912)
                    for name in names]
        # '/datadirectory/not_existing_file_or_directory' and any unknown path
        # both signal a lookup failure.
        raise FakeSnakeBiteClientException
class FakeHDFSHook:
    """Minimal stand-in for HDFSHook; hands out a FakeSnakeBiteClient."""

    def __init__(self, conn_id=None):
        self.conn_id = conn_id

    def get_conn(self):
        # Each call yields a fresh fake client, mirroring the real hook.
        return FakeSnakeBiteClient()
class ConnectionTest(unittest.TestCase):
    """Tests for Connection resolution: env-var URIs, explicit params, hooks."""

    def setUp(self):
        # Fresh test config + DB, plus two AIRFLOW_CONN_* URIs that the
        # connection machinery resolves by environment-variable name.
        configuration.load_test_config()
        utils.db.initdb()
        os.environ['AIRFLOW_CONN_TEST_URI'] = (
            'postgres://username:password@ec2.compute.com:5432/the_database')
        os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
            'postgres://ec2.compute.com/the_database')

    def tearDown(self):
        # Remove any env-var connections a test may have left behind.
        env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
        for ev in env_vars:
            if ev in os.environ:
                del os.environ[ev]

    def test_using_env_var(self):
        """A full URI in AIRFLOW_CONN_* populates every connection field."""
        c = SqliteHook.get_connection(conn_id='test_uri')
        self.assertEqual('ec2.compute.com', c.host)
        self.assertEqual('the_database', c.schema)
        self.assertEqual('username', c.login)
        self.assertEqual('password', c.password)
        self.assertEqual(5432, c.port)

    def test_using_unix_socket_env_var(self):
        """A URI without credentials/port leaves those fields as None."""
        c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
        self.assertEqual('ec2.compute.com', c.host)
        self.assertEqual('the_database', c.schema)
        self.assertIsNone(c.login)
        self.assertIsNone(c.password)
        self.assertIsNone(c.port)

    def test_param_setup(self):
        """Explicit keyword construction of a Connection keeps the values."""
        c = Connection(conn_id='local_mysql', conn_type='mysql',
                       host='localhost', login='airflow',
                       password='airflow', schema='airflow')
        self.assertEqual('localhost', c.host)
        self.assertEqual('airflow', c.schema)
        self.assertEqual('airflow', c.login)
        self.assertEqual('airflow', c.password)
        self.assertIsNone(c.port)

    def test_env_var_priority(self):
        """An AIRFLOW_CONN_* env var overrides the DB-stored connection."""
        c = SqliteHook.get_connection(conn_id='airflow_db')
        self.assertNotEqual('ec2.compute.com', c.host)
        os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
            'postgres://username:password@ec2.compute.com:5432/the_database'
        c = SqliteHook.get_connection(conn_id='airflow_db')
        self.assertEqual('ec2.compute.com', c.host)
        self.assertEqual('the_database', c.schema)
        self.assertEqual('username', c.login)
        self.assertEqual('password', c.password)
        self.assertEqual(5432, c.port)
        del os.environ['AIRFLOW_CONN_AIRFLOW_DB']

    def test_dbapi_get_uri(self):
        """get_hook().get_uri() round-trips the original connection URI."""
        conn = BaseHook.get_connection(conn_id='test_uri')
        hook = conn.get_hook()
        self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
        conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
        hook2 = conn2.get_hook()
        self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())

    def test_dbapi_get_sqlalchemy_engine(self):
        """The hook builds a SQLAlchemy Engine whose URL matches the URI."""
        conn = BaseHook.get_connection(conn_id='test_uri')
        hook = conn.get_hook()
        engine = hook.get_sqlalchemy_engine()
        self.assertIsInstance(engine, sqlalchemy.engine.Engine)
        self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))

    def test_get_connections_env_var(self):
        """get_connections for an env-var conn_id yields exactly one entry."""
        conns = SqliteHook.get_connections(conn_id='test_uri')
        assert len(conns) == 1
        assert conns[0].host == 'ec2.compute.com'
        assert conns[0].schema == 'the_database'
        assert conns[0].login == 'username'
        assert conns[0].password == 'password'
        assert conns[0].port == 5432
class WebHDFSHookTest(unittest.TestCase):
    """Construction-time behavior of WebHDFSHook's proxy_user option."""

    def setUp(self):
        configuration.load_test_config()

    def test_simple_init(self):
        """With no arguments the proxy user defaults to None."""
        from airflow.hooks.webhdfs_hook import WebHDFSHook
        hook = WebHDFSHook()
        self.assertIsNone(hook.proxy_user)

    def test_init_proxy_user(self):
        """An explicit proxy_user is stored on the hook."""
        from airflow.hooks.webhdfs_hook import WebHDFSHook
        hook = WebHDFSHook(proxy_user='someone')
        self.assertEqual('someone', hook.proxy_user)
# NOTE(review): placeholders for the optional snakebite-backed HDFS support --
# presumably rebound by a guarded import elsewhere in the file; while they stay
# None, HDFSHookTest below is skipped. TODO confirm against the full module.
HDFSHook = None
snakebite = None
@unittest.skipIf(HDFSHook is None,
                 "Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
    """Client-construction behavior of HDFSHook (plain, autoconfig, HA)."""

    def setUp(self):
        configuration.load_test_config()
        # Default HDFS connection resolved from the environment.
        os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'

    def test_get_client(self):
        """A plain hook yields a snakebite Client with host/port/user applied."""
        client = HDFSHook(proxy_user='foo').get_conn()
        self.assertIsInstance(client, snakebite.client.Client)
        self.assertEqual('localhost', client.host)
        self.assertEqual(8020, client.port)
        self.assertEqual('foo', client.service.channel.effective_user)

    @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
    @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
    def test_get_autoconfig_client(self, mock_get_connections,
                                   MockAutoConfigClient):
        """extra={'autoconfig': True} routes through AutoConfigClient."""
        c = Connection(conn_id='hdfs', conn_type='hdfs',
                       host='localhost', port=8020, login='foo',
                       extra=json.dumps({'autoconfig': True}))
        mock_get_connections.return_value = [c]
        HDFSHook(hdfs_conn_id='hdfs').get_conn()
        MockAutoConfigClient.assert_called_once_with(effective_user='foo',
                                                     use_sasl=False)

    @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
    def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
        """autoconfig=True with a missing conn still uses AutoConfigClient."""
        HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
        MockAutoConfigClient.assert_called_once_with(effective_user=None,
                                                     use_sasl=False)

    @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
    def test_get_ha_client(self, mock_get_connections):
        """Two connections under one conn_id produce a high-availability client."""
        c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',
                        host='localhost', port=8020)
        c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',
                        host='localhost2', port=8020)
        mock_get_connections.return_value = [c1, c2]
        client = HDFSHook().get_conn()
        self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
    """Backend selection in utils.email.send_email."""

    def setUp(self):
        # Ensure no custom backend is configured before each test.
        configuration.conf.remove_option('email', 'EMAIL_BACKEND')

    @mock.patch('airflow.utils.email.send_email')
    def test_default_backend(self, mock_send_email):
        """Without EMAIL_BACKEND set, send_email dispatches to the default."""
        res = utils.email.send_email('to', 'subject', 'content')
        mock_send_email.assert_called_with('to', 'subject', 'content')
        self.assertEqual(mock_send_email.return_value, res)

    @mock.patch('airflow.utils.email.send_email_smtp')
    def test_custom_backend(self, mock_send_email):
        """EMAIL_BACKEND routes to the named callable, bypassing SMTP."""
        configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
        utils.email.send_email('to', 'subject', 'content')
        # The module-level send_email_test mock receives the full kwarg set.
        send_email_test.assert_called_with(
            'to', 'subject', 'content', files=None, dryrun=False,
            cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
        self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
    """MIME assembly and SMTP transport behavior of utils.email."""

    def setUp(self):
        # Default to plain (non-SSL) SMTP for each test.
        configuration.conf.set('smtp', 'SMTP_SSL', 'False')

    @mock.patch('airflow.utils.email.send_MIME_email')
    def test_send_smtp(self, mock_send_mime):
        """An attachment becomes the last MIME part with a filename disposition."""
        attachment = tempfile.NamedTemporaryFile()
        attachment.write(b'attachment')
        attachment.seek(0)
        utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
        self.assertTrue(mock_send_mime.called)
        # call_args[0] is (from_addr, recipients, mime_message, ...).
        call_args = mock_send_mime.call_args[0]
        self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
        self.assertEqual(['to'], call_args[1])
        msg = call_args[2]
        self.assertEqual('subject', msg['Subject'])
        self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
        self.assertEqual(2, len(msg.get_payload()))
        filename = 'attachment; filename="' + os.path.basename(attachment.name) + '"'
        self.assertEqual(filename, msg.get_payload()[-1].get('Content-Disposition'))
        mimeapp = MIMEApplication('attachment')
        self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())

    @mock.patch('airflow.utils.email.send_MIME_email')
    def test_send_smtp_with_multibyte_content(self, mock_send_mime):
        """Non-ASCII body content survives when mime_charset is utf-8."""
        utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
        self.assertTrue(mock_send_mime.called)
        call_args = mock_send_mime.call_args[0]
        msg = call_args[2]
        mimetext = MIMEText('🔥', 'mixed', 'utf-8')
        self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())

    @mock.patch('airflow.utils.email.send_MIME_email')
    def test_send_bcc_smtp(self, mock_send_mime):
        """cc/bcc addresses are folded into the recipient list."""
        attachment = tempfile.NamedTemporaryFile()
        attachment.write(b'attachment')
        attachment.seek(0)
        utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
        self.assertTrue(mock_send_mime.called)
        call_args = mock_send_mime.call_args[0]
        self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
        self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
        msg = call_args[2]
        self.assertEqual('subject', msg['Subject'])
        self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
        self.assertEqual(2, len(msg.get_payload()))
        self.assertEqual('attachment; filename="' + os.path.basename(attachment.name) + '"',
                         msg.get_payload()[-1].get('Content-Disposition'))
        mimeapp = MIMEApplication('attachment')
        self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime(self, mock_smtp, mock_smtp_ssl):
        """Plain SMTP path: connect, STARTTLS, login, sendmail, quit."""
        mock_smtp.return_value = mock.Mock()
        mock_smtp_ssl.return_value = mock.Mock()
        msg = MIMEMultipart()
        utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
        mock_smtp.assert_called_with(
            configuration.conf.get('smtp', 'SMTP_HOST'),
            configuration.conf.getint('smtp', 'SMTP_PORT'),
        )
        self.assertTrue(mock_smtp.return_value.starttls.called)
        mock_smtp.return_value.login.assert_called_with(
            configuration.conf.get('smtp', 'SMTP_USER'),
            configuration.conf.get('smtp', 'SMTP_PASSWORD'),
        )
        mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
        self.assertTrue(mock_smtp.return_value.quit.called)

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
        """SMTP_SSL=True must use smtplib.SMTP_SSL and never plain SMTP."""
        configuration.conf.set('smtp', 'SMTP_SSL', 'True')
        mock_smtp.return_value = mock.Mock()
        mock_smtp_ssl.return_value = mock.Mock()
        utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
        self.assertFalse(mock_smtp.called)
        mock_smtp_ssl.assert_called_with(
            configuration.conf.get('smtp', 'SMTP_HOST'),
            configuration.conf.getint('smtp', 'SMTP_PORT'),
        )

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
        """Without SMTP_USER/SMTP_PASSWORD no login attempt is made."""
        configuration.conf.remove_option('smtp', 'SMTP_USER')
        configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
        mock_smtp.return_value = mock.Mock()
        mock_smtp_ssl.return_value = mock.Mock()
        utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
        self.assertFalse(mock_smtp_ssl.called)
        mock_smtp.assert_called_with(
            configuration.conf.get('smtp', 'SMTP_HOST'),
            configuration.conf.getint('smtp', 'SMTP_PORT'),
        )
        self.assertFalse(mock_smtp.login.called)

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
        """dryrun=True skips the network entirely."""
        utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
        self.assertFalse(mock_smtp.called)
        self.assertFalse(mock_smtp_ssl.called)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import json
import logging
import threading
import warnings
import tvm
from tvm import autotvm, transform
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.target import Target
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target, opt_level=3):
    """Call all TOPI compute to extract auto_scheduler tasks in a Relay program.

    Parameters
    ----------
    mod: tvm.IRModule or relay.function.Function
        The module (or function) to compile.
    params: dict of str to numpy array
        The associated parameters of the program.
    target: tvm.target.Target
        The compilation target.
    opt_level: Optional[int]
        The optimization level of the compilation.
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay

    # Turn off AutoTVM config-not-found warnings while we trace compilation.
    old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
    autotvm.GLOBAL_SCOPE.silent = True
    try:
        with transform.PassContext(
            opt_level=opt_level,
            config={
                "relay.backend.use_auto_scheduler": True,
            },
            disabled_pass={"AutoSchedulerLayoutRewrite"},
        ):
            compiler = relay.vm.VMCompiler()
            if params:
                compiler.set_params(params)
            mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
            compiler.lower(mod, target)
    finally:
        # Restore the previous verbosity even if lowering raises, so a failed
        # extraction does not leave AutoTVM silenced globally (the original
        # skipped this restore on exceptions).
        autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
    mod,
    params,
    target,
    target_host=None,
    hardware_params=None,
    include_simple_tasks=False,
    dump_workload_to_dag_log=None,
    opt_level=3,
):
    """Extract tuning tasks from a relay program.

    Parameters
    ----------
    mod: tvm.IRModule or relay.function.Function
        The module or function to tune
    params: dict of str to numpy array
        The associated parameters of the program
    target: Union[tvm.target.Target, str]
        The compilation target
    target_host: Optional[Union[tvm.target.Target, str]]
        The host compilation target (deprecated; fold into `target` instead)
    hardware_params : Optional[HardwareParams]
        Hardware parameters used for the search tasks
    include_simple_tasks: bool
        Whether to extract simple tasks that do not include complicated ops.
    dump_workload_to_dag_log: Optional[str]
        A file to dump an association between the workload keys and the actual DAG
    opt_level : Optional[int]
        The optimization level of the task extractions.

    Returns
    -------
    tasks: List[SearchTask]
        The tasks in this network
    weights: List[int]
        The weight (i.e. the number of appearance) of extracted tasks
    """
    # pylint: disable=import-outside-toplevel
    if target_host is not None:
        warnings.warn(
            "target_host parameter is going to be deprecated. "
            "Please pass in tvm.target.Target(target, host=target_host) instead."
        )
    target, target_host = Target.check_and_update_host_consist(target, target_host)
    # Run the compiler to collect all TOPI calls during compilation.
    env = TracingEnvironment(
        TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
    )
    # Silence the dispatch context while tracing so missing-config messages
    # from the query calls do not flood the log.
    dispatch_ctx = DispatchContext.current
    old_verbose = dispatch_ctx.verbose
    dispatch_ctx.verbose = 0
    with env:
        # Wrap build call in a new thread to avoid the conflict
        # between python's multiprocessing and tvm's thread pool
        build_thread = threading.Thread(
            target=call_all_topi_funcs, args=(mod, params, target, opt_level)
        )
        build_thread.start()
        build_thread.join()
    dispatch_ctx.verbose = old_verbose
    # Create one SearchTask per traced workload key, carrying its weight
    # (number of appearances) alongside.
    tasks = []
    weights = []
    for wkl_key, (weight, func_names) in env.wkl_key_to_weight.items():
        tasks.append(
            SearchTask(
                workload_key=wkl_key,
                target=target,
                hardware_params=hardware_params,
                # When auto scheduler is used in end to end network, try to apply layout rewrite
                # to improve the overall performance
                layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
                task_inputs=(
                    env.wkl_key_to_input_names[wkl_key]
                    if wkl_key in env.wkl_key_to_input_names
                    else None
                ),
                task_inputs_save_to_file=True,
                desc=",".join(func_names),
            )
        )
        weights.append(int(weight))
    # Optionally dump workload-key -> DAG text for debugging.
    if dump_workload_to_dag_log is not None:
        with open(dump_workload_to_dag_log, "w") as f:
            json.dump({task.workload_key: str(task.compute_dag) for task in tasks}, f)
    return tasks, weights
class TracingMode:
    """Enumeration of tracing modes used by TracingEnvironment."""
    EXTRACT_TASK = 0  # trace all topi calls to extract tasks
    # same as EXTRACT_TASK but ignore the task without complex ops
    EXTRACT_COMPLEX_TASK_ONLY = 1
    PREPARE_LAYOUT_REWRITE = 2  # trace topi calls to prepare layout rewrite
class TracingEnvironment:
    """Global environment for tracing all topi function calls."""

    # The active environment, or None when tracing is off.
    current = None

    def __init__(self, tracing_mode):
        self.tracing_mode = tracing_mode
        self.relay_disable_build_cache = "false"
        self.func_name_to_wkl_key = {}
        self.wkl_key_to_weight = {}
        self.wkl_key_to_input_names = {}

    def __enter__(self):
        TracingEnvironment.current = self
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        TracingEnvironment.current = None

    def add_workload_key(self, func_name, workload_key):
        """Record the workload key of a search task and bump its weight.

        Parameters
        ----------
        func_name: str
            The function name of the task.
        workload_key: str
            The workload key of a task.
        """
        self.func_name_to_wkl_key[func_name] = workload_key
        # Weight counts occurrences of the key; func_names collects every
        # function name that produced it.
        count, names = self.wkl_key_to_weight.setdefault(workload_key, (0, set()))
        names.add(func_name)
        self.wkl_key_to_weight[workload_key] = (count + 1, names)

    def add_workload_input_names(self, workload_key, input_names):
        """Attach special task input names to a workload.

        Parameters
        ----------
        workload_key : str
            The workload key of a task.
        input_names : List[str]
            A list of input names.
        """
        self.wkl_key_to_input_names[workload_key] = input_names
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
    """Traverse from a list of output tensors to get input/output tensors and
    other useful information.

    Parameters
    ----------
    outs: List[Tensor]
        The output tensors

    Returns
    -------
    io_tensors: List[Tensor]
        The input and output tensors with static shape
        (empty if any I/O tensor has a dynamic shape).
    has_layout_free: bool
        Whether the compute DAG has layout_free placeholders
    has_complex_op: bool
        Whether the topi compute function includes at least one complex (reduce) op
    """
    layout_free_ops = []
    inputs = []
    has_complex_op = False
    visited = set()

    def traverse(t):
        nonlocal has_complex_op
        # We cannot directly add tensors to the set, because the comparison of
        # two tensors with ndim=0 is ambiguous.
        assert t.handle is not None
        if t.handle.value in visited:
            return
        if isinstance(t.op, PlaceholderOp):
            inputs.append(t)
        elif isinstance(t.op, ComputeOp):
            # Any Reduce in the op body marks this compute as "complex".
            # (Generator form avoids materializing a throwaway list.)
            has_complex_op = has_complex_op or any(isinstance(e, Reduce) for e in t.op.body)
            if "layout_free_placeholders" in t.op.attrs:
                layout_free_ops.append(t.op)
            for x in t.op.input_tensors:
                traverse(x)
        visited.add(t.handle.value)

    for t in outs:
        traverse(t)

    io_tensors = inputs + list(outs)
    for tensor in io_tensors:
        # Reject the compute if any of its I/O tensors has dynamic shape.
        if any(not isinstance(v, int) for v in get_const_tuple(tensor.shape)):
            return ([], False, False)
    return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(func_name, outs):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
func_name: str
The name of the function being scheduled.
outs: List[Tensor]
The output tensors of topi compute functions
Returns
-------
sch: Optional[te.Schedule]
A tuned schedule or none (if not tuned) in the final build mode;
None in the tracing mode so that the fallback topi schedule will be used.
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler.measure import (
prepare_input_map,
) # lazily import to avoid recursive dependency
io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
if not io_tensors: # The compute includes dynamic shapes which are not supported yet.
return None
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
key = register_workload_tensors(dag.workload_key(), io_tensors)
target = tvm.target.Target.current()
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag, func_name)
schedule = None
env = TracingEnvironment.current
if env is None:
# in the final build mode
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
return schedule
if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
env.add_workload_key(func_name, key)
input_map = prepare_input_map(io_tensors)
if input_map:
env.add_workload_input_names(key, list(input_map.values()))
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if (
LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
and has_layout_free
):
if state is None:
return None
# rewrite the layout and update the context for the new dag
new_dag = dag.rewrite_layout_from_state(state)
new_key = new_dag.workload_key()
if new_key != key:
dispatch_ctx.update(target, new_key, state)
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return schedule
@tvm._ffi.register_func("auto_scheduler.relay_integration.te_compiler_update_weights")
def te_compiler_update_weights(function_weights):
"""A callback for updating the weights of extracted tasks. When using the TE compiler
that avoids compiling the same function multiple times by caching, all extracted tasks
have weight 1, so the TE compiler invokes this callback at the end. In this case,
we override existing weights with the use_count in TE compiler cache.
Parameters
----------
function_weights: Dict[str, int]
Mapping from function names to their weights.
"""
env = TracingEnvironment.current
if env is not None:
# Override this map with the weights in the TE compiler.
env.wkl_key_to_weight = {}
for func_name, weight in function_weights.items():
# If the function name is not in the map, then it means we are not interested in
# this function during task extraction (e.g., a function without reduction).
if func_name not in env.func_name_to_wkl_key:
continue
workload_key = env.func_name_to_wkl_key[func_name]
if workload_key not in env.wkl_key_to_weight:
env.wkl_key_to_weight[workload_key] = (0, set())
# Note that the function appears multiple times in a model will be renamed
# to make sure function names are unique, so we use the workload key generated
# from the function's TE compute to determine their weights.
old_weight, func_names = env.wkl_key_to_weight[workload_key]
func_names.add(func_name)
env.wkl_key_to_weight[workload_key] = (old_weight + weight, func_names)
def tensor_no_check_call(self, *indices):
    """An indexing function without any check.
    This is the same as `tvm.te.Tensor::__call__` except that the safety
    check is removed.
    """
    indices = convert_to_object(indices)

    def _as_prim_expr(index):
        # IterVars contribute their underlying variable; PrimExprs pass through.
        if isinstance(index, _expr.PrimExpr):
            return index
        if isinstance(index, _expr.IterVar):
            return index.var
        raise ValueError("The indices must be expression")

    args = [_as_prim_expr(index) for index in indices]
    return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
    """Remove the safety check in the indexing function for a tensor.
    This is done by monkey patching its indexing function.
    After removing the check, we are allowed to create a
    temporary wrong IR and fix it later in other places.

    Parameters
    ----------
    tensor: Tensor
        The tensor to remove index check.
    """
    # Monkey patch the indexing function: bind tensor_no_check_call as a
    # method on this specific instance so its __call__ skips the checks.
    tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
def rewrite_compute_body(compute_tensor, new_layout):
    """Rewrite the body of a ComputeOp according to a new layout of a placeholder.

    Parameters
    ----------
    compute_tensor: Tensor
        Output tensor of the ComputeOp whose body is rewritten.
    new_layout: str
        The new layout to apply to the op's single layout-free placeholder.
    """
    op = compute_tensor.op
    # Get layout free placeholders
    layout_free_placeholders = op.attrs["layout_free_placeholders"]
    assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
    placeholder_op = layout_free_placeholders[0].op
    # Rewrite the index expression in body
    body = []
    for b in op.body:
        body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
    # Rebuild the ComputeOp with the rewritten body via the TE FFI.
    op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
    num = op_node.num_outputs
    outputs = tuple(op_node.output(i) for i in range(num))
    # Match te.compute convention: single output unwrapped, else a tuple.
    return outputs[0] if num == 1 else outputs
def is_auto_scheduler_enabled():
    """Return whether the auto-scheduler is enabled.

    Returns
    -------
    enabled: bool
        Whether the auto-scheduler is enabled
    """
    return PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
|
autoLoginYahooMail.py | from threading import Thread
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import sys
#email_forward = 'rimalisa2019@gmail.com'
# read txt file
def read_email_info():
    """Read account records from 'yahoo forward.txt'.

    Each line holds colon-separated fields: ``email:password:forward_email``.

    Returns
    -------
    tuple
        (records, count) where records is a list of split-field lists and
        count is the number of lines read.
    """
    # Read-only access is enough; the original opened with "r+" and left the
    # handle management split across open()/with.
    with open("yahoo forward.txt", "r") as fh:
        # splitlines() drops the trailing newline that readlines() kept,
        # which previously leaked a '\n' into the last (forward-email) field.
        lines = fh.read().splitlines()
    records = [line.split(':') for line in lines]
    return records, len(records)
# Load all account records once at import time; both worker threads share them.
data, num = read_email_info()
# Starting cursor for the even-index worker (timer1); timer2 takes odd indices.
repeat = 0
# Worker functions below are intended to run in their own threads.
def timer1(delay, repeat):
    """Log in for every even-indexed account record, starting at `repeat`.

    Sleeps `delay` seconds before each login; meant to run in its own thread.
    """
    cursor = repeat
    while cursor < num:
        time.sleep(delay)
        record = data[cursor]
        login(email=record[0], password=record[1], forward_email=record[2])
        cursor += 2
    print("First thread" + "completeed")
repeat1 = 1
def timer2(delay, repeat1):
    """Log in for every odd-indexed account record, starting at `repeat1`.

    Sleeps `delay` seconds before each login; meant to run in its own thread.

    Bug fix: the loop previously ran while ``repeat1 < num + 1``, which let the
    odd-index cursor reach ``num`` when ``num`` is odd and raised IndexError on
    ``data[num]``. The bound is now ``num``, matching timer1.
    """
    while repeat1 < num:
        time.sleep(delay)
        login(email=data[repeat1][0], password=data[repeat1][1], forward_email=data[repeat1][2])
        repeat1 += 2
    print("Second thread" + "completeed")
# auto login and setting forward emailing
def login(email, password, forward_email):
    """Log into Yahoo Mail with Firefox and configure mail forwarding.

    Drives the Yahoo login form, then navigates Settings -> More Settings ->
    Mailboxes and sets ``forward_email`` as the forwarding address.
    Relies on fixed sleeps for page loads; quits the browser at the end.

    :param email: Yahoo account name typed into the username field
    :param password: account password
    :param forward_email: address entered into the forwarding field
    """
    fp = webdriver.FirefoxProfile()
    fp.set_preference("http.response.timeout", 5)
    fp.set_preference("dom.max_script_run_time", 5)
    driver = webdriver.Firefox(firefox_profile=fp)
    driver.delete_all_cookies()
    driver.get(
        'https://login.yahoo.com/?.src=ym&.lang=en-US&.intl=us&.done=https%3A%2F%2Fmail.yahoo.com%2Fd%3F.src%3Dfp')
    driver.set_page_load_timeout(5)
    driver.maximize_window()
    # Username step, then password step on the next page.
    email_input = driver.find_element_by_name("username")
    email_input.send_keys(email)
    next_btn = driver.find_element_by_name("signin")
    next_btn.send_keys(Keys.RETURN)
    driver.set_page_load_timeout(5)
    time.sleep(5)
    password_input = driver.find_element_by_name("password")
    password_input.send_keys(password)
    next_btn1 = driver.find_element_by_xpath("//button[contains(@id,'login-signin') and contains(@name,'verifyPassword')]")
    next_btn1.send_keys(Keys.RETURN)
    # Setting on HomePage
    driver.set_page_load_timeout(15)
    time.sleep(15)
    setting_link = driver.find_element_by_xpath("//span[contains(@data-test-id,'settings-link-label')]")
    setting_link.click()
    time.sleep(5)
    more_setting = driver.find_element_by_xpath("//a[contains(@data-test-id,'more-settings')]")
    more_setting.click()
    # mailboxes on Setting
    driver.set_page_load_timeout(5)
    time.sleep(5)
    mailboxes = driver.find_element_by_xpath("//a[contains(@data-test-id,'settings-tab-1')]")
    mailboxes.click()
    time.sleep(5)
    mailboxes_list = driver.find_element_by_xpath("//li[contains(@data-test-id,'accounts-list-item')]")
    mailboxes_list.click()
    time.sleep(5)
    # Replace any existing forwarding address with the requested one.
    email_input_forward = driver.find_element_by_name("stateForwardEmail")
    email_input_forward.clear()
    email_input_forward.send_keys(forward_email)
    time.sleep(5)
    verify_bt = driver.find_element_by_xpath("//button[contains(@data-test-id,'accounts-verify-forwarding-btn')]")
    verify_bt.send_keys(Keys.RETURN)
    # Save Button
    time.sleep(5)
    save_bt = driver.find_element_by_xpath("//button[contains(@data-test-id,'edit-save-btn')]")
    save_bt.send_keys(Keys.RETURN)
    # quit browser
    time.sleep(5)
    driver.quit()
def main():
    """Launch the two worker threads; each handles half of the records."""
    t1 = Thread(target=timer1, args=(5, repeat))
    t2 = Thread(target=timer2, args=(5, repeat1))
    t1.start()
    t2.start()
# Start processing as soon as the module is executed.
main()
|
taskManager.py | # BSD 2-Clause License
#
# Copyright (c) 2021, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
from subprocess import PIPE
from threading import RLock, Thread
import psutil
from ...error import LauncherError
from ...log import _get_log_level, get_logger
from .util.shell import execute_async_cmd, execute_cmd
logger = get_logger(__name__)
# Extra debug logging only when the log level is set to "developer".
verbose_tm = bool(_get_log_level() == "developer")
# Seconds between successive polls of the monitored tasks.
TM_INTERVAL = 1
class TaskManager:
    """The Task Manager watches the subprocesses launched through
    the asyncronous shell interface. Each task is a wrapper
    around the Popen/Process instance.

    The Task Managers polls processes on TM_INTERVAL
    and detects job failure and completion. Upon termination, the
    task returncode, output, and error are added to the task history.

    When a launcher uses the task manager to start a task, the task
    is either managed (by a WLM) or unmanaged (meaning not managed by
    a WLM). In the latter case, the Task manager is responsible for the
    lifecycle of the process.
    """

    def __init__(self):
        """Initialize a task manager thread."""
        self.actively_monitoring = False  # True while the monitor loop runs
        self.task_history = dict()  # task id -> (returncode, output, error)
        self.tasks = []
        self._lock = RLock()  # guards ``tasks`` and ``task_history``

    def start(self):
        """Start the task manager thread

        The TaskManager is run as a daemon thread meaning
        that it will die when the main thread dies.
        """
        monitor = Thread(name="TaskManager", daemon=True, target=self.run)
        monitor.start()

    def run(self):
        """Start monitoring Tasks"""
        global verbose_tm
        if verbose_tm:
            logger.debug("Starting Task Manager")
        self.actively_monitoring = True
        while self.actively_monitoring:
            time.sleep(TM_INTERVAL)
            for task in self.tasks:
                returncode = task.check_status()  # poll and set returncode
                # must test against None explicitly because returncode can be 0
                if returncode is not None:
                    output, error = task.get_io()
                    self.add_task_history(task.pid, returncode, output, error)
                    self.remove_task(task.pid)
            if len(self) == 0:
                # stop monitoring once every task has finished
                self.actively_monitoring = False
                if verbose_tm:
                    logger.debug("Sleeping, no tasks to monitor")

    def start_task(self, cmd_list, cwd, env=None, out=PIPE, err=PIPE):
        """Start a task managed by the TaskManager

        This is an "unmanaged" task, meaning it is NOT managed
        by a workload manager

        :param cmd_list: command to run
        :type cmd_list: list[str]
        :param cwd: current working directory
        :type cwd: str
        :param env: environment to launch with
        :type env: dict[str, str], optional
        :param out: output file, defaults to PIPE
        :type out: file, optional
        :param err: error file, defaults to PIPE
        :type err: file, optional
        :return: task id
        :rtype: int
        """
        # idiomatic ``with`` replaces the manual acquire/try/finally-release
        with self._lock:
            proc = execute_async_cmd(cmd_list, cwd, env=env, out=out, err=err)
            task = Task(proc)
            if verbose_tm:
                logger.debug(f"Starting Task {task.pid}")
            self.tasks.append(task)
            self.task_history[task.pid] = (None, None, None)
            return task.pid

    def start_and_wait(self, cmd_list, cwd, env=None, timeout=None):
        """Start a task not managed by the TaskManager

        This method is used by launchers to launch managed tasks
        meaning that they ARE managed by a WLM.
        This is primarily used for batch job launches

        :param cmd_list: command to run
        :type cmd_list: list[str]
        :param cwd: current working directory
        :type cwd: str
        :param env: environment to launch with
        :type env: dict[str, str], optional
        :param timeout: time to wait, defaults to None
        :type timeout: int, optional
        :return: returncode, output, and err
        :rtype: int, str, str
        """
        returncode, out, err = execute_cmd(cmd_list, cwd=cwd, env=env, timeout=timeout)
        if verbose_tm:
            logger.debug("Ran and waited on task")
        return returncode, out, err

    def add_existing(self, task_id):
        """Add existing task to be managed by the TaskManager

        :param task_id: task id of existing task
        :type task_id: int
        :raises LauncherError: If task cannot be found
        """
        with self._lock:
            try:
                process = psutil.Process(pid=task_id)
                task = Task(process)
                self.tasks.append(task)
                self.task_history[task.pid] = (None, None, None)
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                raise LauncherError(f"Process provided {task_id} does not exist") from None

    def remove_task(self, task_id):
        """Remove a task from the TaskManager

        :param task_id: id of the task to remove
        :type task_id: str
        """
        with self._lock:
            if verbose_tm:
                logger.debug(f"Removing Task {task_id}")
            try:
                task = self[task_id]
                if task.is_alive:
                    task.kill()
                    returncode = task.check_status()
                    out, err = task.get_io()
                    self.add_task_history(task_id, returncode, out, err)
                self.tasks.remove(task)
            except psutil.NoSuchProcess:
                logger.debug("Failed to kill a task during removal")
            except KeyError:
                logger.debug("Failed to remove a task, task was already removed")

    def get_task_update(self, task_id):
        """Get the update of a task

        :param task_id: task id
        :type task_id: str
        :return: status, returncode, output, error
        :rtype: str, int, str, str
        """
        with self._lock:
            rc, out, err = self.task_history[task_id]
            # must be ``is None`` (not ``== None``) because rc can be 0
            if rc is None:
                try:
                    task = self[task_id]
                    return task.status, rc, out, err
                # removed forcefully either by OS or us, no returncode set
                # either way, job has completed and we won't have returncode
                # Usually hits when jobs last less then the TM_INTERVAL
                except (KeyError, psutil.NoSuchProcess):
                    return "Completed", rc, out, err
            # process has completed, status set manually as we don't
            # save task statuses during runtime.
            if rc != 0:
                return "Failed", rc, out, err
            return "Completed", rc, out, err

    def add_task_history(self, task_id, returncode, out=None, err=None):
        """Add a task to the task history

        Add a task to record its future returncode, output and error

        :param task_id: id of the task
        :type task_id: str
        :param returncode: returncode
        :type returncode: int
        :param out: output, defaults to None
        :type out: str, optional
        :param err: output, defaults to None
        :type err: str, optional
        """
        self.task_history[task_id] = (returncode, out, err)

    def __getitem__(self, task_id):
        with self._lock:
            for task in self.tasks:
                if task.pid == task_id:
                    return task
            raise KeyError

    def __len__(self):
        with self._lock:
            return len(self.tasks)
class Task:
    """Wrapper around a process handle that the TaskManager polls.

    An "owned" task wraps a psutil.Popen we spawned ourselves; an attached
    task (via TaskManager.add_existing) wraps a plain psutil.Process.
    """

    def __init__(self, process):
        """Initialize a task

        :param process: Popen object
        :type process: psutil.Popen
        """
        self.process = process
        # pid is stored as a string and used as the task id everywhere
        self.pid = str(self.process.pid)

    def check_status(self):
        """Ping the job and return the returncode if finished

        :return: returncode if finished otherwise None
        :rtype: int
        """
        if self.owned:
            return self.process.poll()
        # we can't manage processes we don't own
        # have to rely on .kill() to stop.
        return self.returncode

    def get_io(self):
        """Get the IO from the subprocess

        :return: output and error from the Popen
        :rtype: str, str
        """
        # psutil.Process does not implement communicate; only owned
        # Popen tasks can report output/error.
        if not self.owned:
            return None, None
        output, error = self.process.communicate()
        if output:
            output = output.decode("utf-8")
        if error:
            error = error.decode("utf-8")
        return output, error

    def kill(self, timeout=10):
        """Kill the subprocess and all children

        :param timeout: seconds to wait for the processes to die, defaults to 10
        :type timeout: int, optional
        """

        def kill_callback(proc):
            logger.debug(f"Process terminated with kill {proc.pid}")

        children = self.process.children(recursive=True)
        children.append(self.process)  # add parent process to be killed
        for child in children:
            child.kill()
        _, alive = psutil.wait_procs(children, timeout=timeout, callback=kill_callback)
        if alive:
            for proc in alive:
                logger.warning(f"Unable to kill emitted process {proc.pid}")

    def terminate(self, timeout=10):
        """Terminate this process and all children.

        :param timeout: time to wait for task death, defaults to 10
        :type timeout: int, optional
        """

        def terminate_callback(proc):
            logger.debug(f"Cleanly terminated task {proc.pid}")

        children = self.process.children(recursive=True)
        children.append(self.process)  # add parent process to be killed
        # try SIGTERM first for clean exit
        for child in children:
            logger.debug(child)
            child.terminate()
        # wait for termination
        _, alive = psutil.wait_procs(
            children, timeout=timeout, callback=terminate_callback
        )
        if alive:
            # escalate to SIGKILL on the parent if SIGTERM was ignored
            logger.debug("SIGTERM failed, using SIGKILL")
            self.process.kill()

    def wait(self):
        """Block until the underlying process exits."""
        self.process.wait()

    @property
    def returncode(self):
        # Owned (Popen) processes expose returncode directly; for attached
        # processes we can only infer: None while alive, 0 once gone.
        if self.owned:
            return self.process.returncode
        if self.is_alive:
            return None
        return 0

    @property
    def is_alive(self):
        """Whether the process is still running."""
        return self.process.is_running()

    @property
    def status(self):
        """Raw psutil status string of the process."""
        return self.process.status()

    @property
    def owned(self):
        # True only for processes we spawned ourselves through start_task.
        return isinstance(self.process, psutil.Popen)
|
conftest.py | from __future__ import print_function
import pytest
import time
import datetime
import requests
import os
import sys
import threading
import logging
import shutil
from contextlib import contextmanager
from tests import utils
from six.moves import queue
from wandb import wandb_sdk
# from multiprocessing import Process
import subprocess
import click
from click.testing import CliRunner
import webbrowser
import git
import psutil
import atexit
import wandb
import shutil
from wandb.util import mkdir_exists_ok
from six.moves import urllib
from wandb.sdk.lib.module import unset_globals
from wandb.sdk.lib.git import GitRepo
from wandb.sdk.internal.handler import HandleManager
from wandb.sdk.internal.sender import SendManager
from wandb.sdk.interface.interface import BackendSender
from wandb.proto import wandb_internal_pb2
from wandb.proto import wandb_internal_pb2 as pb
try:
import nbformat
except ImportError: # TODO: no fancy notebook fun in python2
pass
try:
from unittest.mock import MagicMock
except ImportError: # TODO: this is only for python2
from mock import MagicMock
# Fixed fake API key used by every test against the mock server.
DUMMY_API_KEY = "1824812581259009ca9981580f8f8a9012409eee"
class ServerMap(object):
    """Lazy mapping from pytest-xdist worker id to a running mock server.

    A server process is spawned the first time a worker id is looked up
    and cached for every later access.
    """

    def __init__(self):
        self._map = {}

    def items(self):
        return self._map.items()

    def __getitem__(self, worker_id):
        server = self._map.get(worker_id)
        if server is None:
            server = start_mock_server(worker_id)
            self._map[worker_id] = server
        return server
# Module-level registry: one mock server per xdist worker, created on demand.
servers = ServerMap()
def test_cleanup(*args, **kwargs):
    """atexit hook: terminate every spawned mock server and report any
    file descriptors still open at interpreter shutdown."""
    print("Shutting down mock servers")
    for wid, server in servers.items():
        print("Shutting down {}".format(wid))
        server.terminate()
    print("Open files during tests: ")
    proc = psutil.Process()
    print(proc.open_files())
def start_mock_server(worker_id):
    """We start a flask server process for each pytest-xdist worker_id"""
    port = utils.free_port()
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    path = os.path.join(root, "tests", "utils", "mock_server.py")
    command = [sys.executable, "-u", path]
    # NOTE(review): this mutates the real os.environ (not a copy), so PORT
    # and PYTHONPATH leak into the test process environment — confirm intended.
    env = os.environ
    env["PORT"] = str(port)
    env["PYTHONPATH"] = root
    logfname = os.path.join(
        root, "tests", "logs", "live_mock_server-{}.log".format(worker_id)
    )
    logfile = open(logfname, "w")
    server = subprocess.Popen(
        command,
        stdout=logfile,
        env=env,
        stderr=subprocess.STDOUT,
        bufsize=1,
        close_fds=True,
    )
    # Attach helpers directly onto the Popen object for convenient access.
    server._port = port
    server.base_url = "http://localhost:%i" % server._port
    def get_ctx():
        return requests.get(server.base_url + "/ctx").json()
    def set_ctx(payload):
        return requests.put(server.base_url + "/ctx", json=payload).json()
    def reset_ctx():
        return requests.delete(server.base_url + "/ctx").json()
    server.get_ctx = get_ctx
    server.set_ctx = set_ctx
    server.reset_ctx = reset_ctx
    # Poll /ctx (up to 10 attempts) until the server answers with 200.
    started = False
    for i in range(10):
        try:
            res = requests.get("%s/ctx" % server.base_url, timeout=5)
            if res.status_code == 200:
                started = True
                break
            print("Attempting to connect but got: %s" % res)
        except requests.exceptions.RequestException:
            print(
                "Timed out waiting for server to start...", server.base_url, time.time()
            )
            if server.poll() is None:
                time.sleep(1)
            else:
                raise ValueError("Server failed to start.")
    if started:
        print("Mock server listing on {} see {}".format(server._port, logfname))
    else:
        # Dump the server's log to stdout to aid debugging, then fail loudly.
        server.terminate()
        print("Server failed to launch, see {}".format(logfname))
        try:
            print("=" * 40)
            with open(logfname) as f:
                for logline in f.readlines():
                    print(logline.strip())
            print("=" * 40)
        except Exception as e:
            print("EXCEPTION:", e)
        raise ValueError("Failed to start server! Exit code %s" % server.returncode)
    return server
# Ensure all mock servers are torn down when the test session exits.
atexit.register(test_cleanup)
@pytest.fixture
def test_name(request):
    """Current test name, URL-quoted and safe to use in file paths."""
    # change "test[1]" to "test__1__"
    name = urllib.parse.quote(request.node.name.replace("[", "__").replace("]", "__"))
    return name
@pytest.fixture
def test_dir(test_name):
    """Create (wiping any previous run) and chdir into a per-test log dir."""
    orig_dir = os.getcwd()
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    test_dir = os.path.join(root, "tests", "logs", test_name)
    if os.path.exists(test_dir):
        shutil.rmtree(test_dir)
    mkdir_exists_ok(test_dir)
    os.chdir(test_dir)
    yield test_dir
    # restore the original working directory after the test
    os.chdir(orig_dir)
@pytest.fixture
def git_repo(runner):
    """A fresh git repo (isolated cwd) with a wandb/settings file and one commit."""
    with runner.isolated_filesystem():
        r = git.Repo.init(".")
        mkdir_exists_ok("wandb")
        # Because the forked process doesn't use my monkey patch above
        with open("wandb/settings", "w") as f:
            f.write("[default]\nproject: test")
        open("README", "wb").close()
        r.index.add(["README"])
        r.index.commit("Initial commit")
        yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote(runner):
    """A git repo whose origin URL carries user:password credentials."""
    with runner.isolated_filesystem():
        r = git.Repo.init(".")
        r.create_remote("origin", "https://foo:bar@github.com/FooTest/Foo.git")
        yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote_and_empty_pass(runner):
    """A git repo whose origin URL has a user but an empty password."""
    with runner.isolated_filesystem():
        r = git.Repo.init(".")
        r.create_remote("origin", "https://foo:@github.com/FooTest/Foo.git")
        yield GitRepo(lazy=False)
@pytest.fixture
def dummy_api_key():
    """The constant fake API key used across tests."""
    return DUMMY_API_KEY
@pytest.fixture
def test_settings(test_dir, mocker, live_mock_server):
    """Settings object for tests"""
    # TODO: likely not the right thing to do, we shouldn't be setting this
    wandb._IS_INTERNAL_PROCESS = False
    wandb.wandb_sdk.wandb_run.EXIT_TIMEOUT = 15
    wandb.wandb_sdk.wandb_setup._WandbSetup.instance = None
    wandb_dir = os.path.join(test_dir, "wandb")
    mkdir_exists_ok(wandb_dir)
    # root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    # Point the settings at this worker's live mock server with a fresh run id.
    settings = wandb.Settings(
        _start_time=time.time(),
        base_url=live_mock_server.base_url,
        root_dir=test_dir,
        save_code=False,
        project="test",
        console="off",
        host="test",
        api_key=DUMMY_API_KEY,
        run_id=wandb.util.generate_id(),
        _start_datetime=datetime.datetime.now(),
    )
    yield settings
    # Just incase someone forgets to join in tests
    if wandb.run is not None:
        wandb.run.finish()
@pytest.fixture
def mocked_run(runner, test_settings):
    """A managed run object for tests with a mock backend"""
    run = wandb.wandb_sdk.wandb_run.Run(settings=test_settings)
    run._set_backend(MagicMock())
    yield run
@pytest.fixture
def runner(monkeypatch, mocker):
    """Click CliRunner with interactive prompts and browser launches stubbed out."""
    # monkeypatch.setattr('wandb.cli.api', InternalApi(
    #     default_settings={'project': 'test', 'git_tag': True}, load_settings=False))
    monkeypatch.setattr(wandb.util, "prompt_choices", lambda x: x[0])
    monkeypatch.setattr(wandb.wandb_lib.apikey, "prompt_choices", lambda x: x[0])
    monkeypatch.setattr(click, "launch", lambda x: 1)
    monkeypatch.setattr(webbrowser, "open_new_tab", lambda x: True)
    mocker.patch("wandb.wandb_lib.apikey.isatty", lambda stream: True)
    mocker.patch("wandb.wandb_lib.apikey.input", lambda x: 1)
    mocker.patch("wandb.wandb_lib.apikey.getpass.getpass", lambda x: DUMMY_API_KEY)
    return CliRunner()
@pytest.fixture(autouse=True)
def reset_setup():
    """Force wandb setup to run fresh for every test."""
    wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
@pytest.fixture(autouse=True)
def local_netrc(monkeypatch):
    """Never use our real credentials, put them in their own isolated dir"""
    with CliRunner().isolated_filesystem():
        # TODO: this seems overkill...
        origexpand = os.path.expanduser
        # Touch that netrc
        open(".netrc", "wb").close()
        def expand(path):
            # Redirect any netrc-related lookup into the isolated cwd.
            if "netrc" in path:
                try:
                    # NOTE(review): the file touched above is ".netrc" but this
                    # resolves "netrc" without the leading dot — confirm intended.
                    ret = os.path.realpath("netrc")
                except OSError:
                    ret = origexpand(path)
            else:
                ret = origexpand(path)
            return ret
        monkeypatch.setattr(os.path, "expanduser", expand)
        yield
@pytest.fixture(autouse=True)
def local_settings(mocker):
    """Place global settings in an isolated dir"""
    with CliRunner().isolated_filesystem():
        cfg_path = os.path.join(os.getcwd(), ".config", "wandb", "settings")
        mkdir_exists_ok(os.path.join(".config", "wandb"))
        mocker.patch("wandb.old.settings.Settings._global_path", return_value=cfg_path)
        yield
@pytest.fixture
def mock_server(mocker):
    """In-process mocked API server (no subprocess)."""
    return utils.mock_server(mocker)
# We create one live_mock_server per pytest-xdist worker
@pytest.fixture
def live_mock_server(request, worker_id):
    """Real HTTP mock server shared by all tests of this xdist worker."""
    global servers
    server = servers[worker_id]
    name = urllib.parse.quote(request.node.name)
    # We set the username so the mock backend can namespace state
    os.environ["WANDB_USERNAME"] = name
    os.environ["WANDB_BASE_URL"] = server.base_url
    os.environ["WANDB_ERROR_REPORTING"] = "false"
    os.environ["WANDB_API_KEY"] = DUMMY_API_KEY
    # clear mock server ctx
    server.reset_ctx()
    yield server
    del os.environ["WANDB_USERNAME"]
    del os.environ["WANDB_BASE_URL"]
    del os.environ["WANDB_ERROR_REPORTING"]
    del os.environ["WANDB_API_KEY"]
@pytest.fixture
def notebook(live_mock_server, test_dir):
    """This launches a live server, configures a notebook to use it, and enables
    devs to execute arbitrary cells. See tests/test_notebooks.py
    """
    @contextmanager
    def notebook_loader(nb_path, kernel_name="wandb_python", save_code=True, **kwargs):
        # Load the shared setup notebook and patch its first cell with
        # this server's URL (and optionally the notebook name).
        with open(utils.notebook_path("setup.ipynb")) as f:
            setupnb = nbformat.read(f, as_version=4)
        setupcell = setupnb["cells"][0]
        # Ensure the notebooks talks to our mock server
        new_source = setupcell["source"].replace(
            "__WANDB_BASE_URL__", live_mock_server.base_url,
        )
        if save_code:
            new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", nb_path)
        else:
            new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", "")
        setupcell["source"] = new_source
        nb_path = utils.notebook_path(nb_path)
        shutil.copy(nb_path, os.path.join(os.getcwd(), os.path.basename(nb_path)))
        with open(nb_path) as f:
            nb = nbformat.read(f, as_version=4)
        # Prepend the configured setup cell so it executes first.
        nb["cells"].insert(0, setupcell)
        try:
            client = utils.WandbNotebookClient(nb, kernel_name=kernel_name)
            with client.setup_kernel(**kwargs):
                # Run setup commands for mocks
                client.execute_cells(-1, store_history=False)
            yield client
        finally:
            # Always dump the notebook output for post-mortem debugging.
            with open(os.path.join(os.getcwd(), "notebook.log"), "w") as f:
                f.write(client.all_output_text())
            wandb.termlog("Find debug logs at: %s" % os.getcwd())
            wandb.termlog(client.all_output_text())
    notebook_loader.base_url = live_mock_server.base_url
    return notebook_loader
@pytest.fixture
def mocked_module(monkeypatch):
    """This allows us to mock modules loaded via wandb.util.get_module"""
    def mock_get_module(module):
        orig_get_module = wandb.util.get_module
        mocked_module = MagicMock()
        def get_module(mod):
            # Return the MagicMock only for the requested module name.
            if mod == module:
                return mocked_module
            else:
                return orig_get_module(mod)
        monkeypatch.setattr(wandb.util, "get_module", get_module)
        return mocked_module
    return mock_get_module
@pytest.fixture
def mocked_ipython(monkeypatch):
    """Pretend we are running inside Jupyter and yield the fake IPython shell."""
    monkeypatch.setattr(
        wandb.wandb_sdk.wandb_settings, "_get_python_type", lambda: "jupyter"
    )
    ipython = MagicMock()
    # TODO: this is really unfortunate, for reasons not clear to me, monkeypatch doesn't work
    orig_get_ipython = wandb.jupyter.get_ipython
    wandb.jupyter.get_ipython = lambda: ipython
    yield ipython
    wandb.jupyter.get_ipython = orig_get_ipython
def default_wandb_args():
    """This allows us to parameterize the wandb_init_run fixture
    The most general arg is "env", you can call:

    @pytest.mark.wandb_args(env={"WANDB_API_KEY": "XXX"})

    To set env vars and have them unset when the test completes.
    """
    return dict(
        error=None,
        k8s=None,
        sagemaker=False,
        tensorboard=False,
        resume=False,
        env={},
        wandb_init={},
    )
def mocks_from_args(mocker, args, mock_server):
    """Install the k8s / sagemaker mocks requested by ``args`` and merge
    the env vars they need into ``args["env"]`` (mutated in place)."""
    if args["k8s"] is not None:
        mock_server.ctx["k8s"] = args["k8s"]
        args["env"].update(utils.mock_k8s(mocker))
    if args["sagemaker"]:
        args["env"].update(utils.mock_sagemaker(mocker))
@pytest.fixture
def wandb_init_run(request, runner, mocker, mock_server):
    """Yield an offline wandb run configured via the ``wandb_args`` marker."""
    marker = request.node.get_closest_marker("wandb_args")
    args = default_wandb_args()
    if marker:
        args.update(marker.kwargs)
    try:
        mocks_from_args(mocker, args, mock_server)
        for k, v in args["env"].items():
            os.environ[k] = v
        # TODO: likely not the right thing to do, we shouldn't be setting this
        wandb._IS_INTERNAL_PROCESS = False
        # We want to run setup every time in tests
        wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
        mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
        run = wandb.init(
            settings=wandb.Settings(console="off", mode="offline", _except_exit=False),
            **args["wandb_init"],
        )
        yield run
        wandb.join()
    finally:
        # Always clear globals and the env vars we set, even on failure.
        unset_globals()
        for k, v in args["env"].items():
            del os.environ[k]
@pytest.fixture
def wandb_init(request, runner, mocker, mock_server):
    """Factory fixture: returns a function that starts an offline wandb run."""
    def init(*args, **kwargs):
        try:
            mocks_from_args(mocker, default_wandb_args(), mock_server)
            # TODO: likely not the right thing to do, we shouldn't be setting this
            wandb._IS_INTERNAL_PROCESS = False
            # We want to run setup every time in tests
            wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
            mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
            return wandb.init(
                settings=wandb.Settings(
                    console="off", mode="offline", _except_exit=False
                ),
                *args,
                **kwargs,
            )
        finally:
            unset_globals()
    return init
@pytest.fixture()
def restore_version():
    """Restore wandb.__version__ (and drop any pypi-version hack) after the test."""
    save_current_version = wandb.__version__
    yield
    wandb.__version__ = save_current_version
    try:
        del wandb.__hack_pypi_latest_version__
    except AttributeError:
        pass
@pytest.fixture()
def disable_console():
    """Set WANDB_CONSOLE=off for the duration of the test."""
    os.environ["WANDB_CONSOLE"] = "off"
    yield
    del os.environ["WANDB_CONSOLE"]
@pytest.fixture()
def parse_ctx():
    """Fixture providing class to parse context data."""
    def parse_ctx_fn(ctx, run_id=None):
        return utils.ParseCTX(ctx, run_id=run_id)
    yield parse_ctx_fn
@pytest.fixture()
def record_q():
    """Queue that stands in for the internal record channel."""
    return queue.Queue()
@pytest.fixture()
def fake_interface(record_q):
    """BackendSender writing into the in-memory record queue."""
    return BackendSender(record_q=record_q)
@pytest.fixture
def fake_backend(fake_interface):
    """Minimal backend object exposing only ``.interface``."""
    class FakeBackend:
        def __init__(self):
            self.interface = fake_interface
    yield FakeBackend()
@pytest.fixture
def fake_run(fake_backend):
    """Factory producing Run objects wired to the fake backend."""
    def run_fn():
        s = wandb.Settings()
        run = wandb_sdk.wandb_run.Run(settings=s)
        run._set_backend(fake_backend)
        return run
    yield run_fn
@pytest.fixture
def records_util():
    """Factory wrapping a queue in a RecordsUtil inspector."""
    def records_fn(q):
        ru = utils.RecordsUtil(q)
        return ru
    yield records_fn
@pytest.fixture
def user_test(fake_run, record_q, records_util):
    """Bundle of helpers for user-level tests: get_run() and get_records()."""
    class UserTest:
        pass
    ut = UserTest()
    ut.get_run = fake_run
    ut.get_records = lambda: records_util(record_q)
    yield ut
# @pytest.hookimpl(tryfirst=True, hookwrapper=True)
# def pytest_runtest_makereport(item, call):
# outcome = yield
# rep = outcome.get_result()
# if rep.when == "call" and rep.failed:
# print("DEBUG PYTEST", rep, item, call, outcome)
@pytest.fixture
def log_debug(caplog):
    """Capture logging at DEBUG level for the duration of the test."""
    caplog.set_level(logging.DEBUG)
    yield
    # for rec in caplog.records:
    #     print("LOGGER", rec.message, file=sys.stderr)
# ----------------------
# internal test fixtures
# ----------------------
@pytest.fixture()
def internal_result_q():
    """Result queue for the internal-process harness."""
    return queue.Queue()
@pytest.fixture()
def internal_sender_q():
    """Queue feeding records to the sender thread."""
    return queue.Queue()
@pytest.fixture()
def internal_writer_q():
    """Queue feeding records to the writer."""
    return queue.Queue()
@pytest.fixture()
def internal_process():
    # FIXME: return mocked process (needs is_alive())
    return MockProcess()
class MockProcess:
    # Minimal stand-in for the internal process: only exposes is_alive();
    # tests flip ``_alive`` to simulate the process dying.
    def __init__(self):
        self._alive = True
    def is_alive(self):
        return self._alive
@pytest.fixture()
def _internal_sender(record_q, internal_result_q, internal_process):
    """BackendSender wired to the in-memory queues and the mock process."""
    return BackendSender(
        record_q=record_q, result_q=internal_result_q, process=internal_process,
    )
@pytest.fixture()
def internal_sm(
    runner,
    internal_sender_q,
    internal_result_q,
    test_settings,
    mock_server,
    _internal_sender,
):
    """SendManager operating inside an isolated filesystem."""
    with runner.isolated_filesystem():
        test_settings.root_dir = os.getcwd()
        sm = SendManager(
            settings=test_settings,
            record_q=internal_sender_q,
            result_q=internal_result_q,
            interface=_internal_sender,
        )
        yield sm
@pytest.fixture()
def stopped_event():
    """Event used to tell the sender/handler threads to stop."""
    stopped = threading.Event()
    yield stopped
@pytest.fixture()
def internal_hm(
    runner,
    record_q,
    internal_result_q,
    test_settings,
    mock_server,
    internal_sender_q,
    internal_writer_q,
    _internal_sender,
    stopped_event,
):
    """HandleManager operating inside an isolated filesystem."""
    with runner.isolated_filesystem():
        test_settings.root_dir = os.getcwd()
        hm = HandleManager(
            settings=test_settings,
            record_q=record_q,
            result_q=internal_result_q,
            stopped=stopped_event,
            sender_q=internal_sender_q,
            writer_q=internal_writer_q,
            interface=_internal_sender,
        )
        yield hm
@pytest.fixture()
def internal_get_record():
    """Blocking-get helper that returns None when the queue times out."""
    def _get_record(input_q, timeout=None):
        try:
            i = input_q.get(timeout=timeout)
        except queue.Empty:
            return None
        return i
    return _get_record
@pytest.fixture()
def start_send_thread(
    internal_sender_q, internal_get_record, stopped_event, internal_process
):
    """Factory that runs a SendManager loop on a daemon thread."""
    def start_send(send_manager):
        def target():
            try:
                # Drain records until stopped_event is set and the queue is idle.
                while True:
                    payload = internal_get_record(
                        input_q=internal_sender_q, timeout=0.1
                    )
                    if payload:
                        send_manager.send(payload)
                    elif stopped_event.is_set():
                        break
            except Exception as e:
                # A sender crash marks the fake process dead and stops the loop.
                stopped_event.set()
                internal_process._alive = False
        t = threading.Thread(target=target)
        t.name = "testing-sender"
        t.daemon = True
        t.start()
        return t
    yield start_send
    stopped_event.set()
@pytest.fixture()
def start_handle_thread(record_q, internal_get_record, stopped_event):
    """Factory that runs a HandleManager loop on a daemon thread."""
    def start_handle(handle_manager):
        def target():
            while True:
                payload = internal_get_record(input_q=record_q, timeout=0.1)
                if payload:
                    handle_manager.handle(payload)
                elif stopped_event.is_set():
                    break
        t = threading.Thread(target=target)
        t.name = "testing-handler"
        t.daemon = True
        t.start()
        return t
    yield start_handle
    stopped_event.set()
@pytest.fixture()
def _start_backend(
    mocked_run,
    internal_hm,
    internal_sm,
    _internal_sender,
    start_handle_thread,
    start_send_thread,
    log_debug,
):
    """Factory that spins up handler+sender threads and optionally registers the run."""
    def start_backend_func(initial_run=True, initial_start=False):
        ht = start_handle_thread(internal_hm)
        st = start_send_thread(internal_sm)
        if initial_run:
            run = _internal_sender.communicate_run(mocked_run)
            if initial_start:
                _internal_sender.communicate_run_start(run.run)
        return (ht, st)
    yield start_backend_func
@pytest.fixture()
def _stop_backend(
    mocked_run,
    internal_hm,
    internal_sm,
    _internal_sender,
    start_handle_thread,
    start_send_thread,
    collect_responses,
):
    """Factory that shuts the backend down and asserts it exited cleanly."""
    def stop_backend_func(threads=None):
        threads = threads or ()
        done = False
        _internal_sender.publish_exit(0)
        # Poll for up to ~30s for the exit to be acknowledged.
        for _ in range(30):
            poll_exit_resp = _internal_sender.communicate_poll_exit()
            if poll_exit_resp:
                done = poll_exit_resp.done
                if done:
                    collect_responses.local_info = poll_exit_resp.local_info
                    break
            time.sleep(1)
        _internal_sender.join()
        for t in threads:
            t.join()
        assert done, "backend didnt shutdown"
    yield stop_backend_func
@pytest.fixture()
def backend_interface(_start_backend, _stop_backend, _internal_sender):
    """Context manager wrapping a full backend start/stop around a test body."""
    @contextmanager
    def backend_context(initial_run=True, initial_start=False):
        threads = _start_backend(initial_run=initial_run, initial_start=initial_start)
        try:
            yield _internal_sender
        finally:
            _stop_backend(threads=threads)
    return backend_context
@pytest.fixture
def publish_util(
    mocked_run, mock_server, backend_interface, parse_ctx,
):
    """Factory that publishes metrics/history/artifacts/files through a live
    backend and returns the parsed mock-server context."""
    def fn(
        metrics=None,
        history=None,
        artifacts=None,
        files=None,
        begin_cb=None,
        end_cb=None,
        initial_start=False,
    ):
        metrics = metrics or []
        history = history or []
        artifacts = artifacts or []
        files = files or []
        with backend_interface(initial_start=initial_start) as interface:
            if begin_cb:
                begin_cb(interface)
            for m in metrics:
                interface._publish_metric(m)
            for h in history:
                interface.publish_history(**h)
            for a in artifacts:
                interface.publish_artifact(**a)
            for f in files:
                interface.publish_files(**f)
            if end_cb:
                end_cb(interface)
        # Parse everything the mock server captured for this run.
        ctx_util = parse_ctx(mock_server.ctx, run_id=mocked_run.id)
        return ctx_util
    yield fn
@pytest.fixture
def tbwatcher_util(mocked_run, mock_server, internal_hm, backend_interface, parse_ctx):
    """Factory that starts a run, attaches a tensorboard watcher, runs
    ``write_function`` and returns the parsed mock-server context."""
    def fn(write_function, logdir="./", save=True, root_dir="./"):
        with backend_interface() as interface:
            # Build a run-start request record by hand and feed it to the handler.
            proto_run = pb.RunRecord()
            mocked_run._make_proto_run(proto_run)
            run_start = pb.RunStartRequest()
            run_start.run.CopyFrom(proto_run)
            request = pb.Request()
            request.run_start.CopyFrom(run_start)
            record = pb.Record()
            record.request.CopyFrom(request)
            internal_hm.handle_request_run_start(record)
            internal_hm._tb_watcher.add(logdir, save, root_dir)
            # need to sleep to give time for the tb_watcher delay
            time.sleep(15)
            write_function()
        ctx_util = parse_ctx(mock_server.ctx)
        return ctx_util
    yield fn
@pytest.fixture
def inject_requests(mock_server):
    """Fixture for injecting responses and errors to mock_server."""
    # TODO(jhr): make this compatible with live_mock_server
    return utils.InjectRequests(ctx=mock_server.ctx)
class Responses:
    # Plain attribute bag for collecting responses across fixtures.
    pass
@pytest.fixture
def collect_responses():
    """Attribute bag that fixtures stash responses on (e.g. local_info)."""
    responses = Responses()
    yield responses
|
record.py | #!/usr/bin/env python3
import json
import os
import threading
import time
import logging
from pyMusicSync import utils
class Record:
    """Class representing a record file to keep track of converted and synced files.

    The on-disk format is a JSON object mapping <trackID> -> <filePath>.
    All mutating operations are guarded by a per-instance lock so the
    autosave thread and sync workers can run concurrently.
    """

    def __init__(self, filePath, interval):
        # Instance attributes (previously shared class-level mutables, which
        # would leak state between Record instances).
        self.threadLock = threading.Lock()  # guards self.record and file writes
        self.filePath = filePath
        self.record = {}  # <trackID>:<filePath>
        if not os.path.isfile(self.filePath):
            # First run: create an empty record file so read() below succeeds.
            self.write()
        self.read()
        self.killed = False
        self.hasUnsavedChanges = False
        self.autosaveInterval = interval
        # daemon=True so a forgotten killAutosave() cannot keep the process alive.
        self.autosaveThread = threading.Thread(target=self.__autosave, daemon=True)

    def read(self):
        """Load the record mapping from disk, replacing the in-memory copy."""
        with open(self.filePath) as f:
            self.record = json.load(f)

    def add(self, track):
        """Register a synced track (thread-safe)."""
        with self.threadLock:
            self.record[track.trackID] = track.syncedFilePath
            self.hasUnsavedChanges = True

    def remove(self, item):
        """Remove a track ID from the record (thread-safe); raises KeyError if absent."""
        with self.threadLock:
            del self.record[item]
            self.hasUnsavedChanges = True

    def __contains__(self, item):
        # Membership is tested by the item's generated ID, not the item itself.
        with self.threadLock:
            trackID = utils.genID(item)
            return trackID in self.record

    def get(self, item):
        """Return the synced file path for a track ID; raises KeyError if absent."""
        return self.record[item]

    def write(self):
        """Persist the record mapping to disk (thread-safe)."""
        with self.threadLock:
            with open(self.filePath, "w") as f:
                json.dump(self.record, f, indent=4)

    def idList(self):
        """Return all known track IDs."""
        return list(self.record.keys())

    def __autosave(self):
        """Background loop: flush unsaved changes every autosaveInterval seconds."""
        lastTime = time.monotonic()
        while not self.killed:
            currTime = time.monotonic()
            if self.hasUnsavedChanges and (currTime - lastTime >= self.autosaveInterval):
                logging.debug("writing changes")
                self.write()
                self.hasUnsavedChanges = False
                lastTime = currTime
            time.sleep(0.1)

    def startAutosave(self):
        """Start the background autosave thread."""
        self.autosaveThread.start()

    def killAutosave(self):
        """Signal the autosave thread to exit (it checks the flag every 0.1s)."""
        self.killed = True
|
_threading.py | __all__ = ('run_in_thread', 'run_in_executer', )
from threading import Thread
from kivy.clock import Clock
import asynckivy
def _wrapper(func, event):
    """Run ``func`` and deliver ``(result, exception)`` through ``event``.

    Executed on a worker thread. The event is set via Clock.schedule_once so
    the awaiting coroutine resumes on Kivy's main loop, not on this thread.
    """
    ret = None
    exc = None
    try:
        ret = func()
    except Exception as e:
        exc = e
    finally:
        Clock.schedule_once(lambda __: event.set((ret, exc, )))
async def run_in_thread(func, *, daemon=False):
    """Run ``func`` in a new thread and await its completion.

    Returns ``func``'s return value; re-raises any exception it raised.
    The thread delivers its outcome back to the Kivy main loop via _wrapper.
    """
    event = asynckivy.Event()
    Thread(
        name='asynckivy.run_in_thread',
        target=_wrapper, daemon=daemon, args=(func, event, ),
    ).start()
    ret, exc = await event.wait()
    if exc is not None:
        raise exc
    return ret
async def run_in_executer(func, executer):
    """Run ``func`` in ``executer`` (a concurrent.futures executor) and await it.

    On cancellation (GeneratorExit) the submitted future is cancelled before
    propagating. Returns ``func``'s value; re-raises its exception.
    """
    event = asynckivy.Event()
    future = executer.submit(_wrapper, func, event)
    try:
        ret, exc = await event.wait()
    except GeneratorExit:
        future.cancel()
        raise
    # The event only fires from _wrapper's ``finally``, so by the time the
    # await returns the future must already be done.
    assert future.done()
    if exc is not None:
        raise exc
    return ret
|
engine.py | """"""
from threading import Thread
from queue import Queue, Empty
from copy import copy
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
SubscribeRequest,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import EVENT_TICK, EVENT_CONTRACT
from vnpy.trader.utility import load_json, save_json, BarGenerator
from vnpy.trader.database import database_manager
APP_NAME = "DataRecorder"
EVENT_RECORDER_LOG = "eRecorderLog"
EVENT_RECORDER_UPDATE = "eRecorderUpdate"
class RecorderEngine(BaseEngine):
    """Engine that records tick/bar market data into the database.

    Incoming ticks are queued and persisted by a dedicated worker thread so
    database writes never block the event loop.
    """

    setting_filename = "data_recorder_setting.json"

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Load settings, register event handlers and start the worker thread."""
        super().__init__(main_engine, event_engine, APP_NAME)

        self.queue = Queue()  # pending ("tick"|"bar", data) tasks
        self.thread = Thread(target=self.run)
        self.active = False

        self.tick_recordings = {}
        self.bar_recordings = {}
        self.bar_generators = {}

        self.load_setting()
        self.register_event()
        self.start()
        self.put_event()

    def load_setting(self):
        """Read the recording configuration from the JSON settings file."""
        setting = load_json(self.setting_filename)
        self.tick_recordings = setting.get("tick", {})
        self.bar_recordings = setting.get("bar", {})

    def save_setting(self):
        """Persist the current recording configuration to the settings file."""
        setting = {
            "tick": self.tick_recordings,
            "bar": self.bar_recordings
        }
        save_json(self.setting_filename, setting)

    def run(self):
        """Worker loop: drain the task queue and save records to the database."""
        while self.active:
            try:
                task = self.queue.get(timeout=1)
                task_type, data = task

                if task_type == "tick":
                    database_manager.save_tick_data([data])
                elif task_type == "bar":
                    database_manager.save_bar_data([data])
            except Empty:
                continue

    def close(self):
        """Stop the worker thread and wait for it to finish."""
        self.active = False

        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # correct spelling on all supported versions.
        if self.thread.is_alive():
            self.thread.join()

    def start(self):
        """Activate the engine and start the saving thread."""
        self.active = True
        self.thread.start()

    def add_bar_recording(self, vt_symbol: str):
        """Begin recording K-line (bar) data for ``vt_symbol``."""
        if vt_symbol in self.bar_recordings:
            self.write_log(f"已在K线记录列表中:{vt_symbol}")
            return

        contract = self.main_engine.get_contract(vt_symbol)
        if not contract:
            self.write_log(f"找不到合约:{vt_symbol}")
            return

        self.bar_recordings[vt_symbol] = {
            "symbol": contract.symbol,
            "exchange": contract.exchange.value,
            "gateway_name": contract.gateway_name
        }

        self.subscribe(contract)
        self.save_setting()
        self.put_event()

        self.write_log(f"添加K线记录成功:{vt_symbol}")

    def add_tick_recording(self, vt_symbol: str):
        """Begin recording tick data for ``vt_symbol``."""
        if vt_symbol in self.tick_recordings:
            self.write_log(f"已在Tick记录列表中:{vt_symbol}")
            return

        contract = self.main_engine.get_contract(vt_symbol)
        if not contract:
            self.write_log(f"找不到合约:{vt_symbol}")
            return

        self.tick_recordings[vt_symbol] = {
            "symbol": contract.symbol,
            "exchange": contract.exchange.value,
            "gateway_name": contract.gateway_name
        }

        self.subscribe(contract)
        self.save_setting()
        self.put_event()

        self.write_log(f"添加Tick记录成功:{vt_symbol}")

    def remove_bar_recording(self, vt_symbol: str):
        """Stop recording K-line (bar) data for ``vt_symbol``."""
        if vt_symbol not in self.bar_recordings:
            self.write_log(f"不在K线记录列表中:{vt_symbol}")
            return

        self.bar_recordings.pop(vt_symbol)
        self.save_setting()
        self.put_event()

        self.write_log(f"移除K线记录成功:{vt_symbol}")

    def remove_tick_recording(self, vt_symbol: str):
        """Stop recording tick data for ``vt_symbol``."""
        if vt_symbol not in self.tick_recordings:
            self.write_log(f"不在Tick记录列表中:{vt_symbol}")
            return

        self.tick_recordings.pop(vt_symbol)
        self.save_setting()
        self.put_event()

        self.write_log(f"移除Tick记录成功:{vt_symbol}")

    def register_event(self):
        """Subscribe to tick and contract events on the event engine."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)

    def process_tick_event(self, event: Event):
        """Route an incoming tick to the tick recorder and/or bar generator."""
        tick = event.data

        if tick.vt_symbol in self.tick_recordings:
            self.record_tick(tick)

        if tick.vt_symbol in self.bar_recordings:
            bg = self.get_bar_generator(tick.vt_symbol)
            bg.update_tick(tick)

    def process_contract_event(self, event: Event):
        """Register every newly seen contract for tick and bar recording."""
        contract = event.data
        vt_symbol = contract.vt_symbol
        self.add_tick_recording(vt_symbol)  # add tick contract info to local config
        self.add_bar_recording(vt_symbol)   # add bar contract info to local config
        if (vt_symbol in self.tick_recordings or vt_symbol in self.bar_recordings):
            self.subscribe(contract)

    def write_log(self, msg: str):
        """Publish a log message on the recorder log event channel."""
        event = Event(
            EVENT_RECORDER_LOG,
            msg
        )
        self.event_engine.put(event)

    def put_event(self):
        """Publish the sorted lists of recorded symbols for UI consumption."""
        tick_symbols = list(self.tick_recordings.keys())
        tick_symbols.sort()

        bar_symbols = list(self.bar_recordings.keys())
        bar_symbols.sort()

        data = {
            "tick": tick_symbols,
            "bar": bar_symbols
        }

        event = Event(
            EVENT_RECORDER_UPDATE,
            data
        )
        self.event_engine.put(event)

    def record_tick(self, tick: TickData):
        """Queue a copy of ``tick`` for the worker thread to persist."""
        task = ("tick", copy(tick))
        self.queue.put(task)

    def record_bar(self, bar: BarData):
        """Queue a copy of ``bar`` for the worker thread to persist."""
        task = ("bar", copy(bar))
        self.queue.put(task)

    def get_bar_generator(self, vt_symbol: str):
        """Return (creating on demand) the bar generator for ``vt_symbol``."""
        bg = self.bar_generators.get(vt_symbol, None)

        if not bg:
            bg = BarGenerator(self.record_bar)
            self.bar_generators[vt_symbol] = bg

        return bg

    def subscribe(self, contract: ContractData):
        """Subscribe to market data for ``contract`` on its gateway."""
        req = SubscribeRequest(
            symbol=contract.symbol,
            exchange=contract.exchange
        )
        self.main_engine.subscribe(req, contract.gateway_name)
|
utils.py | import argparse
import os
import random
from threading import Thread
import numpy as np
import torch
import torch.distributed as dist
import wandb
class AverageMeter:
    """Tracks a running sum and count and exposes their average."""

    def __init__(self, name=None):
        self.name = name
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Fold in ``n`` samples whose mean value is ``val``."""
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def seed_everything(seed=3407):
    """Seed every RNG used in training (numpy, random, torch, hashing).

    cudnn.benchmark stays enabled, so convolution algorithm selection
    remains non-deterministic (the deterministic switch is left commented).
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
def set_grads(grads, params):
    """Assign each gradient in ``grads`` to the ``.grad`` slot of the matching param."""
    for grad, param in zip(grads, params):
        param.grad = grad
def Threaded(fn):
    """Decorator: run ``fn`` on a fresh thread per call; returns the Thread."""
    def wrapper(*args, **kwargs):
        worker = Thread(target=fn, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return wrapper
@Threaded
def log_imgs_wandb(**kwargs):
    """Log each named batch of image tensors to wandb (runs on a background thread)."""
    payload = {}
    for name, batch in kwargs.items():
        payload[name] = [wandb.Image(img) for img in tensors2im(batch)]
    wandb.log(payload)
def tensors2im(x):
    """Convert a batch of float image tensors to uint8 numpy images.

    Input is assumed to be shaped (N, C, H, W); output is (N, H, W, C) uint8,
    with each image min-max normalized independently to [0, 255].
    The caller's tensor is NOT modified.
    """
    # clone() is required: detach() shares storage with the input, so the
    # in-place ops below would otherwise mutate the caller's tensor.
    x = x.detach().clone()
    mx = x.amax(dim=(1, 2, 3), keepdim=True)
    mn = x.amin(dim=(1, 2, 3), keepdim=True)
    # clamp the denominator so constant images map to 0 instead of NaN
    x = x.sub_(mn).div_((mx - mn).clamp_min_(1e-12))
    x = x.mul_(255.).to(torch.uint8)
    x = x.permute(0, 2, 3, 1)
    return x.cpu().numpy()
def get_world_size():
    """Return the number of distributed processes, or 1 outside distributed mode."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def reduce_loss(loss):
    """Reduce ``loss`` onto rank 0 and average it over all ranks.

    Returns the input unchanged when not running distributed (world size < 2).
    On other ranks the returned tensor's value is unspecified (only rank 0
    receives the reduced result).
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss
    with torch.no_grad():
        # Synchronous reduce: the original used async_op=True without waiting
        # on the returned work handle, so the division below could read the
        # tensor before the reduction finished.
        dist.reduce(loss, dst=0)
        if dist.get_rank() == 0:
            loss /= world_size
    return loss
def synchronize():
    """Barrier across all distributed ranks; no-op outside distributed mode."""
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def cleanup(distributed=True):
    """Tear down the process group when running distributed; otherwise no-op."""
    if not distributed:
        return
    dist.destroy_process_group()
def str2bool(v):
    """argparse type: parse a flag value into a bool.

    Accepts bools unchanged; otherwise matches common yes/no spellings
    case-insensitively and raises ArgumentTypeError for anything else.
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in {'yes', 'true', 't', 'y', '1'}:
        return True
    if normalized in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths (a single str is
        also accepted and treated as one path)
    """
    if isinstance(paths, list) and not isinstance(paths, str):
        targets = paths
    else:
        targets = [paths]
    for target in targets:
        mkdir(target)
def mkdir(path):
    """create a single empty directory (and parents) if it didn't exist

    Parameters:
        path (str) -- a single directory path
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(path, exist_ok=True)
|
test_wal_acceptor.py | import pytest
import random
import time
import os
import subprocess
import uuid
from contextlib import closing
from multiprocessing import Process, Value
from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory, PgBin
from fixtures.utils import lsn_to_hex, mkdir_if_needed
pytest_plugins = ("fixtures.zenith_fixtures")
# basic test, write something in setup with wal acceptors, ensure that commits
# succeed and data is written
def test_normal_work(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory):
    """With 3 wal acceptors up, commits succeed and inserted data is readable."""
    zenith_cli.run(["branch", "test_wal_acceptors_normal_work", "empty"])
    wa_factory.start_n_new(3)
    pg = postgres.create_start('test_wal_acceptors_normal_work',
                               wal_acceptors=wa_factory.get_connstrs())

    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
            # we rely upon autocommit after each statement
            # as waiting for acceptors happens there
            cur.execute('CREATE TABLE t(key int primary key, value text)')
            cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
            cur.execute('SELECT sum(key) FROM t')
            # sum of 1..100000
            assert cur.fetchone() == (5000050000, )
# Run page server and multiple acceptors, and multiple compute nodes running
# against different timelines.
def test_many_timelines(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory):
    """Run compute nodes on multiple timelines against one set of acceptors."""
    n_timelines = 2

    wa_factory.start_n_new(3)

    branches = ["test_wal_acceptors_many_timelines_{}".format(tlin) for tlin in range(n_timelines)]

    # start postgres on each timeline
    pgs = []
    for branch in branches:
        zenith_cli.run(["branch", branch, "empty"])
        pgs.append(postgres.create_start(branch, wal_acceptors=wa_factory.get_connstrs()))

    # Do everything in different loops to have actions on different timelines
    # interleaved.
    # create schema
    for pg in pgs:
        pg.safe_psql("CREATE TABLE t(key int primary key, value text)")

    # Populate data
    for pg in pgs:
        pg.safe_psql("INSERT INTO t SELECT generate_series(1,100000), 'payload'")

    # Check data
    for pg in pgs:
        res = pg.safe_psql("SELECT sum(key) FROM t")
        assert res[0] == (5000050000, )
# Check that dead minority doesn't prevent the commits: execute insert n_inserts
# times, with fault_probability chance of getting a wal acceptor down or up
# along the way. 2 of 3 are always alive, so the work keeps going.
def test_restarts(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory: WalAcceptorFactory):
    """Randomly stop/restart at most one acceptor at a time; commits must keep going."""
    fault_probability = 0.01
    n_inserts = 1000
    n_acceptors = 3

    wa_factory.start_n_new(n_acceptors)

    zenith_cli.run(["branch", "test_wal_acceptors_restarts", "empty"])
    pg = postgres.create_start('test_wal_acceptors_restarts',
                               wal_acceptors=wa_factory.get_connstrs())

    # we rely upon autocommit after each statement
    # as waiting for acceptors happens there
    pg_conn = pg.connect()
    cur = pg_conn.cursor()

    failed_node = None
    cur.execute('CREATE TABLE t(key int primary key, value text)')
    for i in range(n_inserts):
        cur.execute("INSERT INTO t values (%s, 'payload');", (i + 1, ))

        if random.random() <= fault_probability:
            if failed_node is None:
                # only one acceptor is ever down, so 2 of 3 stay alive
                failed_node = wa_factory.instances[random.randrange(0, n_acceptors)]
                failed_node.stop()
            else:
                failed_node.start()
                failed_node = None
    cur.execute('SELECT sum(key) FROM t')
    # sum of 1..1000
    assert cur.fetchone() == (500500, )
# seconds to wait before restarting an acceptor in test_unavailability
start_delay_sec = 2


def delayed_wal_acceptor_start(wa):
    """Start wal acceptor ``wa`` after start_delay_sec (run in a subprocess)."""
    time.sleep(start_delay_sec)
    wa.start()
# When majority of acceptors is offline, commits are expected to be frozen
def test_unavailability(zenith_cli, postgres: PostgresFactory, wa_factory):
    """With a majority (1 of 2) of acceptors down, commits must block until it returns."""
    wa_factory.start_n_new(2)

    zenith_cli.run(["branch", "test_wal_acceptors_unavailability", "empty"])
    pg = postgres.create_start('test_wal_acceptors_unavailability',
                               wal_acceptors=wa_factory.get_connstrs())

    # we rely upon autocommit after each statement
    # as waiting for acceptors happens there
    pg_conn = pg.connect()
    cur = pg_conn.cursor()

    # check basic work with table
    cur.execute('CREATE TABLE t(key int primary key, value text)')
    cur.execute("INSERT INTO t values (1, 'payload')")

    # shutdown one of two acceptors, that is, majority
    wa_factory.instances[0].stop()

    proc = Process(target=delayed_wal_acceptor_start, args=(wa_factory.instances[0], ))
    proc.start()

    start = time.time()
    cur.execute("INSERT INTO t values (2, 'payload')")
    # ensure that the query above was hanging while acceptor was down
    assert (time.time() - start) >= start_delay_sec
    proc.join()

    # for the world's balance, do the same with second acceptor
    wa_factory.instances[1].stop()

    proc = Process(target=delayed_wal_acceptor_start, args=(wa_factory.instances[1], ))
    proc.start()

    start = time.time()
    cur.execute("INSERT INTO t values (3, 'payload')")
    # ensure that the query above was hanging while acceptor was down
    assert (time.time() - start) >= start_delay_sec
    proc.join()

    cur.execute("INSERT INTO t values (4, 'payload')")

    cur.execute('SELECT sum(key) FROM t')
    assert cur.fetchone() == (10, )
# shut down random subset of acceptors, sleep, wake them up, rinse, repeat
def xmas_garland(acceptors, stop):
    """Randomly stop and restart subsets of acceptors until ``stop.value`` is truthy."""
    while not bool(stop.value):
        victims = [wa for wa in acceptors if random.random() >= 0.5]
        for victim in victims:
            victim.stop()
        time.sleep(1)
        for victim in victims:
            victim.start()
        time.sleep(1)
# value which gets unset on exit
@pytest.fixture
def stop_value():
    """Yield a shared int flag for worker processes; set to 1 on teardown."""
    flag = Value('i', 0)
    yield flag
    flag.value = 1
# do inserts while concurrently getting up/down subsets of acceptors
def test_race_conditions(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, wa_factory, stop_value):
    """Insert rows while a background process randomly stops/starts acceptors."""
    wa_factory.start_n_new(3)

    zenith_cli.run(["branch", "test_wal_acceptors_race_conditions", "empty"])
    pg = postgres.create_start('test_wal_acceptors_race_conditions',
                               wal_acceptors=wa_factory.get_connstrs())

    # we rely upon autocommit after each statement
    # as waiting for acceptors happens there
    pg_conn = pg.connect()
    cur = pg_conn.cursor()

    cur.execute('CREATE TABLE t(key int primary key, value text)')

    proc = Process(target=xmas_garland, args=(wa_factory.instances, stop_value))
    proc.start()

    for i in range(1000):
        cur.execute("INSERT INTO t values (%s, 'payload');", (i + 1, ))

    cur.execute('SELECT sum(key) FROM t')
    assert cur.fetchone() == (500500, )

    stop_value.value = 1
    proc.join()
class ProposerPostgres:
    """Object for running safekeepers sync with walproposer"""

    def __init__(self, pgdata_dir: str, pg_bin: PgBin, timeline_id: str, tenant_id: str):
        self.pgdata_dir: str = pgdata_dir
        self.pg_bin: PgBin = pg_bin
        self.timeline_id: str = timeline_id
        self.tenant_id: str = tenant_id

    def pg_data_dir_path(self) -> str:
        """Return the path to the data directory."""
        return self.pgdata_dir

    def config_file_path(self) -> str:
        """Return the path to postgresql.conf inside the data directory."""
        return os.path.join(self.pgdata_dir, 'postgresql.conf')

    def create_dir_config(self, wal_acceptors: str):
        """Create the data dir and config for running --sync-safekeepers."""
        mkdir_if_needed(self.pg_data_dir_path())

        config_lines = [
            "synchronous_standby_names = 'walproposer'\n",
            f"zenith.zenith_timeline = '{self.timeline_id}'\n",
            f"zenith.zenith_tenant = '{self.tenant_id}'\n",
            f"wal_acceptors = '{wal_acceptors}'\n",
        ]
        with open(self.config_file_path(), "w") as f:
            f.writelines(config_lines)

    def sync_safekeepers(self) -> str:
        """
        Run 'postgres --sync-safekeepers'.

        Returns execution result, which is commit_lsn after sync.
        """
        command = ["postgres", "--sync-safekeepers"]
        env = {
            "PGDATA": self.pg_data_dir_path(),
        }

        basepath = self.pg_bin.run_capture(command, env)
        stdout_filename = basepath + '.stdout'

        with open(stdout_filename, 'r') as stdout_f:
            return stdout_f.read().strip("\n ")
# insert wal in all safekeepers and run sync on proposer
def test_sync_safekeepers(repo_dir: str, pg_bin: PgBin, wa_factory: WalAcceptorFactory):
    """Insert WAL into all safekeepers, then verify --sync-safekeepers agrees on the end LSN."""
    wa_factory.start_n_new(3)

    timeline_id = uuid.uuid4().hex
    tenant_id = uuid.uuid4().hex

    # write config for proposer
    pgdata_dir = os.path.join(repo_dir, "proposer_pgdata")
    pg = ProposerPostgres(pgdata_dir, pg_bin, timeline_id, tenant_id)
    pg.create_dir_config(wa_factory.get_connstrs())

    # valid lsn, which is not in the segment start, nor in zero segment
    epoch_start_lsn = 0x16B9188  # 0/16B9188
    begin_lsn = epoch_start_lsn

    # append and commit WAL
    lsn_after_append = []
    for i in range(3):
        res = wa_factory.instances[i].append_logical_message(
            tenant_id,
            timeline_id,
            {
                "lm_prefix": "prefix",
                "lm_message": "message",
                "set_commit_lsn": True,
                "term": 2,
                "begin_lsn": begin_lsn,
                "epoch_start_lsn": epoch_start_lsn,
                "truncate_lsn": epoch_start_lsn,
            },
        )
        lsn_hex = lsn_to_hex(res["inserted_wal"]["end_lsn"])
        lsn_after_append.append(lsn_hex)
        print(f"safekeeper[{i}] lsn after append: {lsn_hex}")

    # run sync safekeepers
    lsn_after_sync = pg.sync_safekeepers()
    print(f"lsn after sync = {lsn_after_sync}")

    # every safekeeper must report the same LSN the proposer synced to
    assert all(lsn_after_sync == lsn for lsn in lsn_after_append)
|
frontend_test.py | #!/usr/bin/env python
"""Unittest for grr http server."""
import hashlib
import ipaddress
import os
import socket
import threading
import time
from absl import app
import portpicker
import requests
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import file_store
from grr_response_server.bin import frontend
from grr_response_server.databases import db
from grr_response_server.flows.general import file_finder
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
from grr.test_lib import worker_mocks
class GRRHTTPServerTest(test_lib.GRRBaseTest):
  """Test the http server."""

  @classmethod
  def setUpClass(cls):
    """Bring up one shared frontend HTTP server for all tests in the class."""
    super(GRRHTTPServerTest, cls).setUpClass()

    # Bring up a local server for testing.
    port = portpicker.pick_unused_port()
    ip = utils.ResolveHostnameToIP("localhost", port)
    cls.httpd = frontend.GRRHTTPServer((ip, port),
                                       frontend.GRRHTTPServerHandler)

    # Build a base URL that works for both IPv4 and IPv6 addresses.
    if ipaddress.ip_address(ip).version == 6:
      cls.address_family = socket.AF_INET6
      cls.base_url = "http://[%s]:%d/" % (ip, port)
    else:
      cls.address_family = socket.AF_INET
      cls.base_url = "http://%s:%d/" % (ip, port)

    cls.httpd_thread = threading.Thread(
        name="GRRHTTPServerTestThread", target=cls.httpd.serve_forever)
    cls.httpd_thread.daemon = True
    cls.httpd_thread.start()

  @classmethod
  def tearDownClass(cls):
    """Shut the shared server down and join its serving thread."""
    cls.httpd.Shutdown()
    cls.httpd_thread.join()

  def setUp(self):
    super().setUp()
    self.client_id = self.SetupClient(0)

  def tearDown(self):
    super().tearDown()

    # Wait until all pending http requests have been handled.
    for _ in range(100):
      if frontend.GRRHTTPServerHandler.active_counter == 0:
        return
      time.sleep(0.01)
    self.fail("HTTP server thread did not shut down in time.")

  def testServerPem(self):
    """The server must expose its certificate at /server.pem."""
    req = requests.get(self.base_url + "server.pem")
    self.assertEqual(req.status_code, 200)
    self.assertIn(b"BEGIN CERTIFICATE", req.content)

  def _RunClientFileFinder(self,
                           paths,
                           action,
                           network_bytes_limit=None,
                           client_id=None):
    """Run the ClientFileFinder flow against this server; returns the session id."""
    client_id = client_id or self.SetupClient(0)
    with test_lib.ConfigOverrider({"Client.server_urls": [self.base_url]}):
      session_id = flow_test_lib.TestFlowHelper(
          file_finder.ClientFileFinder.__name__,
          action_mocks.ClientFileFinderClientMock(
              client_worker=worker_mocks.FakeClientWorker()),
          client_id=client_id,
          paths=paths,
          pathtype=rdf_paths.PathSpec.PathType.OS,
          action=action,
          process_non_regular_files=True,
          network_bytes_limit=network_bytes_limit,
          creator=self.test_username)
      return session_id

  def testClientFileFinderUpload(self):
    """Downloaded files must land in the file store with matching content and hash."""
    paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
    action = rdf_file_finder.FileFinderAction.Download()

    session_id = self._RunClientFileFinder(paths, action)
    results = flow_test_lib.GetFlowResults(self.client_id, session_id)
    self.assertLen(results, 5)
    relpaths = [
        os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
        for p in results
    ]
    self.assertCountEqual(relpaths, [
        "History.plist", "History.xml.plist", "test.plist",
        "parser_test/com.google.code.grr.plist",
        "parser_test/InstallHistory.plist"
    ])

    for r in results:
      data = open(r.stat_entry.pathspec.path, "rb").read()
      fd = file_store.OpenFile(
          db.ClientPath.FromPathSpec(self.client_id, r.stat_entry.pathspec))
      self.assertEqual(fd.read(100), data[:100])
      self.assertEqual(fd.hash_id.AsBytes(), hashlib.sha256(data).digest())

  def testClientFileFinderUploadLimit(self):
    """Exceeding the network bytes limit must abort the flow."""
    paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
    action = rdf_file_finder.FileFinderAction.Download()

    # TODO(hanuszczak): Instead of catching arbitrary runtime errors, we should
    # catch specific instance that was thrown. Unfortunately, all errors are
    # intercepted in the `MockWorker` class and converted to runtime errors.
    with self.assertRaisesRegex(RuntimeError, "exceeded network send limit"):
      with test_lib.SuppressLogs():
        self._RunClientFileFinder(paths, action, network_bytes_limit=1500)

  def testClientFileFinderUploadBound(self):
    """With DOWNLOAD_TRUNCATED, oversized files are truncated but all results remain."""
    paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
    action = rdf_file_finder.FileFinderAction.Download(
        oversized_file_policy="DOWNLOAD_TRUNCATED", max_size=300)

    session_id = self._RunClientFileFinder(paths, action)
    results = flow_test_lib.GetFlowResults(self.client_id, session_id)
    self.assertLen(results, 5)
    relpaths = [
        os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
        for p in results
    ]
    self.assertCountEqual(relpaths, [
        "History.plist", "History.xml.plist", "test.plist",
        "parser_test/com.google.code.grr.plist",
        "parser_test/InstallHistory.plist"
    ])

  def testClientFileFinderUploadSkip(self):
    """With SKIP, oversized files are reported but their content is not transferred."""
    paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
    action = rdf_file_finder.FileFinderAction.Download(
        oversized_file_policy="SKIP", max_size=300)

    session_id = self._RunClientFileFinder(paths, action)
    results = flow_test_lib.GetFlowResults(self.client_id, session_id)

    skipped = []
    uploaded = []
    for result in results:
      if result.HasField("transferred_file"):
        uploaded.append(result)
      else:
        skipped.append(result)

    self.assertLen(uploaded, 2)
    self.assertLen(skipped, 3)

    relpaths = [
        os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
        for p in uploaded
    ]
    self.assertCountEqual(relpaths, ["History.plist", "test.plist"])

  def testClientFileFinderFilestoreIntegration(self):
    """Two clients downloading the same files must each get complete results."""
    paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
    action = rdf_file_finder.FileFinderAction.Download()

    client_ids = self.SetupClients(2)
    session_ids = {
        c: self._RunClientFileFinder(paths, action, client_id=c)
        for c in client_ids
    }
    results_per_client = {
        c: flow_test_lib.GetFlowResults(c, session_id)
        for c, session_id in session_ids.items()
    }
    for results in results_per_client.values():
      self.assertLen(results, 5)
      relpaths = [
          os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
          for p in results
      ]
      self.assertCountEqual(relpaths, [
          "History.plist", "History.xml.plist", "test.plist",
          "parser_test/com.google.code.grr.plist",
          "parser_test/InstallHistory.plist"
      ])
def main(args):
  """Entry point for absl.app: delegate to the GRR test runner."""
  test_lib.main(args)


if __name__ == "__main__":
  app.run(main)
|
system_metrics_monitor.py | import time
import datetime as dt
import threading
import sys
import argparse
import psutil
import firebase_util
from core_data_modules.logging import Logger
log = Logger(__name__)
firebase_client = None
COLLECTION = 'pipeline_system_metrics' #name of the firebase collections to store metrics
DEFAULT_INTERVAL = 600 # wait interval between each set of metric readings in seconds
def get_and_publish_system_metrics(interval):
    """Continuously sample CPU/memory/disk metrics and publish them to Firestore.

    Runs forever; sleeps ``interval`` seconds between samples.
    """
    while True:
        metrics = {}

        # record datetime (timezone-aware UTC)
        metrics['datetime'] = dt.datetime.now(dt.timezone.utc).isoformat()

        # current cpu utilization
        metrics['cpu_percent'] = psutil.cpu_percent(interval=0.1)

        # cpu load over the last 1, 5 and 15 minutes in percentage
        cpu_load = [round((value / psutil.cpu_count() * 100), 2)
                    for value in psutil.getloadavg()]
        metrics['cpu_load_interval_percent'] = {
            '1min': cpu_load[0],
            '5min': cpu_load[1],
            '15min': cpu_load[2]
        }

        # memory usage — use named attributes instead of positional indexes,
        # which silently break if psutil changes its namedtuple layout
        memory_usage = psutil.virtual_memory()
        metrics['memory_usage'] = {
            'available': memory_usage.available,
            'used': memory_usage.used,
            'percent': memory_usage.percent,
            'free': memory_usage.free
        }

        # disk usage — query by mountpoint: passing the device path
        # (e.g. /dev/sda1, which is what partition[0] held) makes
        # psutil.disk_usage report the filesystem containing /dev instead
        # of the partition's actual filesystem
        metrics['disk_usage'] = []
        for partition in psutil.disk_partitions():
            disk_usage = dict(psutil.disk_usage(partition.mountpoint)._asdict())
            disk_usage['disk'] = partition.device
            metrics['disk_usage'].append(disk_usage)

        log.info("Recorded metrics: {}".format(metrics))
        publish_metrics_to_firestore(metrics)
        time.sleep(interval)
def publish_metrics_to_firestore(metrics):
    """Append one metrics document to the Firestore metrics collection.

    Reads the module-level ``firebase_client``, which must be initialised
    before this is called.
    """
    firebase_client.collection(COLLECTION).add(metrics)
    log.info("Successfully published metrics to firebase {} collection".format(COLLECTION))
def run_system_metric_monitor(interval=DEFAULT_INTERVAL):
    """Parse CLI args, initialise the Firebase client and start the metrics thread."""
    # publish_metrics_to_firestore reads the module-level firebase_client;
    # without this declaration the assignment below only created a local
    # variable and the publisher crashed on a None client.
    global firebase_client

    parser = argparse.ArgumentParser(description='Retrieve system metrics i.e cpu utilization, memory & disk usage')
    parser.add_argument("crypto_token_file", type=str, help="path to Firebase crypto token file")
    args = parser.parse_args()

    firebase_client = firebase_util.init_firebase_client(args.crypto_token_file)

    runner = threading.Thread(target=get_and_publish_system_metrics, args=(interval,))
    runner.start()


run_system_metric_monitor()
|
ntpot.py | #!/usr/bin/env python3
import os
import signal
import threading
from . import ntp
import core.potloader as potloader
import core.utils as utils
from .dblogger import DBThread
class NTPot(potloader.PotLoader):
    """Implementation of NTP honeypot that responds to NTP messages type 3, 6 and 7."""

    def name(self):
        """Short identifier used for config lookup and logging."""
        return 'ntp'

    def _create_server(self):
        # general assumption is that the parameters (log_queue, output_queue)
        # are already properly created/configured prior to calling this function
        return ntp.create_server(
            self.conf,
            self.name(),
            self.log_queue,
            self.output_queue,
            self.hpfeeds_client,
            self.alerter
        )

    def _create_dbthread(self, dbfile, new_attack_interval):
        """Build the database logging thread for this pot."""
        return DBThread(
            dbfile,
            self.name(),
            self.log_queue,
            self.output_queue,
            self.stop_event,
            new_attack_interval
        )

    def _start_server(self):
        self.server.serve_forever()

    def _get_config_path(self):
        return os.path.join(os.path.dirname(__file__), 'ntpot.conf')

    def _detailed_status(self, status):
        """Turn the raw status dict into [label, value] rows for display."""
        avg_amp = float('{0:.2f}'.format(status['avg_amp']))
        pkt_in_bytes = utils.format_unit(status['packets_in_bytes'])

        known_modes = {
            3: 'CLIENT (3): ',
            4: 'SERVER (4): ',
            5: 'BROADCAST (5): ',
            6: 'CONTROL (6): ',
            7: 'PRIVATE/MONLIST (7): ',
        }

        modes = ()
        for mode, mode_count in status['modes']:
            if mode == 1 or mode == 2:
                mode_str = 'SYMMETRIC (1 or 2): '
            else:
                mode_str = known_modes.get(mode, 'UNKNOWN (%s): ' % (mode))
            modes += (mode_str + utils.sep_thousand(mode_count),)

        return [
            ['Average amplification', utils.sep_thousand(avg_amp)],
            ['Traffic IN/OUT', pkt_in_bytes],
            ['NTP modes distribution', modes],
        ]
if __name__ == "__main__":
    # Run the pot standalone: set it up, serve from a background thread, and
    # block in signal.pause() until SIGINT triggers a clean shutdown.
    ntpot = NTPot()
    ntpot.setup()
    t = threading.Thread(target=ntpot.run)
    t.start()
    ntpot.potthread = t
    signal.signal(signal.SIGINT, ntpot.shutdown_signal_wrapper)
    signal.pause()
|
data_utils.py | # Lint as python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
from contextlib import closing
import errno
import functools
import hashlib
import multiprocessing
import multiprocessing.dummy
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import weakref
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from tensorflow.python.framework import ops
from six.moves.urllib.request import urlopen
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.util.tf_export import keras_export
try:
import queue
except ImportError:
import Queue as queue
try:
import typing
is_iterator = lambda x: isinstance(x, typing.Iterator)
except ImportError:
# Python2 uses next, and Python3 should have typing so __next__ is not needed.
is_iterator = lambda x: hasattr(x, '__iter__') and hasattr(x, 'next')
if sys.version_info[0] == 2:

  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    Args:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once on establishment of
          the network connection and once after each block read thereafter. The
          hook will be passed three arguments; a count of blocks transferred so
          far, a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """

    def chunk_read(response, chunk_size=8192, reporthook=None):
      # Stream the response in fixed-size chunks, invoking reporthook after
      # each read so callers can display progress.
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          break

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  # Python 3: the six-provided stdlib urlretrieve has no such issues.
  from six.moves.urllib.request import urlretrieve
def is_generator_or_sequence(x):
  """Check if `x` is a Keras generator type."""
  # Tensors, numpy arrays and plain built-in containers are never treated
  # as generators.
  non_generator_types = (ops.Tensor, np.ndarray, str, list, tuple, dict, set,
                         frozenset)
  if isinstance(x, non_generator_types):
    return False
  return tf_inspect.isgenerator(x) or isinstance(x, Sequence) or is_iterator(x)
def _extract_archive(file_path, path='.', archive_format='auto'):
  """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
  Args:
    file_path: path to the archive file
    path: path to extract the archive file
    archive_format: Archive format to try for extracting the file.
      Options are 'auto', 'tar', 'zip', and None.
      'tar' includes tar, tar.gz, and tar.bz files.
      The default 'auto' is ['tar', 'zip'].
      None or an empty list will return no matches found.
  Returns:
    True if a match was found and an archive extraction was completed,
    False otherwise.
  """
  if archive_format is None:
    return False
  if archive_format == 'auto':
    archive_format = ['tar', 'zip']
  if isinstance(archive_format, six.string_types):
    archive_format = [archive_format]
  file_path = path_to_string(file_path)
  path = path_to_string(path)
  for archive_type in archive_format:
    # Bug fix: the original used two independent `if`s, so an unrecognized
    # format name either raised NameError (open_fn unbound) or silently
    # re-tested the matcher left over from the previous iteration.
    if archive_type == 'tar':
      open_fn = tarfile.open
      is_match_fn = tarfile.is_tarfile
    elif archive_type == 'zip':
      open_fn = zipfile.ZipFile
      is_match_fn = zipfile.is_zipfile
    else:
      continue  # Skip unknown format names instead of crashing.
    if is_match_fn(file_path):
      with open_fn(file_path) as archive:
        try:
          archive.extractall(path)
        except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
          # Remove the partially-extracted tree so a failed extraction does
          # not leave corrupt data behind.
          if os.path.exists(path):
            if os.path.isfile(path):
              os.remove(path)
            else:
              shutil.rmtree(path)
          raise
      return True
  return False
@keras_export('keras.utils.get_file')
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it is not already in the cache.
  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.
  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.
  Example:
  ```python
  path_to_downloaded_file = tf.keras.utils.get_file(
      "flower_photos",
      "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
      untar=True)
  ```
  Args:
    fname: Name of the file. If an absolute path `/path/to/file.txt` is
      specified the file will be saved at that location.
    origin: Original URL of the file.
    untar: Deprecated in favor of `extract` argument.
      boolean, whether the file should be decompressed
    md5_hash: Deprecated in favor of `file_hash` argument.
      md5 hash of the file for verification
    file_hash: The expected hash string of the file after download.
      The sha256 and md5 hash algorithms are both supported.
    cache_subdir: Subdirectory under the Keras cache dir where the file is
      saved. If an absolute path `/path/to/folder` is
      specified the file will be saved at that location.
    hash_algorithm: Select the hash algorithm to verify the file.
      options are `'md5'`, `'sha256'`, and `'auto'`.
      The default 'auto' detects the hash algorithm in use.
    extract: True tries extracting the file as an Archive, like tar or zip.
    archive_format: Archive format to try for extracting the file.
      Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
      `'tar'` includes tar, tar.gz, and tar.bz files.
      The default `'auto'` corresponds to `['tar', 'zip']`.
      None or an empty list will return no matches found.
    cache_dir: Location to store cached files, when None it
      defaults to the default directory `~/.keras/`.
  Returns:
    Path to the downloaded file
  """
  if cache_dir is None:
    cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
  # Deprecated md5_hash path: map it onto the generic file_hash mechanism.
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  # Fall back to a world-writable location when the cache dir is read-only.
  if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  _makedirs_exist_ok(datadir)
  fname = path_to_string(fname)
  if untar:
    # With untar=True, `fname` names the extracted directory and the download
    # target gets a '.tar.gz' suffix appended.
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)
  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True
  if download:
    print('Downloading data from', origin)
    class ProgressTracker(object):
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None
    def dl_progress(count, block_size, total_size):
      # First call creates the bar (total may be unknown: -1 -> None);
      # subsequent calls advance it by bytes transferred so far.
      if ProgressTracker.progbar is None:
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)
    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt) as e:
      # Remove the partial download so a later call does not treat it as a
      # valid cached file.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None
  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath
  if extract:
    _extract_archive(fpath, datadir, archive_format)
  return fpath
def _makedirs_exist_ok(datadir):
if six.PY2:
# Python 2 doesn't have the exist_ok arg, so we try-except here.
try:
os.makedirs(datadir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Args:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.
  Args:
    fpath: path to the file being validated
    file_hash: The expected hash string of the file.
      The sha256 and md5 hash algorithms are both supported.
    algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
      The default 'auto' detects the hash algorithm in use.
    chunk_size: Bytes to read at a time, important for large files.
  Returns:
    Whether the file is valid
  """
  # A 64-hex-digit expected hash implies sha256; anything else implies md5.
  use_sha256 = (algorithm == 'sha256' or
                (algorithm == 'auto' and len(file_hash) == 64))
  hasher = 'sha256' if use_sha256 else 'md5'
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class ThreadsafeIter(object):
  """Wrap an iterator with a lock and propagate exceptions to all threads."""

  def __init__(self, it):
    self.it = it
    self.lock = threading.Lock()
    # Once a generator raises, every later next() on it only raises
    # StopIteration. With multiple consumer threads the real error can then
    # surface in one thread while another sees an apparently clean end of
    # iteration. Remember the first exception and keep re-raising it so all
    # threads observe the same failure.
    self._exception = None

  def __iter__(self):
    return self

  def next(self):
    # Python 2 iterator protocol name; delegates to __next__.
    return self.__next__()

  def __next__(self):
    with self.lock:
      if self._exception:
        raise self._exception  # pylint: disable=raising-bad-type
      try:
        return next(self.it)
      except Exception as exc:
        self._exception = exc
        raise
def threadsafe_generator(f):
  """Decorator wrapping a generator function so its iterator is thread-safe."""
  @functools.wraps(f)
  def wrapped(*args, **kwargs):
    return ThreadsafeIter(f(*args, **kwargs))
  return wrapped
@keras_export('keras.utils.Sequence')
class Sequence(object):
  """Base object for fitting to a sequence of data, such as a dataset.

  Subclasses must implement `__getitem__` (returning one complete batch) and
  `__len__` (the number of batches per epoch). Override `on_epoch_end` to
  modify the dataset between epochs.

  Unlike plain generators, a `Sequence` is safe for multiprocessing and
  guarantees each sample is seen exactly once per epoch.

  Example:
  ```python
  import math
  import numpy as np

  class CIFAR10Sequence(Sequence):
      def __init__(self, x_set, y_set, batch_size):
          self.x, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return math.ceil(len(self.x) / self.batch_size)

      def __getitem__(self, idx):
          lo = idx * self.batch_size
          hi = lo + self.batch_size
          return np.array(self.x[lo:hi]), np.array(self.y[lo:hi])
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Gets batch at position `index`.

    Args:
      index: position of the batch in the Sequence.

    Returns:
      A batch
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Number of batches in the Sequence.

    Returns:
      The number of batches in the Sequence.
    """
    raise NotImplementedError

  def on_epoch_end(self):
    """Method called at the end of every epoch. Default: no-op."""
    pass

  def __iter__(self):
    """Yield the batches in index order."""
    for index in range(len(self)):
      yield self[index]
def iter_sequence_infinite(seq):
  """Iterates indefinitely over a Sequence, restarting from the beginning
  each time the Sequence is exhausted.

  Args:
    seq: `Sequence` instance.

  Yields:
    Batches of data from the `Sequence`.
  """
  while True:
    for batch in seq:
      yield batch
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None  # Only created if needed.
_WORKER_IDS = set()
# When True, get_pool_class() always returns a thread pool, even if the
# caller asked for multiprocessing; toggled by dont_use_multiprocessing_pool.
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
  """Decorator forcing thread pools (never process pools) while `f` runs."""
  @functools.wraps(f)
  def wrapped(*args, **kwargs):
    with _FORCE_THREADPOOL_LOCK:
      global _FORCE_THREADPOOL
      previous = _FORCE_THREADPOOL
      _FORCE_THREADPOOL = True
      result = f(*args, **kwargs)
      # Restore the previous value so nested uses compose correctly.
      _FORCE_THREADPOOL = previous
      return result
  return wrapped
def get_pool_class(use_multiprocessing):
  """Return the pool class to use: real processes, or threads when forced."""
  global _FORCE_THREADPOOL
  if use_multiprocessing and not _FORCE_THREADPOOL:
    return multiprocessing.Pool
  return multiprocessing.dummy.Pool  # ThreadPool
def get_worker_id_queue():
  """Lazily create the queue to track worker ids.

  Returns:
    The process-shared `multiprocessing.Queue`, created on first call and
    cached in the module-level `_WORKER_ID_QUEUE` thereafter.
  """
  global _WORKER_ID_QUEUE
  if _WORKER_ID_QUEUE is None:
    _WORKER_ID_QUEUE = multiprocessing.Queue()
  return _WORKER_ID_QUEUE
def init_pool(seqs):
  """Pool initializer: install the shared sequences mapping in this process."""
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = seqs
def get_index(uid, i):
  """Get the value from the Sequence `uid` at index `i`.
  To allow multiple Sequences to be used at the same time, we use `uid` to
  get a specific one. A single Sequence would cause the validation to
  overwrite the training Sequence.
  Args:
    uid: int, Sequence identifier
    i: index
  Returns:
    The value at index `i`.
  """
  # Runs inside a worker; _SHARED_SEQUENCES was populated by the pool init.
  return _SHARED_SEQUENCES[uid][i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
  """Base class to enqueue inputs.
  The task of an Enqueuer is to use parallelism to speed up preprocessing.
  This is done with processes or threads.
  Example:
  ```python
      enqueuer = SequenceEnqueuer(...)
      enqueuer.start()
      datas = enqueuer.get()
      for data in datas:
          # Use the inputs; training, evaluating, predicting.
          # ... stop sometime.
      enqueuer.stop()
  ```
  The `enqueuer.get()` should be an infinite stream of datas.
  """
  def __init__(self, sequence,
               use_multiprocessing=False):
    self.sequence = sequence
    self.use_multiprocessing = use_multiprocessing
    # Allocate a unique uid for this enqueuer. A multiprocessing.Value is
    # preferred so uids stay unique across processes; fall back to a plain
    # int when the OS forbids multiprocessing primitives.
    global _SEQUENCE_COUNTER
    if _SEQUENCE_COUNTER is None:
      try:
        _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
      except OSError:
        # In this case the OS does not allow us to use
        # multiprocessing. We resort to an int
        # for enqueuer indexing.
        _SEQUENCE_COUNTER = 0
    if isinstance(_SEQUENCE_COUNTER, int):
      self.uid = _SEQUENCE_COUNTER
      _SEQUENCE_COUNTER += 1
    else:
      # Doing Multiprocessing.Value += x is not process-safe.
      with _SEQUENCE_COUNTER.get_lock():
        self.uid = _SEQUENCE_COUNTER.value
        _SEQUENCE_COUNTER.value += 1
    self.workers = 0
    self.executor_fn = None
    self.queue = None
    self.run_thread = None
    self.stop_signal = None
  def is_running(self):
    # Running means start() has been called and stop() has not.
    return self.stop_signal is not None and not self.stop_signal.is_set()
  def start(self, workers=1, max_queue_size=10):
    """Starts the handler's workers.
    Args:
      workers: Number of workers.
      max_queue_size: queue size
        (when full, workers could block on `put()`)
    """
    if self.use_multiprocessing:
      self.executor_fn = self._get_executor_init(workers)
    else:
      # We do not need the init since it's threads.
      self.executor_fn = lambda _: get_pool_class(False)(workers)
    self.workers = workers
    self.queue = queue.Queue(max_queue_size)
    self.stop_signal = threading.Event()
    # Daemon thread: the producer loop must not keep the process alive.
    self.run_thread = threading.Thread(target=self._run)
    self.run_thread.daemon = True
    self.run_thread.start()
  def _send_sequence(self):
    """Sends current Iterable to all workers."""
    # For new processes that may spawn
    _SHARED_SEQUENCES[self.uid] = self.sequence
  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.
    Should be called by the same thread which called `start()`.
    Args:
      timeout: maximum time to wait on `thread.join()`
    """
    self.stop_signal.set()
    # Drain the queue under its own mutex so a producer blocked on put()
    # gets woken (not_full.notify) and can observe the stop signal.
    with self.queue.mutex:
      self.queue.queue.clear()
      self.queue.unfinished_tasks = 0
      self.queue.not_full.notify()
    self.run_thread.join(timeout)
    _SHARED_SEQUENCES[self.uid] = None
  def __del__(self):
    if self.is_running():
      self.stop()
  @abstractmethod
  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    raise NotImplementedError
  @abstractmethod
  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.
    Args:
      workers: Number of workers.
    Returns:
      Function, a Function to initialize the pool
    """
    raise NotImplementedError
  @abstractmethod
  def get(self):
    """Creates a generator to extract data from the queue.
    Skip the data if it is `None`.
    # Returns
        Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
  """Builds a Enqueuer from a Sequence.
  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
  Args:
    sequence: A `tf.keras.utils.data_utils.Sequence` object.
    use_multiprocessing: use multiprocessing if True, otherwise threading
    shuffle: whether to shuffle the data at the beginning of each epoch
  """
  def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
    super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
    self.shuffle = shuffle
  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.
    Args:
      workers: Number of workers.
    Returns:
      Function, a Function to initialize the pool
    """
    def pool_fn(seqs):
      pool = get_pool_class(True)(
          workers, initializer=init_pool_generator,
          initargs=(seqs, None, get_worker_id_queue()))
      # Track the pool so its status can be checked later (see _DATA_POOLS).
      _DATA_POOLS.add(pool)
      return pool
    return pool_fn
  def _wait_queue(self):
    """Wait for the queue to be empty."""
    # Polls every 100ms; returns early if stop() was requested.
    while True:
      time.sleep(0.1)
      if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
        return
  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    sequence = list(range(len(self.sequence)))
    self._send_sequence()  # Share the initial sequence
    while True:
      # Shuffling the index list (not the data) keeps each epoch a
      # permutation of all batches.
      if self.shuffle:
        random.shuffle(sequence)
      with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
        for i in sequence:
          if self.stop_signal.is_set():
            return
          # Blocks when the queue is full, which throttles the producers.
          self.queue.put(
              executor.apply_async(get_index, (self.uid, i)), block=True)
        # Done with the current epoch, waiting for the final batches
        self._wait_queue()
        if self.stop_signal.is_set():
          # We're done
          return
      # Call the internal on epoch end.
      self.sequence.on_epoch_end()
      self._send_sequence()  # Update the pool
  def get(self):
    """Creates a generator to extract data from the queue.
    Skip the data if it is `None`.
    Yields:
      The next element in the queue, i.e. a tuple
      `(inputs, targets)` or
      `(inputs, targets, sample_weights)`.
    """
    while self.is_running():
      try:
        # The 5s timeout lets the loop re-check is_running() periodically.
        inputs = self.queue.get(block=True, timeout=5).get()
        if self.is_running():
          self.queue.task_done()
        if inputs is not None:
          yield inputs
      except queue.Empty:
        pass
      except Exception:  # pylint: disable=broad-except
        self.stop()
        six.reraise(*sys.exc_info())
def init_pool_generator(gens, random_seed=None, id_queue=None):
  """Initializer function for pool workers.
  Args:
    gens: State which should be made available to worker processes.
    random_seed: An optional value with which to seed child processes.
    id_queue: A multiprocessing Queue of worker ids. This is used to indicate
      that a worker process was created by Keras and can be terminated using
      the cleanup_all_keras_forkpools utility.
  """
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = gens
  worker_proc = multiprocessing.current_process()
  # name isn't used for anything, but setting a more descriptive name is helpful
  # when diagnosing orphaned processes.
  worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)
  if random_seed is not None:
    # Offset by the worker pid so each worker gets a distinct stream.
    np.random.seed(random_seed + worker_proc.ident)
  if id_queue is not None:
    # If a worker dies during init, the pool will just create a replacement.
    id_queue.put(worker_proc.ident, block=True, timeout=0.1)
def next_sample(uid):
  """Gets the next value from the generator `uid`.
  To allow multiple generators to be used at the same time, we use `uid` to
  get a specific one. A single generator would cause the validation to
  overwrite the training generator.
  Args:
    uid: int, generator identifier
  Returns:
    The next value of generator `uid`.
  """
  # Idiom: the builtin next() works on both Python 2 and 3 iterators;
  # six.next is merely an alias for it, so drop the indirection.
  return next(_SHARED_SEQUENCES[uid])
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
  """Builds a queue out of a data generator.
  The provided generator can be finite in which case the class will throw
  a `StopIteration` exception.
  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
  Args:
    generator: a generator function which yields data
    use_multiprocessing: use multiprocessing if True, otherwise threading
    wait_time: time to sleep in-between calls to `put()`
    random_seed: Initial seed for workers,
      will be incremented by one for each worker.
  """
  def __init__(self, sequence,
               use_multiprocessing=False,
               random_seed=None):
    super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
    self.random_seed = random_seed
  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.
    Args:
      workers: Number of works.
    Returns:
      A Function to initialize the pool
    """
    def pool_fn(seqs):
      pool = get_pool_class(True)(
          workers, initializer=init_pool_generator,
          initargs=(seqs, self.random_seed, get_worker_id_queue()))
      # Track the pool so its status can be checked later (see _DATA_POOLS).
      _DATA_POOLS.add(pool)
      return pool
    return pool_fn
  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    self._send_sequence()  # Share the initial generator
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
      while True:
        if self.stop_signal.is_set():
          return
        # Blocks when the queue is full, which throttles the producer.
        self.queue.put(
            executor.apply_async(next_sample, (self.uid,)), block=True)
  def get(self):
    """Creates a generator to extract data from the queue.
    Skip the data if it is `None`.
    Yields:
      The next element in the queue, i.e. a tuple
      `(inputs, targets)` or
      `(inputs, targets, sample_weights)`.
    """
    try:
      while self.is_running():
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except StopIteration:
      # Special case for finite generators
      last_ones = []
      while self.queue.qsize() > 0:
        last_ones.append(self.queue.get(block=True))
      # Wait for them to complete
      for f in last_ones:
        f.wait()
      # Keep the good ones
      last_ones = [future.get() for future in last_ones if future.successful()]
      for inputs in last_ones:
        if inputs is not None:
          yield inputs
    except Exception as e:  # pylint: disable=broad-except
      self.stop()
      # A non-thread-safe generator raises "generator already executing"
      # when two workers call next() on it concurrently.
      if 'generator already executing' in str(e):
        raise RuntimeError(
            'Your generator is NOT thread-safe. '
            'Keras requires a thread-safe generator when '
            '`use_multiprocessing=False, workers > 1`. ')
      six.reraise(*sys.exc_info())
|
plugin.py | # -*- coding: utf-8 -*-
import time
import logging
import traceback
import sys
import json
from threading import Thread
from mamonsu.lib.const import Template
class PluginDisableException(Exception):
    """Raised from Plugin.run() to stop the plugin loop cleanly (see _loop)."""
    pass
class Plugin(object):
    """Base class for plugins: per-plugin config handling, zabbix template
    hooks, and a daemon thread that invokes run() every `Interval` seconds."""
    # How often (seconds) _loop() re-invokes run().
    Interval = 60
    # Per-plugin default config values, merged into the global config by
    # set_default_config(). Values must be strings.
    DEFAULT_CONFIG = {} # type: Dict[str, str]
    _thread = None # type: Thread
    # Whether this plugin acts as a sender (see is_sender()).
    _sender = False
    _enabled = True
    # for all childs
    is_child = True
    # const
    DELTA = Template.DELTA
    GRAPH_TYPE = Template.GRAPH_TYPE
    VALUE_TYPE = Template.VALUE_TYPE
    UNITS = Template.UNITS
    DELTA_SPEED = Template.DELTA.speed_per_second
    DELTA_CHANGE = Template.DELTA.simple_change
    def __init__(self, config):
        """Store the global config and load this plugin's own section into
        `_plugin_config` (keyed by option name)."""
        self.config = config
        self.log = logging.getLogger(
            self.__class__.__name__.upper())
        self.sender = None
        self.last_error_text = ''
        # from config => _plugin_config
        self._plugin_config = {}
        # The config section name is the lowercased class name.
        name = self.__class__.__name__.lower()
        if self.config.has_plugin_config(name):
            for x in self.config.plugin_options(name):
                self._plugin_config[x] = self.config.fetch(name, x)
    @classmethod
    def only_child_subclasses(self):
        """Recursively collect subclasses with is_child set (real plugins)."""
        plugins = []
        for klass in self.__subclasses__():
            if klass.is_child:
                plugins.append(klass)
            # Descend even through non-child intermediate base classes.
            plugins.extend(klass.only_child_subclasses())
        return plugins
    @classmethod
    def set_default_config(cls, config):
        """Copy DEFAULT_CONFIG entries into `config` without overwriting
        options already present (e.g. loaded from a config file)."""
        name = cls.__name__.lower()
        # if section already loaded via config file
        if not config.has_section(name) and len(cls.DEFAULT_CONFIG) > 0:
            config.add_section(name)
        for x in cls.DEFAULT_CONFIG:
            if config.has_option(name, x):
                continue
            value = cls.DEFAULT_CONFIG[x]
            # Non-string defaults are a plugin author error: warn, then
            # stringify via format() below anyway.
            if not isinstance(value, str):
                sys.stderr.write(
                    'Config value {0} in section {1} must'
                    ' be string! Fix plugin please.\n'.format(x, name))
            config.set(name, x, '{0}'.format(cls.DEFAULT_CONFIG[x]))
    # get value from config
    def plugin_config(self, name, as_json=False):
        """Return option `name` from this plugin's config section, optionally
        JSON-decoded; None when the option is absent."""
        if name not in self._plugin_config:
            return None
        if as_json:
            return json.loads(self._plugin_config[name])
        else:
            return self._plugin_config[name]
    def start(self):
        """Launch _loop() on a daemon thread."""
        self._thread = Thread(target=self._loop)
        self._thread.daemon = True
        self._thread.start()
        self.log.info('started ...')
    def is_alive(self):
        """True while the plugin's worker thread is running."""
        if self._thread is not None:
            return self._thread.is_alive()
        return False
    def run(self, sender):
        """Override in subclasses: one iteration of the plugin's work."""
        return None
    def is_sender(self):
        return self._sender
    def is_enabled(self):
        # The config option (string 'False') overrides the class flag.
        if self.plugin_config('enabled') == 'False':
            return False
        return self._enabled
    def disable(self):
        self._enabled = False
    def set_sender(self, sender):
        self.sender = sender
    def items(self, template):
        """Override: zabbix template items. Default: none."""
        return None
    def graphs(self, template):
        """Override: zabbix template graphs. Default: none."""
        return None
    def triggers(self, template):
        """Override: zabbix template triggers. Default: none."""
        return None
    def discovery_rules(self, template):
        """Override: zabbix discovery rules. Default: none."""
        return None
    def _log_exception(self, e, trace):
        # Full traceback goes to debug only; the error line stays short.
        self.last_error_text = 'catch error: {0}'.format(e)
        self.log.error(self.last_error_text)
        self.log.info('hint: enable debug level to full exception trace')
        self.log.debug(trace)
    def _loop(self):
        """Worker loop: call run() every Interval seconds.

        NOTE(review): any unexpected exception, and any run() that overruns
        Interval, terminates the loop permanently (`return`) -- confirm this
        is the intended policy rather than retrying.
        """
        while(True):
            last_start = time.time()
            try:
                self.run(self.sender)
            except PluginDisableException as e:
                # Cooperative shutdown requested by the plugin itself.
                text = 'disable plugin: {0}.'.format(e)
                self.log.info(text)
                return
            except Exception as e:
                trace = traceback.format_exc()
                self._log_exception(e, trace)
                return
            # Sleep out the remainder of the interval; a non-positive value
            # means run() took longer than Interval.
            sleep_time = self.Interval - int(time.time() - last_start)
            if sleep_time > 0:
                time.sleep(sleep_time)
            else:
                self.log.error(
                    'Timeout: {0}s'.format(int(time.time() - last_start)))
                return
|
config.py | """Configuration Utilities."""
import configparser
import inspect
import os
import subprocess
import sys
import threading
import requests
quiet_STDOUT = True
#
# Interactions
#
def send(info, force=False):
    """Print `info` to stdout for the parent application, honoring quiet mode."""
    if force or not quiet_STDOUT:
        print('{}'.format(info))
        # Flush immediately so a parent process reading the pipe sees it now.
        sys.stdout.flush()
def quiet_logging(new_value=True):
    """Toggle logging: set the module-level quiet flag consumed by send()."""
    global quiet_STDOUT
    quiet_STDOUT = new_value
def ifttt(event, dataset=None):
    """Trigger IFTTT Maker Event.

    Args:
        event: IFTTT Maker event name.
        dataset: optional POST payload; defaults to {'value1': ''}.
    """
    # Bug fix: the default was a mutable dict shared across calls; build the
    # default per call instead (passing None keeps callers compatible).
    if dataset is None:
        dataset = {'value1': ''}
    key = read_ini('IFTTT', 'key', filename='secret')
    try:
        requests.post('https://maker.ifttt.com/trigger/{}/with/key/{}'.format(event, key), data=dataset)
    except Exception:  # noqa -- best-effort: a network failure must not crash the caller
        print('IFTTT Failed - possible loss of INTERNET connection')
#
# INI tools
#
def _ini_path(filename='pins'):
"""Get ini file path."""
cwd = os.getcwd()
if 'Python' in cwd:
if 'modules' in cwd:
return '{}/../{}.ini'.format(cwd, filename)
else:
return '{}/{}.ini'.format(cwd, filename)
else:
return '{}/Python/{}.ini'.format(cwd, filename)
def get_pin(component, param, _eval=True):
    """Get pin numbering value from a shared ini file.

    NOTE(review): `eval` on config text is only acceptable because the ini
    file is a trusted local file -- never point this at untrusted input.
    """
    value = read_ini(component, param, filename='pins')
    if not _eval:
        return value
    return eval(value)
def read_ini(component, param, filename='pins'):
    """Read option `param` from section `component` of `<filename>.ini`.

    Raises:
        Exception: when the file cannot be read or the option is missing,
            with the original error chained as the cause.
    """
    config = configparser.RawConfigParser()
    file = _ini_path(filename)
    try:
        config.read(file)
        return config.get(component, param)
    # Fix: the original bare `except:` also swallowed KeyboardInterrupt and
    # discarded the underlying cause; narrow it and chain with `from`.
    except Exception as err:
        raise Exception('Failed to load `{}` and `{}` from: {}'.format(
            component, param, file)) from err
def write_ini(component, param, value):
    """Write `value` to option `param` in section `component` of the pins ini."""
    pin_config = configparser.RawConfigParser()
    file = _ini_path()
    pin_config.read(file)
    # Bug fix: update in memory BEFORE opening the file for writing. The
    # original opened with 'w' (truncating the file) and then called set();
    # a bad section name raised NoSectionError and left the ini emptied.
    pin_config.set(component, param, value)
    with open(file, 'w') as cfgfile:
        pin_config.write(cfgfile)
def check_status():
    """Return True, if alarm is to continue running, else is False."""
    # The ini stores the flag as text, so compare case-insensitively.
    running = get_pin('Alarm_Status', 'running', _eval=False)
    return 'true' in running.lower()
#
# PWM Tools
#
def set_pwm(pin_num, percent, quiet=False):
    """Run PWM commands through Pi-Blaster."""
    # Pi-Blaster command format, e.g.: echo "22=0.00" > /dev/pi-blaster
    cmd = 'echo "{:02}={:0.2f}" > /dev/pi-blaster'.format(int(pin_num), float(percent))
    if not quiet:
        send(cmd)
    if not is_pi():
        # Not on a Pi: report the command (above) but do not execute it.
        return False
    return subprocess.call(cmd, shell=True)
def release_pwm(pin_num):
    """Release pin from Pi-Blaster.

    Returns the shell exit status of the echo command.
    NOTE(review): unlike set_pwm(), this does not guard on is_pi() -- confirm
    it is never called off-device.
    """
    cmd = 'echo "release {:02}" > /dev/pi-blaster'.format(pin_num)
    send(cmd)
    return subprocess.call(cmd, shell=True)
#
# Try evaluating unknown inputs:
#
def try_eval(raw):
    """Try to find a True/False, Integer, etc. value in string input.

    Falls back to the stripped string when it is not a Python literal.
    NOTE(review): eval is only safe here for trusted input sources.
    """
    raw = raw.strip()
    try:
        return eval(raw)
    # Fix: the original bare `except:` also caught KeyboardInterrupt and
    # SystemExit; only genuine evaluation errors should fall back.
    except Exception:
        return raw
def dict_arg(args, key):
    """Try to decode the dictionary key or return False.

    Args:
        args: mapping (or indexable) holding the arguments.
        key: key to look up.
    Returns:
        The stored value, or False when the key is absent.
    """
    # Fix: keep the try body minimal and catch only lookup failures instead
    # of a bare except (which also swallowed KeyboardInterrupt).
    try:
        this = args[key]
    except (KeyError, IndexError, TypeError):
        # send('@X {} not found in `{}`'.format(key, args))
        return False
    send('@> {} found in `{}`'.format(key, args))
    return this
#
# File system
#
def is_pi():
    """Check if running on a Raspberry Pi (heuristic: 'pi' in the cwd path)."""
    current_dir = os.path.abspath('')
    return 'pi' in current_dir
def get_path(raw):
    """Get full path, anchoring paths without 'Python' under ./Python/."""
    if 'Python' in raw:
        # Already anchored -- return unchanged.
        return raw
    full = '{}/Python/{}'.format(os.path.abspath(''), raw)
    send('Setting r: {} to f: {}'.format(raw, full))
    return full
def is_running(task):
    """Use `ps aux` to check if a script is actively running.

    NOTE(review): os.system returns the shell exit status; grep exits 0 when
    it finds a match. The original comment claimed "256 if running, else 0",
    which contradicts the `output == 0` test below -- and `grep` can match
    its own process line, yielding false positives. Verify against `pgrep`.
    """
    output = os.system('ps aux | grep {}'.format(task))
    is_active = output == 0
    send('Is `{}` running? > {} ({})'.format(task, is_active, output))
    return is_active
#
# Other
#
def parse_argv(sys_in, arg_num=1):
    """Parse arguments: return argv[arg_num] trimmed and lower-cased."""
    value = sys_in.argv[arg_num]
    return str(value).strip().lower()
def thread(target, args=()):
    """Start a daemon thread running `target(*args)` and return it."""
    worker = threading.Thread(target=target, args=args)
    # Daemon: the thread must not keep the process alive on exit.
    worker.daemon = True
    worker.start()
    return worker
# # Stop pi-blaster / or any process:
# print os.system('sudo kill $(ps aux | grep 'pi-blaster\/[p]' +
# 'i-blaster' | awk '{print $2}')')
# sudo kill $(ps aux | grep 'pi-blaster\/[p]i-blaster' | awk '{print $2}')
class Logger(object):
    """Simple custom logger handler.

    lgr = cg.Logger('name  ')      # origin must be exactly 6 characters
    lgr.lit(lgr.ln(), 'A little alert!! With line number!')
    """

    def __init__(self, origin=False):
        """Initializer.

        Args:
            origin: six-character tag identifying the log source; when
                omitted/falsy, falls back to the default ' br' tag.
        """
        # Bug fix: the original asserted len(origin) BEFORE the fallback, so
        # Logger() crashed with TypeError (len(False)) and the documented
        # default tag was unreachable. Validate only a provided origin.
        if origin:
            assert len(origin) == 6, 'Origin must be 6 letters ({} - is not)'.format(origin)
        self.__origin = origin if origin else ' br'

    def ln(self):
        """Get line number for logging (the caller's line, zero-padded)."""
        return '{:03d}'.format(inspect.currentframe().f_back.f_lineno)

    def _format(self, ln, message):
        # Standard prefix: "<origin> (#<line>): <message>".
        return '{} (#{}): {}'.format(self.__origin, ln, message.strip())

    def lit(self, ln, message, print_out=True):
        """Minor - two line comment.

        NOTE(review): emits the message, a blank line, then the message again
        (three sends) -- this contradicts the "two line" docstring; confirm
        the duplication is intentional before changing output.
        """
        send(self._format(ln, message))
        send(self._format(ln, ''))
        send(self._format(ln, message))

    def big(self, ln, message):
        """Major - five line comment."""
        send(self._format(ln, '__'))
        send(self._format(ln, ''))
        send(self._format(ln, message))
        send(self._format(ln, ''))
        send(self._format(ln, '__'))
|
test_triggers.py | # (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
import contextlib
import logging
import os
import time
from threading import Thread
import pytest
import yaml
from flask import Flask, request
from pyaviso import logger, user_config
from pyaviso.authentication import auth
from pyaviso.engine import engine_factory as ef
from pyaviso.event_listeners import event_listener_factory as elf
from pyaviso.event_listeners.listener_schema_parser import ListenerSchemaParser
@pytest.fixture()
def conf() -> user_config.UserConfig:  # this automatically configure the logging
    """Fixture: load the test user configuration from tests/config.yaml."""
    c = user_config.UserConfig(conf_path="tests/config.yaml")
    return c
@pytest.fixture()
def listener_factory(conf):
    """Fixture: build an EventListenerFactory wired to the test config."""
    # create the notification listener factory
    authenticator = auth.Auth.get_auth(conf)
    engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
    # Load the schema
    listener_schema = ListenerSchemaParser().load(conf)
    listener_factory = elf.EventListenerFactory(engine_factory, listener_schema)
    return listener_factory
@contextlib.contextmanager
def caplog_for_logger(caplog):  # this is needed to assert over the logging output
    """Attach pytest's caplog handler to the root logger for the block's duration."""
    caplog.clear()
    lo = logging.getLogger()
    lo.addHandler(caplog.handler)
    caplog.handler.setLevel(logging.DEBUG)
    try:
        yield
    finally:
        # Robustness fix: always detach, even when the body raises, so one
        # failing test cannot leak the handler into subsequent tests.
        lo.removeHandler(caplog.handler)
def test_echo_trigger(conf, listener_factory, caplog):
    """End-to-end: an echo-trigger listener logs the simulated notification."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/echo_listener.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f.read())
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert listeners.__len__() == 1
        listener = listeners.pop()
        # simulate a notification
        listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        # give the trigger's worker time to process the notification
        time.sleep(1)
        # check if the change has been logged
        for record in caplog.records:
            assert record.levelname != "ERROR"
        # check the trigger has logged the notification on the system log
        assert (
            "{'date': '20210101', 'country': 'italy', 'airport': 'FCO', 'number': 'AZ203'}, 'payload': 'Landed'}"
            in caplog.text
        )
        assert "Echo Trigger completed" in caplog.text
def test_function_trigger(conf, listener_factory, caplog):
    """A function-type trigger is invoked once per simulated notification."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    # create a list that increments every time there is a new event
    trigger_list = []
    def trigger_function(notification):
        trigger_list.append(notification["payload"])
    trigger = {"type": "function", "function": trigger_function}
    # create a listener that uses that trigger
    # NOTE(review): this local `request` shadows the `flask.request` import.
    request = {"country": "Italy"}
    listener = {"event": "flight", "request": request, "triggers": [trigger]}
    listeners = {"listeners": [listener]}
    # parse it
    listeners: list = listener_factory.create_listeners(listeners)
    assert listeners.__len__() == 1
    listener = listeners.pop()
    # create independent client to trigger the notification
    listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
    # give the trigger's worker time to process the notification
    time.sleep(1)
    assert trigger_list.__len__() == 1
def test_logger_listener(conf, listener_factory, caplog):
    """A log listener must append the notification to its configured log file."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/log_listener.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 1
        listener = listeners.pop()
        # simulate a notification
        listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # check if the change has been logged
        for record in caplog.records:
            assert record.levelname != "ERROR"
        # check the trigger has logged the notification on the system log
        assert "Log Trigger completed" in caplog.text
        # check the trigger has logged the notification on the log specified
        trigger_log = listener.triggers[0].get("path")
        with open(trigger_log, "r") as f:
            assert "Notification received" in f.read()
    # clean up the file created by the trigger; use the configured path
    # instead of the previously hard-coded "testLog.log" so the cleanup
    # stays in sync with the fixture
    if os.path.exists(trigger_log):
        os.remove(trigger_log)
def test_command_listener(conf, listener_factory, caplog):
    """A command listener must run its shell command trigger to completion."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/command_listener.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 1
        listener = listeners.pop()
        # simulate a notification
        listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # check if the change has been logged
        for record in caplog.records:
            assert record.levelname != "ERROR"
        assert "Command Trigger completed" in caplog.text
def test_command_json_listener(conf, listener_factory, caplog):
    """A command listener fed the JSON notification must complete its trigger."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/command_json_listener.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 1
        listener = listeners.pop()
        # simulate a notification
        listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # check if the change has been logged
        for record in caplog.records:
            assert record.levelname != "ERROR"
        assert "Command Trigger completed" in caplog.text
def test_command_json_path_listener(conf, listener_factory, caplog):
    """A command listener using JSON-path substitution must complete its trigger."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/command_json_path_listener.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 1
        listener = listeners.pop()
        # simulate a notification
        listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # check if the change has been logged
        for record in caplog.records:
            assert record.levelname != "ERROR"
        assert "Command Trigger completed" in caplog.text
# test frontend
# Minimal Flask app used as the HTTP endpoint for the post-listener tests;
# it is started on demand inside the tests (see test_post_complete_listener).
test_frontend = Flask("Test_Frontend")


@test_frontend.route("/test", methods=["POST"])
def received():
    # Echo the posted JSON back so the sender gets a 200 with the payload.
    return f"Received {request.json}"
# test_frontend.run(host="127.0.0.1", port=8001)
def test_post_complete_listener(conf, listener_factory, caplog):
    """A post listener must POST a CloudEvents notification to the frontend."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/post_complete_listener.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 1
        listener = listeners.pop()
        # start a test frontend to send the notification to
        server = Thread(target=test_frontend.run, daemon=True, kwargs={"host": "127.0.0.1", "port": 8051})
        server.start()
        time.sleep(1)
        # simulate a notification
        listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # check if the change has been logged
        for record in caplog.records:
            assert record.levelname != "ERROR"
        assert "Post Trigger completed" in caplog.text
        assert "CloudEvents notification sent successfully" in caplog.text
def test_multiple_nots_echo(conf, listener_factory, caplog):
    """Each notification in a stream must fire the echo trigger exactly once."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/echo_listener.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 1
        listener = listeners.pop()
        # simulate a stream of notifications
        n_nots = 10
        for _ in range(n_nots):
            listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # the trigger must have completed once per notification, n_nots in total
        for record in caplog.records:
            assert record.levelname != "ERROR"
        assert caplog.text.count("Echo Trigger completed") == n_nots
def test_multiple_nots_cmd(conf, listener_factory, caplog):
    """Each notification in a stream must fire the command trigger exactly once."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/command_listener.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 1
        listener = listeners.pop()
        # simulate a stream of notifications
        n_nots = 10
        for _ in range(n_nots):
            listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # the trigger must have completed once per notification, n_nots in total
        for record in caplog.records:
            assert record.levelname != "ERROR"
        assert caplog.text.count("Command Trigger completed") == n_nots
def test_multiple_listeners(conf, listener_factory, caplog):
    """Every listener defined in a single file must fire its own trigger."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/multiple_listeners.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 3
        # simulate a notification for all listeners
        for listener in listeners:
            listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # check if the changes has been logged, once per listener
        for record in caplog.records:
            assert record.levelname != "ERROR"
        assert caplog.text.count("Echo Trigger completed") == 3
def test_multiple_triggers(conf, listener_factory, caplog):
    """One listener with several triggers must run each of them once."""
    logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
    with caplog_for_logger(caplog):  # this allows to assert over the logging output
        # open the listener yaml file
        with open("tests/unit/fixtures/good_listeners/multiple_triggers.yaml", "r") as f:
            listeners_dict = yaml.safe_load(f)
        # parse it
        listeners: list = listener_factory.create_listeners(listeners_dict)
        assert len(listeners) == 1
        listener = listeners.pop()
        assert len(listener.triggers) == 3
        # simulate a notification
        listener.callback("/tmp/aviso/flight/20210101/italy/FCO/AZ203", "Landed")
        time.sleep(1)
        # check if the changes has been logged
        for record in caplog.records:
            assert record.levelname != "ERROR"
        assert "Echo Trigger completed" in caplog.text
        assert "Log Trigger completed" in caplog.text
        assert "Command Trigger completed" in caplog.text
    # clean up the files created by the log and command triggers
    if os.path.exists("testLog.log"):
        os.remove("testLog.log")
    if os.path.exists("test.txt"):
        os.remove("test.txt")
|
debug_tracker.py | import socket
import pickle
import time
import sched
import numpy as np
from tracker.debug.debug_command import DebugCommand
import threading
class Debug:
    """Streams tracker state (robot and ball positions) to a UI server over
    UDP for visual debugging, at ``debug_fps`` frames per second."""

    def __init__(self, tracker, address):
        # UDP socket "connected" to the UI server so plain send() can be used
        self.ui_server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.ui_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.ui_server.connect(address)
        self.tracker = tracker
        self.tracker.logger.info('UI server create. ({}:{})'.format(*address))
        # daemon thread: dies automatically with the main process
        self._thread = threading.Thread(target=self.running_thread, daemon=True)
        self.debug_fps = 20  # scheduling rate of the debug loop (frames/sec)
        self.ui_commands = []  # commands accumulated for the next flush

    def start(self):
        """Start the background debug loop thread."""
        self._thread.start()

    def add_robot_position_commands(self, pos, color=(0, 255, 0), color_angle=(0, 0, 0), radius=90):
        """Queue a filled circle at the robot position plus a heading segment.

        *pos* is (x, y, orientation); colors are RGB tuples.
        """
        player_center = (pos[0], pos[1])
        data_circle = {'center': player_center,
                       'radius': radius,
                       'color': color,
                       'is_fill': True,
                       'timeout': 0.08}
        # 3003: circle-drawing command id (see DebugCommand for the protocol)
        self.ui_commands.append(DebugCommand(3003, data_circle))
        # heading indicator: a 90-unit segment along the robot orientation
        end_point = np.array([pos[0], pos[1]]) + 90 * np.array([np.cos(pos[2]), np.sin(pos[2])])
        end_point = (end_point[0], end_point[1])
        data_line = {'start': player_center,
                     'end': end_point,
                     'color': color_angle,
                     'timeout': 0.08}
        # 3001: line-drawing command id
        self.ui_commands.append(DebugCommand(3001, data_line))

    def add_balls_position_commands(self, pos, color=(255, 127, 80)):
        """Queue a filled circle at a ball position."""
        player_center = (pos[0], pos[1])
        data_circle = {'center': player_center,
                       'radius': 150,
                       'color': color,
                       'is_fill': True,
                       'timeout': 0.06}
        self.ui_commands.append(DebugCommand(3003, data_circle))

    def send_ui_commands(self):
        """Build draw commands for both teams and the balls, then flush all of
        them to the UI server; raw observations are drawn red and small,
        filtered poses in a team/ball specific color."""
        for robot in self.tracker._yellow_team:
            if robot.last_observation is not None:
                pos_raw = robot.last_observation
                self.add_robot_position_commands(pos_raw, color=(255, 0, 0), radius=10, color_angle=(255, 0, 0))
            if robot.pose is not None:
                pos_filter = robot.pose
                self.add_robot_position_commands(pos_filter, color=(255, 255, 0))
        for robot in self.tracker._blue_team:
            if robot.last_observation is not None:
                pos_raw = robot.last_observation
                self.add_robot_position_commands(pos_raw, color=(255, 0, 0), radius=10, color_angle=(255, 0, 0))
            if robot.pose is not None:
                pos_filter = robot.pose
                self.add_robot_position_commands(pos_filter, color=(0, 0, 255))
        for ball in self.tracker._balls.considered_balls:
            if ball.pose is not None:
                pos_filter = ball.pose
                self.add_balls_position_commands(pos_filter, color=(255, 0, 0))
        for ball in self.tracker._balls:
            if ball.last_observation is not None:
                pos_raw = ball.last_observation
                self.add_balls_position_commands(pos_raw, color=(255, 255, 255))
            if ball.pose is not None:
                pos_filter = ball.pose
                self.add_balls_position_commands(pos_filter, color=(255, 100, 0))
        try:
            for cmd in self.ui_commands:
                self.ui_server.send(pickle.dumps(cmd.get_packet()))
        except ConnectionRefusedError:
            # UI not running: drop this frame silently and keep tracking
            pass
        self.ui_commands.clear()

    def print_info(self):
        """Print ball confidences roughly once per wall-clock second
        (only inside the last 50 ms of each second)."""
        if 0.95 < time.time() % 1 < 1:
            print('Balls confidence:',
                  ' '.join('{:.1f}'.format(ball.confidence) for ball in self.tracker._balls.considered_balls),
                  'Balls: ',
                  ' '.join('{}'.format(id(ball)) for ball in self.tracker._balls))
            print(self.tracker.track_frame)

    def scheduled_loop(self, scheduler):
        """Send one debug frame and re-arm the scheduler while the tracker runs."""
        if self.tracker.is_running:
            # re-schedule first so send/print time does not shift the period start
            scheduler.enter(1 / self.debug_fps, 3, self.scheduled_loop, (scheduler,))
            self.send_ui_commands()
            self.print_info()

    def running_thread(self):
        """Thread body: run a sched-based loop at debug_fps until the
        tracker stops (scheduled_loop then stops re-arming itself)."""
        sc = sched.scheduler(time.time, time.sleep)
        sc.enter(1 / self.debug_fps, 1, self.scheduled_loop, (sc,))
        sc.run()
|
thruster_manager.py | # Copyright (c) 2020 The Plankton Authors.
# All rights reserved.
#
# This source code is derived from UUV Simulator
# (https://github.com/uuvsimulator/uuv_simulator)
# Copyright (c) 2016-2019 The UUV Simulator Authors
# licensed under the Apache license, Version 2.0
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from os.path import isdir, join
from time import sleep
import xml.etree.ElementTree as etree
import yaml
from geometry_msgs.msg import Wrench
import tf2_ros
import rclpy
from rclpy.node import Node
from .models import Thruster
from plankton_utils.param_helper import parse_nested_params_to_dict
from tf_quaternion import transformations
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
import threading
class ThrusterManager(Node):
    """
    The thruster manager generates the thruster allocation matrix using the
    TF information and publishes the thruster forces assuming that the thruster
    topics are named in the following pattern
    <thruster_topic_prefix>/id_<index>/<thruster_topic_suffix>
    Thruster frames should also be named as follows
    <thruster_frame_base>_<index>
    """
    MAX_THRUSTERS = 16

    def __init__(self, node_name, **kwargs):
        """Class constructor."""
        super().__init__(node_name,
                         allow_undeclared_parameters=True,
                         automatically_declare_parameters_from_overrides=True,
                         **kwargs)

        # This flag will be set to true once the thruster allocation matrix is
        # available
        self._ready = False

        # Acquiring the namespace of the vehicle, normalized to '/ns/' form
        self.namespace = self.get_namespace()
        if self.namespace != '':
            if self.namespace[-1] != '/':
                self.namespace += '/'

            if self.namespace[0] != '/':
                self.namespace = '/' + self.namespace

        self.config = self.get_parameters_by_prefix('thruster_manager')
        if len(self.config) == 0:
            raise RuntimeError('Thruster manager parameters not '
                               'initialized for uuv_name=' +
                               self.namespace)
        self.config = parse_nested_params_to_dict(self.config, '.')

        # Optional URDF: when provided, per-thruster axes are read from the
        # robot description instead of defaulting to the x-axis
        #TODO To change in foxy
        robot_description_param = 'urdf_file'
        self.use_robot_descr = False
        self.axes = {}
        if self.has_parameter(robot_description_param):
            urdf_file = self.get_parameter(robot_description_param).value
            if urdf_file != "":
                self.use_robot_descr = True
                self.parse_urdf(urdf_file)

        # Negative rates are invalid; fall back to the 50 Hz default
        if self.config['update_rate'].value < 0:
            self.config['update_rate'].value = 50.0

        self.base_link_ned_to_enu = None

        self.tf_buffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tf_buffer, self)

        # Initialize some variables
        self.output_dir = None
        self.n_thrusters = 0
        # Thruster objects used to calculate the right angular velocity command
        self.thrusters = list()
        # Thrust forces vector
        self.thrust = None
        # Thruster allocation matrix: transform thruster inputs to force/torque
        self.configuration_matrix = None
        self.inverse_configuration_matrix = None

        # The TF lookups below can block for seconds; finish initialization on
        # a background thread and expose completion through a future
        self.init_future = rclpy.Future()
        self.init_thread = threading.Thread(target=self._init_async, daemon=True)
        self.init_thread.start()

    # =========================================================================
    def _init_async(self):
        """Run the asynchronous part of the initialization, logging failures."""
        try:
            self._init_async_impl()
        except Exception as e:
            self.get_logger().warn('Caught exception: ' + repr(e))

    # =========================================================================
    def _init_async_impl(self):
        """Resolve the NED->ENU transform, build the TAM and the thrusters."""
        tf_trans_ned_to_enu = None

        try:
            if self.namespace != '':
                target = '{}base_link'.format(self.namespace)
                target = target[1::]
                source = '{}base_link_ned'.format(self.namespace)
            else:
                target = 'base_link'
                source = 'base_link_ned'
            source = source[1::]
            tf_trans_ned_to_enu = self.tf_buffer.lookup_transform(
                target, source, rclpy.time.Time(), rclpy.time.Duration(seconds=1))
        except Exception as e:
            # Not every vehicle publishes a NED frame; this is non-fatal
            self.get_logger().warn('No transform found between base_link and base_link_ned'
                                   ' for vehicle {}, message={}'.format(self.namespace, e))
            self.base_link_ned_to_enu = None

        if tf_trans_ned_to_enu is not None:
            self.base_link_ned_to_enu = transformations.quaternion_matrix(
                (tf_trans_ned_to_enu.transform.rotation.x,
                 tf_trans_ned_to_enu.transform.rotation.y,
                 tf_trans_ned_to_enu.transform.rotation.z,
                 tf_trans_ned_to_enu.transform.rotation.w))[0:3, 0:3]

            self.get_logger().info('base_link transform NED to ENU=\n' + str(self.base_link_ned_to_enu))

        self.get_logger().info(
            'ThrusterManager::update_rate=' + str(self.config['update_rate'].value))

        # Retrieve the output file path to store the TAM
        # matrix for future use
        self.output_dir = None
        if self.has_parameter('output_dir'):
            self.output_dir = self.get_parameter('output_dir').get_parameter_value().string_value
            if not isdir(self.output_dir):
                raise RuntimeError(
                    'Invalid output directory, output_dir=' + self.output_dir)
            self.get_logger().info('output_dir=' + self.output_dir)

        # Number of thrusters
        self.n_thrusters = 0
        # Thruster objects used to calculate the right angular velocity command
        self.thrusters = list()
        # Thrust forces vector
        self.thrust = None
        # Thruster allocation matrix: transform thruster inputs to force/torque
        self.configuration_matrix = None

        if self.has_parameter('tam'):
            tam = self.get_parameter('tam').value
            tam = numpy.array(tam)
            # The parameter is a flat list; reshape into 6 x n_thrusters
            self.configuration_matrix = numpy.reshape(tam, (6, -1))
            # Set number of thrusters from the number of columns
            self.n_thrusters = self.configuration_matrix.shape[1]
            # Create publishing topics to each thruster
            params = self.config['conversion_fcn_params']
            conv_fcn = self.config['conversion_fcn'].value
            if type(params) == list and type(conv_fcn) == list:
                if len(params) != self.n_thrusters or len(conv_fcn) != self.n_thrusters:
                    raise RuntimeError('Lists conversion_fcn and '
                                       'conversion_fcn_params must have '
                                       'the same number of items as of '
                                       'thrusters')
            for i in range(self.n_thrusters):
                topic = self.config['thruster_topic_prefix'].value + 'id_' + str(i) + \
                    self.config['thruster_topic_suffix'].value
                if list not in [type(params), type(conv_fcn)]:
                    # Single conversion function shared by all thrusters:
                    # unpack parameters to values
                    deduced_params = {key: val.value for key, val in params.items()}
                    thruster = Thruster.create_thruster(
                        self, conv_fcn, i, topic, None, None,
                        **deduced_params)
                else:
                    # Per-thruster conversion function: unpack parameters to values
                    deduced_params = {key: val.value for key, val in params[i].items()}
                    thruster = Thruster.create_thruster(
                        self, conv_fcn[i], i, topic, None, None,
                        **deduced_params)

                if thruster is None:
                    # BUG FIX: the RuntimeError was previously constructed but
                    # never raised, silently appending None to the list
                    raise RuntimeError('Invalid thruster conversion '
                                       'function=%s'
                                       % self.config['conversion_fcn'].value)
                self.thrusters.append(thruster)

            self.get_logger().info('Thruster allocation matrix provided!')
            self.get_logger().info('TAM=')
            self.get_logger().info(str(self.configuration_matrix))
            self.thrust = numpy.zeros(self.n_thrusters)

        if not self.update_tam():
            raise RuntimeError('No thrusters found')

        # (pseudo) inverse: force/torque to thruster inputs
        self.inverse_configuration_matrix = None
        if self.configuration_matrix is not None:
            self.inverse_configuration_matrix = numpy.linalg.pinv(
                self.configuration_matrix)

        # If an output directory was provided, store matrix for further use
        if self.output_dir is not None:
            with open(join(self.output_dir, 'TAM.yaml'), 'w') as yaml_file:
                yaml_file.write(
                    yaml.safe_dump(
                        dict(tam=self.configuration_matrix.tolist())))

        self._ready = True
        self.init_future.set_result(True)
        self.get_logger().info('ThrusterManager: ready')

    # =========================================================================
    # TODO Change in foxy
    def parse_urdf(self, urdf_file):
        """Read the thrust axis of every non-fixed joint from the URDF,
        indexed by the child link name (homogeneous 4-vector, w=0)."""
        root = etree.parse(urdf_file)
        for joint in root.findall('joint'):
            if joint.get('type') == 'fixed':
                continue
            axis_str_list = joint.find('axis').get('xyz').split()
            child = joint.find('child').get('link')
            self.axes[child] = numpy.array([float(axis_str_list[0]),
                                            float(axis_str_list[1]),
                                            float(axis_str_list[2]), 0.0])

    # =========================================================================
    def update_tam(self, recalculate=False):
        """Calculate the thruster allocation matrix, if one is not given."""
        if self.configuration_matrix is not None and not recalculate:
            self._ready = True
            self.get_logger().info('TAM provided, skipping...')
            self.get_logger().info('ThrusterManager: ready')
            return True

        self._ready = False
        self.get_logger().info('ThrusterManager: updating thruster poses')

        base = self.namespace + self.config['base_link'].value
        base = base[1:]

        self.thrusters = list()

        equal_thrusters = True
        idx_thruster_model = 0

        if type(self.config['conversion_fcn_params']) == list and \
                type(self.config['conversion_fcn'].value) == list:
            if len(self.config['conversion_fcn_params']) != len(
                    self.config['conversion_fcn'].value):
                raise RuntimeError(
                    'Lists of conversion_fcn_params and conversion_fcn'
                    ' must have equal length')
            equal_thrusters = False

        self.get_logger().info('conversion_fcn=' + str(self.config['conversion_fcn'].value))
        self.get_logger().info('conversion_fcn_params=' + str(self.config['conversion_fcn_params']))

        for i in range(self.MAX_THRUSTERS):
            frame = self.namespace + \
                self.config['thruster_frame_base'].value + str(i)
            frame = frame[1:]
            try:
                # try to get thruster pose with respect to base frame via tf;
                # small time margin to make sure we get thruster frames via tf
                self.get_logger().info('transform: ' + base + ' -> ' + frame)
                now = self.get_clock().now() + rclpy.time.Duration(nanoseconds=int(0.2 * 1e9))
                transform_object = self.tf_buffer.lookup_transform(base, frame, now, timeout=rclpy.duration.Duration(seconds=5))
                pos = numpy.array([transform_object.transform.translation.x,
                                   transform_object.transform.translation.y,
                                   transform_object.transform.translation.z])
                quat = numpy.array([transform_object.transform.rotation.x,
                                    transform_object.transform.rotation.y,
                                    transform_object.transform.rotation.z,
                                    transform_object.transform.rotation.w])
                topic = self.config['thruster_topic_prefix'].value + 'id_' + str(i) + \
                    self.config['thruster_topic_suffix'].value

                # If not using robot_description, thrust_axis=None which will
                # result in the thrust axis being the x-axis, i.e. (1,0,0)
                thrust_axis = None if not self.use_robot_descr else self.axes[frame]

                if equal_thrusters:
                    params = self.config['conversion_fcn_params']
                    # Unpack parameters to values
                    params = {key: val.value for key, val in params.items()}
                    # BUG FIX: pass the computed thrust_axis instead of
                    # indexing self.axes directly, which raised KeyError
                    # whenever no robot description was supplied
                    thruster = Thruster.create_thruster(
                        self, self.config['conversion_fcn'].value,
                        i, topic, pos, quat, thrust_axis, **params)
                else:
                    if idx_thruster_model >= len(self.config['conversion_fcn'].value):
                        raise RuntimeError('Number of thrusters found and '
                                           'conversion_fcn are different')
                    params = self.config['conversion_fcn_params'][idx_thruster_model]
                    # Unpack parameters to values
                    params = {key: val.value for key, val in params.items()}
                    conv_fcn = self.config['conversion_fcn'][idx_thruster_model].value
                    # BUG FIX: thrust_axis instead of self.axes[frame] (see above)
                    thruster = Thruster.create_thruster(
                        self,
                        conv_fcn,
                        i, topic, pos, quat, thrust_axis,
                        **params)
                    idx_thruster_model += 1
                if thruster is None:
                    # BUG FIX: previously the error was constructed but not raised
                    raise RuntimeError('Invalid thruster conversion '
                                       'function=%s'
                                       % self.config['conversion_fcn'].value)
                self.thrusters.append(thruster)
            except Exception as e:
                # The first frame that cannot be resolved marks the end of
                # the consecutively numbered thruster list
                self.get_logger().warn('could not get transform from: ' + base)
                self.get_logger().warn('to: ' + frame)
                self.get_logger().warn('Except: ' + repr(e))
                break

        self.get_logger().info(str(self.thrusters))
        if len(self.thrusters) == 0:
            return False

        # Set the number of thrusters found
        self.n_thrusters = len(self.thrusters)

        # Fill the thrust vector
        self.thrust = numpy.zeros(self.n_thrusters)

        # Fill the thruster allocation matrix
        self.configuration_matrix = numpy.zeros((6, self.n_thrusters))

        for i in range(self.n_thrusters):
            self.configuration_matrix[:, i] = self.thrusters[i].tam_column

        # Eliminate small values
        self.configuration_matrix[numpy.abs(
            self.configuration_matrix) < 1e-3] = 0.0

        self.get_logger().info('TAM= %s' % str(self.configuration_matrix))

        # Once we know the configuration matrix we can compute its
        # (pseudo-)inverse:
        self.inverse_configuration_matrix = numpy.linalg.pinv(
            self.configuration_matrix)

        # If an output directory was provided, store matrix for further use
        if self.output_dir is not None and not recalculate:
            with open(join(self.output_dir, 'TAM.yaml'), 'w') as yaml_file:
                yaml_file.write(
                    yaml.safe_dump(
                        dict(tam=self.configuration_matrix.tolist())))
            self.get_logger().info('TAM saved in <{}>'.format(join(self.output_dir, 'TAM.yaml')))
        elif recalculate:
            self.get_logger().info('Recalculate flag on, matrix will not be stored in TAM.yaml')
        else:
            # BUG FIX: the message used .format() without a placeholder, so
            # the directory was never interpolated into the log line
            self.get_logger().error('Invalid output directory for the TAM matrix, dir={}'.format(
                self.output_dir))

        self._ready = True
        self.get_logger().info('TAM updated')

        return True

    # =========================================================================
    def command_thrusters(self):
        """Publish the thruster input into their specific topic."""
        if self.thrust is None:
            return
        for i in range(self.n_thrusters):
            self.thrusters[i].publish_command(self.thrust[i])

    # =========================================================================
    def publish_thrust_forces(self, control_forces, control_torques,
                              frame_id=None):
        """Convert a 6-DoF wrench into individual thruster commands.

        control_forces / control_torques are 3-vectors; when frame_id names
        a frame other than the configured base_link, the wrench is rotated
        between NED and ENU using the cached transform.
        """
        if not self._ready:
            return

        if frame_id is not None:
            if self.config['base_link'].value != frame_id:
                # BUG FIX: the assertion message used to be split over two
                # statements, so its second half was a dangling no-op string
                assert self.base_link_ned_to_enu is not None, \
                    'Transform from base_link_ned to base_link could not be found'
                if 'base_link_ned' not in self.config['base_link'].value:
                    control_forces = numpy.dot(self.base_link_ned_to_enu,
                                               control_forces)
                    control_torques = numpy.dot(self.base_link_ned_to_enu,
                                                control_torques)
                else:
                    control_forces = numpy.dot(self.base_link_ned_to_enu.T,
                                               control_forces)
                    control_torques = numpy.dot(self.base_link_ned_to_enu.T,
                                                control_torques)

        gen_forces = numpy.hstack(
            (control_forces, control_torques)).transpose()
        self.thrust = self.compute_thruster_forces(gen_forces)
        self.command_thrusters()

    # =========================================================================
    def compute_thruster_forces(self, gen_forces):
        """Compute desired thruster forces using the inverse configuration
        matrix.
        """
        # Calculate individual thrust forces
        thrust = self.inverse_configuration_matrix.dot(gen_forces)
        # Obey limit on max thrust by clamping each force to its bound
        if type(self.config['max_thrust'].value) == list:
            if len(self.config['max_thrust'].value) != self.n_thrusters:
                raise RuntimeError('max_thrust list must have the length'
                                   ' equal to the number of thrusters')
            max_thrust = self.config['max_thrust'].value
        else:
            max_thrust = [self.config['max_thrust'].value for _ in range(self.n_thrusters)]
        for i in range(self.n_thrusters):
            if abs(thrust[i]) > max_thrust[i]:
                thrust[i] = numpy.sign(thrust[i]) * max_thrust[i]
        return thrust

    # =========================================================================
    @property
    def ready(self):
        """True once the TAM is available and all thrusters are set up."""
        return self._ready
|
goroutine.py | from threading import Thread
from typing import Any, Iterable, Mapping, Generator
from types_extensions import Function, void, safe_type, const, Number_t
class ChannelClosed(Exception):
    """Raised by Channel.put(safe=False) when the channel has been closed."""

    def __str__(self) -> str:
        return "This channel is closed. You can bypass this exception by passing `safe=True` to put()"
class Channel:
    """FIFO channel for passing values between goroutine-style threads.

    put() appends at the tail, poll() pops from the head. Once closed,
    put() is a no-op when safe=True, otherwise raises ChannelClosed.
    """

    def __init__(self) -> void:
        self._buffer: list[Any] = []   # pending values, FIFO order
        self._closed: bool = False

    @property
    def is_closed(self) -> bool:
        return self._closed

    @property
    def size(self) -> int:
        """Number of values currently buffered."""
        return len(self._buffer)

    def put(self, obj: Any, safe: bool = True):
        """Append a value; on a closed channel no-op if safe, else raise."""
        if not self._closed:
            self._buffer.append(obj)
        else:
            if safe:
                return
            raise ChannelClosed

    def poll(self, block: bool = True) -> Any:
        """Pop and return the oldest value, or None if none is available.

        When block is True, waits for a value to arrive. BUG FIX: the old
        wait loop busy-spun at 100% CPU (its body was a bare `...`) and
        hung forever on a closed, empty channel; it now sleeps briefly per
        iteration and returns None once the channel is closed.
        """
        from time import sleep
        while self.size == 0:
            if not block or self._closed:
                return None
            sleep(0.001)
        return self._buffer.pop(0)

    def iter(self) -> Generator:
        """Yield buffered values until the channel is drained."""
        while self.size > 0:
            yield self.poll(block=False)

    def close(self) -> void:
        """Mark the channel closed; blocked poll() calls will return None."""
        self._closed = True
class _GoThread(Thread):
def __init__(self, channel: Channel, group: void = None, target: Function = None, name: str = None,
args: Iterable[Any] = (), kwargs: Mapping[str, Any] = None, *, daemon: bool = None) -> void:
Thread.__init__(self, group=group, target=target, name=name,
args=args, kwargs=kwargs, daemon=daemon)
self._target: Function = target
self._args: Iterable[Any] = args
self._kwargs: safe_type(Mapping[str, Any]) = kwargs or {}
self._kwargs['channel'] = channel
self.finished: bool = False
def run(self) -> void:
if self._target is not None:
try:
self._target(*self._args, **self._kwargs)
except TypeError:
del self._kwargs['channel']
self._target(*self._args, **self._kwargs)
self.finished = True
del self._target, self._args, self._kwargs
class GoRoutine:
    """Public handle over a running goroutine: its worker thread plus channel."""

    def __init__(self, executor: _GoThread, chan: Channel) -> void:
        self.__worker: const(_GoThread) = executor
        self.channel: const(Channel) = chan

    @property
    def is_finished(self) -> bool:
        # True once the worker's run() has completed
        return self.__worker.finished

    def poll(self, block: bool = True) -> Any:
        """Fetch the next value from the routine's channel."""
        return self.channel.poll(block)

    def stop(self, timeout: Number_t = None) -> void:
        """Close the channel, then wait (up to timeout) for the worker."""
        self.channel.close()
        self.__worker.join(timeout)
def go(func: Function, *args, **kwargs) -> GoRoutine:
    """Run *func* on a fresh thread, Go-style, and return its GoRoutine handle."""
    chan = Channel()
    worker = _GoThread(chan, target=func, args=args, kwargs=kwargs)
    worker.start()
    return GoRoutine(executor=worker, chan=chan)
|
_app.py | """
"""
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import inspect
import select
import sys
import threading
import time
import traceback
import six
from ._abnf import ABNF
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
from . import _logging
__all__ = ["WebSocketApp"]
class Dispatcher:
    """Drives the read/check callbacks for a plain (non-SSL) socket.

    Used by WebSocketApp.run_forever; waits for readability with select and
    invokes the callbacks until the app clears keep_running or the read
    callback returns a falsy value.
    """

    def __init__(self, app, ping_timeout):
        self.app = app
        self.ping_timeout = ping_timeout

    def read(self, sock, read_callback, check_callback):
        """Event loop: poll for data, dispatch reads, run the health check."""
        while self.app.keep_running:
            readable = select.select(
                (self.app.sock.sock, ), (), (), self.ping_timeout)[0]
            # Data available but the read callback signalled stop -> exit.
            if readable and not read_callback():
                break
            check_callback()
class SSLDispatcher:
    """Drives the read/check callbacks for an SSL-wrapped socket.

    Identical loop to Dispatcher, except readiness must also consider bytes
    already decrypted and buffered inside the SSL layer (sock.pending()),
    which select() cannot see.
    """

    def __init__(self, app, ping_timeout):
        self.app = app
        self.ping_timeout = ping_timeout

    def read(self, sock, read_callback, check_callback):
        """Event loop: poll for data, dispatch reads, run the health check."""
        while self.app.keep_running:
            ready = self.select()
            if ready and not read_callback():
                break
            check_callback()

    def select(self):
        """Return the list of sockets ready for reading.

        Buffered SSL bytes count as ready even when the OS socket is idle.
        """
        conn = self.app.sock.sock
        if conn.pending():
            return [conn,]
        return select.select((conn, ), (), (), self.ping_timeout)[0]
class WebSocketApp(object):
    """
    Higher level of APIs are provided. The interface is like JavaScript WebSocket object.
    Construct with the url and event callbacks, then call run_forever() to
    connect and pump events until the connection closes.
    """
    def __init__(self, url, header=None,
                 on_open=None, on_message=None, on_error=None,
                 on_close=None, on_ping=None, on_pong=None,
                 on_cont_message=None,
                 keep_running=True, get_mask_key=None, cookie=None,
                 subprotocols=None,
                 on_data=None):
        """
        WebSocketApp initialization
        Parameters
        ----------
        url: str
            websocket url.
        header: list or dict
            custom header for websocket handshake.
        on_open: callable
            callable object which is called at opening websocket.
            this function has one argument. The argument is this class object.
        on_message: callable
            callable object which is called when received data.
            on_message has 2 arguments.
            The 1st argument is this class object.
            The 2nd argument is utf-8 string which we get from the server.
        on_error: callable
            callable object which is called when we get error.
            on_error has 2 arguments.
            The 1st argument is this class object.
            The 2nd argument is exception object.
        on_close: callable
            callable object which is called when closed the connection.
            this function has one argument. The argument is this class object.
        on_cont_message: callable
            callback object which is called when receive continued
            frame data.
            on_cont_message has 3 arguments.
            The 1st argument is this class object.
            The 2nd argument is utf-8 string which we get from the server.
            The 3rd argument is continue flag. if 0, the data continue
            to next frame data
        on_data: callable
            callback object which is called when a message received.
            This is called before on_message or on_cont_message,
            and then on_message or on_cont_message is called.
            on_data has 4 argument.
            The 1st argument is this class object.
            The 2nd argument is utf-8 string which we get from the server.
            The 3rd argument is data type. ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY will be came.
            The 4th argument is continue flag. if 0, the data continue
        keep_running: bool
            this parameter is obsolete and ignored.
        get_mask_key: func
            a callable to produce new mask keys,
            see the WebSocket.set_mask_key's docstring for more information
        cookie: str
            cookie value.
        subprotocols: list
            array of available sub protocols. default is None.
        """
        self.url = url
        self.header = header if header is not None else []
        self.cookie = cookie
        self.on_open = on_open
        self.on_message = on_message
        self.on_data = on_data
        self.on_error = on_error
        self.on_close = on_close
        self.on_ping = on_ping
        self.on_pong = on_pong
        self.on_cont_message = on_cont_message
        # True only while run_forever is active; the ctor argument is ignored.
        self.keep_running = False
        self.get_mask_key = get_mask_key
        self.sock = None
        # Timestamps of last ping sent / last pong received, read by the
        # ping-timeout check inside run_forever.
        self.last_ping_tm = 0
        self.last_pong_tm = 0
        self.subprotocols = subprotocols
    def send(self, data, opcode=ABNF.OPCODE_TEXT):
        """
        send message
        Parameters
        ----------
        data: str
            Message to send. If you set opcode to OPCODE_TEXT,
            data must be utf-8 string or unicode.
        opcode: int
            Operation code of data. default is OPCODE_TEXT.
        Raises WebSocketConnectionClosedException when not connected or the
        underlying send reports 0 bytes written.
        """
        if not self.sock or self.sock.send(data, opcode) == 0:
            raise WebSocketConnectionClosedException(
                "Connection is already closed.")
    def close(self, **kwargs):
        """
        Close websocket connection.
        """
        self.keep_running = False
        if self.sock:
            self.sock.close(**kwargs)
            self.sock = None
    def _send_ping(self, interval, event, payload):
        # Body of the background ping thread: send a ping every `interval`
        # seconds until `event` is set by teardown(); a send failure ends
        # the thread with a warning.
        while not event.wait(interval):
            self.last_ping_tm = time.time()
            if self.sock:
                try:
                    self.sock.ping(payload)
                except Exception as ex:
                    _logging.warning("send_ping routine terminated: {}".format(ex))
                    break
    def run_forever(self, sockopt=None, sslopt=None,
                    ping_interval=0, ping_timeout=None,
                    ping_payload="",
                    http_proxy_host=None, http_proxy_port=None,
                    http_no_proxy=None, http_proxy_auth=None,
                    skip_utf8_validation=False,
                    host=None, origin=None, dispatcher=None,
                    suppress_origin=False, proxy_type=None):
        """
        Run event loop for WebSocket framework.
        This loop is an infinite loop and is alive while websocket is available.
        Parameters
        ----------
        sockopt: tuple
            values for socket.setsockopt.
            sockopt must be tuple
            and each element is argument of sock.setsockopt.
        sslopt: dict
            optional dict object for ssl socket option.
        ping_interval: int or float
            automatically send "ping" command
            every specified period (in seconds)
            if set to 0, not send automatically.
        ping_timeout: int or float
            timeout (in seconds) if the pong message is not received.
        ping_payload: str
            payload message to send with each ping.
        http_proxy_host: str
            http proxy host name.
        http_proxy_port: int or str
            http proxy port. If not set, set to 80.
        http_no_proxy: list
            host names, which doesn't use proxy.
        skip_utf8_validation: bool
            skip utf8 validation.
        host: str
            update host header.
        origin: str
            update origin header.
        dispatcher: Dispatcher or SSLDispatcher
            customize reading data from socket.
        suppress_origin: bool
            suppress outputting origin header.
        Returns
        -------
        teardown: bool
            False if caught KeyboardInterrupt, True if other exception was raised during a loop
        """
        if ping_timeout is not None and ping_timeout <= 0:
            ping_timeout = None
        if ping_timeout and ping_interval and ping_interval <= ping_timeout:
            raise WebSocketException("Ensure ping_interval > ping_timeout")
        if not sockopt:
            sockopt = []
        if not sslopt:
            sslopt = {}
        if self.sock:
            raise WebSocketException("socket is already opened")
        thread = None
        self.keep_running = True
        self.last_ping_tm = 0
        self.last_pong_tm = 0
        def teardown(close_frame=None):
            """
            Tears down the connection.
            If close_frame is set, we will invoke the on_close handler with the
            statusCode and reason from there.
            """
            # Stop the ping thread first, if one was started below.
            if thread and thread.is_alive():
                event.set()
                thread.join()
            self.keep_running = False
            if self.sock:
                self.sock.close()
            close_args = self._get_close_args(
                close_frame.data if close_frame else None)
            self._callback(self.on_close, *close_args)
            self.sock = None
        try:
            self.sock = WebSocket(
                self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
                fire_cont_frame=self.on_cont_message is not None,
                skip_utf8_validation=skip_utf8_validation,
                enable_multithread=True if ping_interval else False)
            self.sock.settimeout(getdefaulttimeout())
            self.sock.connect(
                self.url, header=self.header, cookie=self.cookie,
                http_proxy_host=http_proxy_host,
                http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
                http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
                host=host, origin=origin, suppress_origin=suppress_origin,
                proxy_type=proxy_type)
            if not dispatcher:
                dispatcher = self.create_dispatcher(ping_timeout)
            self._callback(self.on_open)
            if ping_interval:
                # Periodic pings run on a daemon thread; `event` signals it
                # to stop during teardown().
                event = threading.Event()
                thread = threading.Thread(
                    target=self._send_ping, args=(ping_interval, event, ping_payload))
                thread.daemon = True
                thread.start()
            def read():
                # Dispatcher read callback: a falsy return stops the loop.
                if not self.keep_running:
                    return teardown()
                op_code, frame = self.sock.recv_data_frame(True)
                if op_code == ABNF.OPCODE_CLOSE:
                    return teardown(frame)
                elif op_code == ABNF.OPCODE_PING:
                    self._callback(self.on_ping, frame.data)
                elif op_code == ABNF.OPCODE_PONG:
                    self.last_pong_tm = time.time()
                    self._callback(self.on_pong, frame.data)
                elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
                    self._callback(self.on_data, frame.data,
                                   frame.opcode, frame.fin)
                    self._callback(self.on_cont_message,
                                   frame.data, frame.fin)
                else:
                    data = frame.data
                    if six.PY3 and op_code == ABNF.OPCODE_TEXT:
                        data = data.decode("utf-8")
                    self._callback(self.on_data, data, frame.opcode, True)
                    self._callback(self.on_message, data)
                return True
            def check():
                # Dispatcher check callback: raise when a ping was sent but
                # no pong arrived within ping_timeout.
                if (ping_timeout):
                    has_timeout_expired = time.time() - self.last_ping_tm > ping_timeout
                    has_pong_not_arrived_after_last_ping = self.last_pong_tm - self.last_ping_tm < 0
                    has_pong_arrived_too_late = self.last_pong_tm - self.last_ping_tm > ping_timeout
                    if (self.last_ping_tm and
                            has_timeout_expired and
                            (has_pong_not_arrived_after_last_ping or has_pong_arrived_too_late)):
                        raise WebSocketTimeoutException("ping/pong timed out")
                return True
            dispatcher.read(self.sock.sock, read, check)
        except (Exception, KeyboardInterrupt, SystemExit) as e:
            self._callback(self.on_error, e)
            if isinstance(e, SystemExit):
                # propagate SystemExit further
                raise
            teardown()
            return not isinstance(e, KeyboardInterrupt)
    def create_dispatcher(self, ping_timeout):
        # SSL sockets need the pending()-aware dispatcher; the poll interval
        # defaults to 10 seconds when no ping_timeout is configured.
        timeout = ping_timeout or 10
        if self.sock.is_ssl():
            return SSLDispatcher(self, timeout)
        return Dispatcher(self, timeout)
    def _get_close_args(self, data):
        """
        _get_close_args extracts the code and reason from the close body,
        if they exist, and only if self.on_close accepts three arguments.
        """
        # if the on_close callback is "old" (does not take code/reason),
        # just return an empty argument list
        if sys.version_info < (3, 0):
            if not self.on_close or len(inspect.getargspec(self.on_close).args) != 3:
                return []
        else:
            if not self.on_close or len(inspect.getfullargspec(self.on_close).args) != 3:
                return []
        if data and len(data) >= 2:
            # First two bytes of the close body are the big-endian status code;
            # the remainder is a utf-8 reason string.
            code = 256 * six.byte2int(data[0:1]) + six.byte2int(data[1:2])
            reason = data[2:].decode('utf-8')
            return [code, reason]
        return [None, None]
    def _callback(self, callback, *args):
        # Invoke a user callback; exceptions are logged and swallowed so a
        # faulty handler cannot kill the event loop.
        if callback:
            try:
                callback(self, *args)
            except Exception as e:
                _logging.error("error from callback {}: {}".format(callback, e))
                if _logging.isEnabledForDebug():
                    _, _, tb = sys.exc_info()
                    traceback.print_tb(tb)
|
rpc_test.py | import concurrent.futures
import contextlib
import json
import os
import sys
import threading
import time
from collections import namedtuple
from functools import partial
from threading import Event
from threading import Lock
from unittest import mock
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torch.distributed.autograd as dist_autograd
from torch.distributed.rpc import RRef, _get_debug_info, _rref_context_get_debug_info
from torch.distributed.rpc.api import _delete_all_user_and_unforked_owner_rrefs, _use_rpc_pickler, _thread_local_var, _wait_all
from torch.distributed.rpc.internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from torch.futures import Future
from torch.testing._internal.common_distributed import (
skip_if_lt_x_gpu,
captured_output,
)
from torch.testing._internal.common_utils import IS_MACOS, load_tests, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
get_function_event,
initialize_pg,
wait_until_node_failure,
wait_until_pending_futures_and_users_flushed,
wait_until_owners_and_forks_on_rank,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
def foo_add():
    """Trivial remote-callable helper: return 1 + 1 as a 1-element tensor."""
    ones = torch.ones(1)
    return torch.add(ones, ones)
def udf_with_torch_ops(device=-1, use_record_function=False):
    """Run a small, fixed chain of torch ops (for RPC profiling tests).

    Parameters
    ----------
    device : int
        CUDA device index to run on, or -1 to stay on the default device.
    use_record_function : bool
        When True, wrap the ops in a ``record_function("##forward##")`` scope
        so they show up under that label in profiler output.

    Returns None; only the profiler events matter to callers.
    """
    # Original code used contextlib.suppress() as a do-nothing context
    # manager; nullcontext() is the explicit no-op CM and avoids implying
    # exception suppression.
    device_ctx = contextlib.nullcontext() if device == -1 else torch.cuda.device(device)
    record_function_ctx = (
        torch.autograd.profiler.record_function("##forward##")
        if use_record_function
        else contextlib.nullcontext()
    )
    with device_ctx, record_function_ctx:
        t1, t2 = torch.ones(1), torch.ones(1)
        t = torch.add(t1, t2)
        t = torch.mul(t, t)
        t = t.relu()
        t = t.sigmoid()
# Events (operator invocations) that are expected to be run as part of the above
# function.
EXPECTED_REMOTE_EVENTS = [
    "aten::ones",
    "aten::ones",
    "aten::add",
    "aten::mul",
    "aten::relu",
    "aten::clamp_min",
    "aten::sigmoid",
]
# Remote operations are prefixed with the following string for RPC profiling.
REMOTE_OP_STR = "#remote_op: "
# Cross-worker handshake futures, resolved/read by set_value and
# wait_for_value_future / set_and_check_done below.
VALUE_FUTURE = concurrent.futures.Future()
DONE_FUTURE = concurrent.futures.Future()
# Cycle count handed to torch.cuda._sleep to produce a measurable GPU delay.
FIFTY_MIL_CYCLES = 50000000
# Module-level counter mutated via the two helpers below (invoked over RPC
# by barrier tests).
_rpc_barrier_count = 0
def _increment_count():
    """Bump the global barrier counter."""
    global _rpc_barrier_count
    _rpc_barrier_count += 1
def _reset_count():
    """Reset the global barrier counter between tests."""
    global _rpc_barrier_count
    _rpc_barrier_count = 0
class StubRpcAgent:
    """Minimal stand-in for an RPC agent; only knows the world size."""
    def __init__(self, world_size):
        self.world_size = world_size
    def get_worker_infos(self):
        # Returns a set of WorkerInfo, mirroring the real agent's API shape.
        return {
            rpc.WorkerInfo(name=worker_name(rank), id=rank)
            for rank in range(self.world_size)
        }
def _stub_construct_rpc_backend_options_handler(**kwargs):
    """Backend-registry hook that returns a mock in place of RpcBackendOptions."""
    return mock.Mock()  # RpcBackendOptions.
def _stub_init_rpc_backend_handler(store, name, rank, world_size, rpc_backend_options):
    """Backend-registry hook that returns a StubRpcAgent instead of a real agent."""
    return StubRpcAgent(world_size=world_size)
def set_value(value):
    """Resolve the module-level VALUE_FUTURE (typically called via RPC)."""
    VALUE_FUTURE.set_result(value)
def wait_for_value_future():
    """Block until some worker has called set_value, then return that value."""
    return VALUE_FUTURE.result()
def set_and_check_done(value):
    """Resolve VALUE_FUTURE, then block until DONE_FUTURE is resolved."""
    VALUE_FUTURE.set_result(value)
    return DONE_FUTURE.result()
# it is used to test python user defined function over rpc
# classes and functions are used to test python user defined class and
# methods over rpc
TensorClass = namedtuple("TensorClass", ["tensors"])
class MyPickleClass:
    """Class with custom pickling that round-trips through the internal RPC
    pickler: serializing embeds a PythonUDF; deserializing executes it."""
    def __init__(self):
        self.t = None
    def __getstate__(self):
        # Serialize a UDF (my_tensor_function over two 2x2 ones tensors)
        # instead of the instance's own state.
        (pickled_python_udf, tensors) = _internal_rpc_pickler.serialize(
            PythonUDF(my_tensor_function, (torch.ones(2, 2), torch.ones(2, 2)), None)
        )
        return (pickled_python_udf, tensors)
    def __setstate__(self, obj):
        # Rebuild the UDF and run it; its result becomes self.t.
        python_udf = _internal_rpc_pickler.deserialize(obj[0], obj[1])
        result = python_udf.func(python_udf.args[0], python_udf.args[1])
        self.t = result
    def set(self, val):
        self.t = val
class SlowPickleClass:
    """Pickles slowly on purpose: sleeps `t` seconds during both
    serialization and deserialization, to simulate expensive (de)pickling."""

    def __init__(self, t):
        self.t = t

    def __getstate__(self):
        # Stall serialization before handing back the state tuple.
        time.sleep(self.t)
        return (self.t, )

    def __setstate__(self, state):
        self.t = state[0]
        # Stall deserialization by the same amount.
        time.sleep(self.t)
class MyClass:
    """Small stateful class exercised by RPC tests: instance, class, and
    static methods plus an intentionally slow one."""

    def __init__(self, a, delay=False):
        self.a = a
        # delay initialization to simulate errors if specified
        if delay:
            time.sleep(2)

    def my_instance_method(self, b):
        """Return self.a + b."""
        total = self.a + b
        return total

    @classmethod
    def my_class_method(cls, d, e):
        """Return d + e (ignores the class)."""
        combined = d + e
        return combined

    @staticmethod
    def my_static_method(f):
        """True when f exceeds 10."""
        return f > 10

    def increment_value(self, increment):
        """In-place add to the stored value (returns None)."""
        self.a += increment

    def get_value(self):
        """Current stored value."""
        return self.a

    def my_slow_method(self, my_tensor_arg):
        """Sleep 5s, then add the stored value to the tensor argument."""
        time.sleep(5)
        return torch.add(self.a, my_tensor_arg)
def _call_method_on_rref(method, rref, *args, **kwargs):
    """Invoke `method` on the value held locally by `rref` (runs on the owner)."""
    return method(rref.local_value(), *args, **kwargs)
def get_rref_list(values):
    """Wrap each value in a locally-owned RRef holding a MyClass instance."""
    return [RRef(MyClass(a)) for a in values]
def add_rref_to_value(rref, value):
    """Fetch the rref's value to this worker and add `value` to it."""
    return rref.to_here() + value
def run_nested_pickle(pickle_cls_instance, tensor):
    """Use a custom-pickled instance that arrived over RPC (reads its .t)."""
    return pickle_cls_instance.t + tensor
def build_sparse_tensor():
    """Return a fixed 2x3 sparse COO tensor with three nonzero entries."""
    indices = [[0, 1, 1], [2, 0, 2]]
    values = [3, 4, 5]
    return torch.sparse_coo_tensor(indices, values, (2, 3))
def build_complex_tensors():
    """Return nested containers (with deliberate aliasing) built around one tensor.

    The same tensor object is shared across the list/dict structures; the
    tensor is also used as a dict key (hashed by identity).
    """
    base = torch.ones(3, 3)
    pair = [base, base]
    nested = [pair, pair]
    mixed = [base, pair]
    keyed = {base: mixed}
    return [base, pair, nested, mixed, keyed]
def non_cont_test(t_view, t_cont):
    """Validate contiguity expectations and value equality, then echo t_view.

    Requires t_view to be non-contiguous, t_cont to be contiguous, and both
    to hold equal values; raises Exception on any violation.
    """
    view_is_ok = not t_view.is_contiguous()
    if not view_is_ok:
        raise Exception('t_view is contiguous!')
    if t_cont.is_contiguous() is False:
        raise Exception('t_cont is not contiguous!')
    values_match = torch.equal(t_view, t_cont)
    if not values_match:
        raise Exception('t_view is not equal to t_cont!')
    return t_view
def my_function(a, b, c):
    """Return the left-to-right sum a + b + c."""
    partial_sum = a + b
    return partial_sum + c
def my_tensor_function(a, b):
    """Return the elementwise sum of the two inputs."""
    total = a + b
    return total
def my_sleep_func(seconds=1):
    """Sleep for `seconds`, then return tensor(1) (product of two scalar tensors)."""
    time.sleep(seconds)
    one = torch.tensor(1)
    return torch.mul(one, torch.tensor(1))
def my_complex_tensor_function(list_input, tensor_class_input, dict_input):
    """Combine tensors from a list, a TensorClass, and a dict of tensors.

    NOTE(review): `res` aliases list_input[0] and `+=` is in-place on tensors,
    so list_input[0] is mutated and effectively counted twice in the sum.
    This looks intentional for the tests' expected values — confirm before
    changing.
    """
    res = list_input[0]
    for t in list_input:
        res += t
    for k, v in dict_input.items():
        res += v
    # Pass the TensorClass's tensors straight through alongside the sum.
    complex_tensors = tensor_class_input.tensors
    return (res, complex_tensors[0], complex_tensors[1], complex_tensors[2])
def my_rref_function(rref_a, rref_b):
    """Materialize both rrefs on this worker and return their sum."""
    return rref_a.to_here() + rref_b.to_here()
def delayed_add(a, b, seconds=0.05):
    """Return a + b after sleeping for `seconds`."""
    time.sleep(seconds)
    result = a + b
    return result
def no_result():
    """Side-effect-only helper: print a message, return None."""
    message = "do nothing"
    print(message)
def raise_or_inc(value):
    """Raise ValueError for 2-element tensors; otherwise return value + 1."""
    if value.numel() != 2:
        return value + 1
    raise ValueError("Expected error")
def nested_rpc(dst):
    """RPC helper that itself issues a synchronous RPC to `dst`."""
    return rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
def multi_layer_nested_async_rpc(dst, world_size, ttl):
    # this method returns immediately without blocking the callee, but will
    # generate additional requests.
    if ttl > 0:
        # Fire-and-forget: hop to the next worker in the ring, decrementing ttl.
        current_dst = worker_name(dst)
        next_dst = (dst + 1) % world_size
        rpc.rpc_async(
            current_dst,
            multi_layer_nested_async_rpc,
            args=(next_dst, world_size, ttl - 1),
        )
        return 0
def nested_rref(dst):
    """Create two remote RRefs owned by `dst` and return them as a tuple."""
    return (
        rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1)),
        rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 2)),
    )
def nested_remote(dst):
    """Create a remote RRef on `dst` and fetch its value to this worker."""
    rref = rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 3))
    return rref.to_here()
def rref_forward_chain(dst, world_size, rref, ttl):
    """Forward `rref` around the worker ring for `ttl` hops, then fetch it."""
    if ttl > 0:
        current_dst = worker_name(dst)
        next_dst = (dst + 1) % world_size
        ret_rref = rpc.remote(
            current_dst, rref_forward_chain, args=(next_dst, world_size, rref, ttl - 1)
        )
        return [ret_rref]
    else:
        return rref.to_here()
def rpc_return_rref(dst):
    """Return an RRef owned by `dst` (exercises returning RRefs over RPC)."""
    return rpc.remote(dst, torch.add, args=(torch.ones(2, 2), 1))
def light_rpc():
    """Cheapest possible RPC payload: no arguments, constant result."""
    return 0
def heavy_rpc(tensor):
    """Burn CPU by repeatedly rescaling `tensor` in place; always returns 0.

    The in-place multiply/divide pairs leave the tensor scaled by
    prod(i / (i + 1)) for i in 1..99, i.e. 1/100 of its original value.
    """
    for factor in range(1, 100):
        tensor.mul_(factor)
        tensor.div_(factor + 1)
    return 0
@torch.jit.script
def heavy_rpc_torchscript(tensor):
    # TorchScript twin of heavy_rpc: 99 in-place scale-up/scale-down passes
    # (net effect: tensor * 1/100); always returns 0.
    for i in range(1, 100):
        tensor *= i
        tensor /= i + 1
    return 0
@torch.jit.script
def my_script_func(tensor):
    # TorchScript function that doubles its input.
    return torch.add(tensor, tensor)
# Canonical error message asserted against by the error-propagation tests.
expected_err = "Expected error"
def raise_func():
    """Always raise ValueError with the canonical test message."""
    raise ValueError(expected_err)
@torch.jit.script
def raise_func_script(expected_err: str) -> torch.Tensor:
    # TorchScript version; declared to return a Tensor but always raises.
    raise ValueError(expected_err)
# Multi-line message: exercises escaping of newlines in remote error strings.
expected_err_escape = "\nFirst line of error \n next line of error \n last line of error"
def raise_func_escape():
    """Raise ValueError with a multi-line message."""
    raise ValueError(expected_err_escape)
# Module-level slot used to keep an RRef alive on a remote worker.
global_rref = None
def set_global_rref(rref):
    """Stash `rref` in the module-level global (keeps its fork alive here)."""
    global global_rref
    global_rref = rref
def clear_global_rref():
    """Drop the stashed global rref so its fork can be released."""
    global global_rref
    global_rref = None
def check_rref_confirmed(rref):
    """True once the owner has confirmed this user rref."""
    return rref.confirmed_by_owner()
def get_rref_debug_info():
    """Return the RRef context's debug-info map for this worker."""
    return _rref_context_get_debug_info()
def add_use_future_cb(to, x, y, z):
    """rpc_async x+y, then add z in a callback, bridged through a
    concurrent.futures.Future; blocks until the result is ready."""
    out = concurrent.futures.Future()
    def callback(fut):
        out.set_result(fut.wait() + z)
    fut = rpc.rpc_async(to, torch.add, args=(x, y))
    fut.then(callback)
    return out.result()
def get_events_from_profile(profile_rref):
    """Extract recorded function events from a remote profiler handle."""
    return profile_rref.local_value().process_global_function_events
def add_use_future_set_result(to, x, y, z):
    """Like add_use_future_cb, but bridged through a torch.futures.Future."""
    out = torch.futures.Future()
    fut = rpc.rpc_async(to, torch.add, args=(x, y))
    fut.then(lambda fut : out.set_result(fut.wait() + z))
    return out.wait()
def add_use_future_nested_cb(to, x, y, z):
    """Chain two rpc_asyncs via nested callbacks; resolves an outer Future
    with (x + y) + z."""
    out = torch.futures.Future()
    def callback(fut1):
        fut2 = rpc.rpc_async(to, torch.add, args=(fut1.wait(), z))
        fut2.then(lambda fut2 : out.set_result(fut2.wait()))
    fut1 = rpc.rpc_async(to, torch.add, args=(x, y))
    fut1.then(callback)
    return out.wait()
def fail_on_fut(fut):
    # Intentionally ignores its future argument (used to exercise error paths).
    pass
@rpc.functions.async_execution
def async_raise_func():
    """async_execution function that raises instead of returning a Future."""
    raise RuntimeError("Expected error")
@rpc.functions.async_execution
def async_wrong_type():
    # Violates the async_execution contract by returning a Tensor, not a Future.
    return torch.zeros(2, 2)
@rpc.functions.async_execution
def async_add(to, x, y):
    """Return the rpc_async Future directly (canonical async_execution usage)."""
    return rpc.rpc_async(to, torch.add, args=(x, y))
def slow_add(x, y, device="cpu"):
    """Add after a 1s delay, computing on `device` but returning on CPU."""
    time.sleep(1)
    x = x.to(device)
    y = y.to(device)
    return torch.add(x, y).cpu()
@rpc.functions.async_execution
def slow_async_add(to, x, y, device="cpu"):
    """Async wrapper around slow_add on worker `to`."""
    return rpc.rpc_async(to, slow_add, args=(x, y, device))
@rpc.functions.async_execution
def async_add_with_future_ctor(to, x, y, z):
    """Resolve a hand-constructed Future from an rpc_async completion callback."""
    fut = torch.futures.Future()
    rpc.rpc_async(to, torch.add, args=(x, y)).then(
        lambda fut1: fut.set_result(fut1.wait() + z)
    )
    return fut
@rpc.functions.async_execution
def async_add_chained(to, x, y, z):
    """Chain a `.then` transform (+ z) onto the rpc_async Future."""
    return rpc.rpc_async(to, torch.add, args=(x, y)).then(
        lambda fut: fut.wait() + z
    )
@rpc.functions.async_execution
def async_add_chained_multi(to, x, num, step):
    """Chain `num` incremental `.then` steps (+ step each) onto one Future."""
    fut = rpc.rpc_async(to, torch.add, args=(x, 0))
    for _ in range(num):
        fut = fut.then(lambda fut: fut.wait() + step)
    return fut
@rpc.functions.async_execution
def async_add_nested(to, x, y, z):
    """Nest one async_execution function (async_add) inside another."""
    return rpc.rpc_async(to, async_add, args=(to, x, y)).then(
        lambda fut: fut.wait() + z
    )
@rpc.functions.async_execution
def async_add_multi_fanout(to, x, num, step):
    """Fan out `num` rpc_asyncs and combine all results into one Future."""
    futs = []
    for i in range(num):
        if i == 0:
            futs.append(rpc.rpc_async(to, torch.add, args=(x, step)))
        else:
            futs.append(rpc.rpc_async(to, torch.add, args=(0, step)))
    # TODO: use torch.futures.collect_all
    lock = Lock()
    state = {"cnt": 0, "ret": torch.zeros_like(x)}
    ret_future = torch.futures.Future()
    def inc_and_set(fut):
        # Accumulate under the lock; resolve once every future has landed.
        with lock:
            state["cnt"] += 1
            state["ret"] += fut.wait()
            if state["cnt"] >= len(futs):
                ret_future.set_result(state["ret"])
    for fut in futs:
        fut.then(inc_and_set)
    return ret_future
@rpc.functions.async_execution
def async_cuda_sleep_and_set_to_one(t):
    """On a side CUDA stream: spin ~1000ms worth of cycles, fill t with 1s,
    and return a CUDA-aware Future resolved with t."""
    device = t.device
    original_stream = torch.cuda.current_stream(device)
    new_stream = torch.cuda.Stream(device)
    new_stream.wait_stream(original_stream)
    with torch.cuda.stream(new_stream):
        torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
        t.fill_(1)
        fut = Future(devices=[device])
        fut.set_result(t)
        return fut
@rpc.functions.async_execution
def async_cuda_nested_add(to, x, y, z):
    """rpc_async add, then a GPU-delayed `+ z` in the completion callback."""
    def cb(fut):
        torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
        return fut.value() + z
    return rpc.rpc_async(to, torch.add, args=(x, y)).then(cb)
# A custom Python class that contains a tensor, needed to see if we correctly
# use the Python pickler to extract tensors from non-IValue-convertible types.
class TensorWrapper:
    __slots__ = ("tensor", "lock", "event")
    def __init__(self, t):
        self.tensor = t
        # Add one non-picklable field, to ensure it's ignored/skipped.
        self.lock = Lock()
        self.event = torch.cuda.Event(enable_timing=True)
    def increase(self, v):
        """In-place add `v` to the wrapped tensor, under the lock."""
        with self.lock:
            self.tensor += v
    def sum(self):
        """Record the CUDA event and return the tensor's scalar sum."""
        with self.lock:
            self.event.record()
            return self.tensor.sum()
# Copied from test/test_cuda.py.
# Lazily-computed calibration value; None until first call below.
_cycles_per_ms = None
def get_cycles_per_ms():
    """Approximate number of cycles per millisecond for torch.cuda._sleep"""
    global _cycles_per_ms
    if _cycles_per_ms is None:
        # Calibrate once: time a fixed 1M-cycle GPU sleep with CUDA events.
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        torch.cuda._sleep(1000000)
        end.record()
        end.synchronize()
        _cycles_per_ms = 1000000 / start.elapsed_time(end)
    return _cycles_per_ms
class AsyncExecutionClass:
    """Exercises @rpc.functions.async_execution applied to static, class,
    and bound instance methods."""
    @staticmethod
    @rpc.functions.async_execution
    def static_async_add(to, x, y, z):
        # Chained-future form: resolve with (x + y) + z.
        return rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: fut.wait() + z
        )
    @classmethod
    @rpc.functions.async_execution
    def class_async_add(cls, to, x, y, z):
        # Hand-constructed-future form of the same computation.
        ret_fut = torch.futures.Future()
        rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: ret_fut.set_result(fut.wait() + z)
        )
        return ret_fut
    @rpc.functions.async_execution
    def bound_async_add(self, to, x, y, z):
        # Bound-method form; `self` is unused.
        return rpc.rpc_async(to, torch.add, args=(x, y)).then(
            lambda fut: fut.wait() + z
        )
def return_future():
    """Return a fresh, never-resolved torch Future."""
    pending = torch.futures.Future()
    return pending
class FooBackendOptions(rpc.RpcBackendOptions):
    """RpcBackendOptions subclass carrying only an init_method value."""
    def __init__(self, init_method):
        # Must call the __init__ of the superclass (and do so directly,
        # without using super()) because... pybind.
        rpc.RpcBackendOptions.__init__(self)
        self.init_method = init_method
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
class RpcTest(RpcAgentTestFixture):
@dist_init
def test_worker_id(self):
n = self.rank + 1
peer_rank = n % self.world_size
self_worker_info = rpc.get_worker_info()
peer_worker_info = rpc.get_worker_info(worker_name(peer_rank))
self.assertEqual(self_worker_info.name, worker_name(self.rank))
self.assertEqual(peer_worker_info.name, worker_name(peer_rank))
with self.assertRaisesRegex(RuntimeError, "Unknown destination worker"):
unknown_worker_id = rpc.get_worker_info("WorkerUnknown")
@dist_init
def test_get_worker_infos(self):
worker_infos = rpc.api._get_current_rpc_agent().get_worker_infos()
worker_names = {worker_info.name for worker_info in worker_infos}
expected_worker_names = {
worker_name(rank) for rank in range(self.world_size)
}
self.assertEqual(worker_names, expected_worker_names)
worker_ids = {worker_info.id for worker_info in worker_infos}
expected_worker_ids = set(range(self.world_size))
self.assertEqual(worker_ids, expected_worker_ids)
@dist_init
def test_self_add(self):
self_worker_info = rpc.get_worker_info()
self_worker_name = worker_name(self.rank)
fut = rpc.rpc_async(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
ret = rpc.rpc_sync(self_worker_info, torch.add, args=(torch.ones(2, 2), 1))
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_send_to_rank(self):
dst_rank = (self.rank + 1) % self.world_size
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
ret = self._run_func_in_mode(dst_rank, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
self.assertEqual(ret, torch.ones(2, 2) + 1)
# Test invalid ranks
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(self.world_size + 1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(RuntimeError):
self._run_func_in_mode(-1, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank + 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
with self.assertRaises(ValueError):
self._run_func_in_mode(dst_rank - 0.5, torch.add, exec_mode, args=(torch.ones(2, 2), 1))
@dist_init
def test_self_py_udf_remote(self):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1 + 3)
def _test_self_remote_rref_as_rpc_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
fut = rpc.rpc_async(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
ret = rpc.rpc_sync(dst, add_rref_to_value, args=(rref, torch.ones(2, 2) + 1))
self.assertEqual(ret, torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2) + 1)
self.assertEqual(fut.wait(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2))
@dist_init
def test_self_remote_rref_as_rpc_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_rpc_arg(dst)
@dist_init
def test_self_remote_rref_as_self_rpc_arg(self):
self._test_self_remote_rref_as_rpc_arg(rpc.get_worker_info())
def _test_self_remote_rref_as_remote_arg(self, dst):
self_worker_info = rpc.get_worker_info()
rref = rpc.remote(self_worker_info, my_function, args=(torch.ones(2, 2), 1, 3))
ret_rref = rpc.remote(dst, add_rref_to_value, args=(rref, torch.ones(2, 2)))
self.assertEqual(
ret_rref.to_here(), torch.ones(2, 2) + 1 + 3 + torch.ones(2, 2)
)
@dist_init
def test_self_remote_rref_as_remote_arg(self):
dst = worker_name((self.rank + 1) % self.world_size)
self._test_self_remote_rref_as_remote_arg(dst)
@dist_init
def test_rref_proxy_non_exist(self):
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
msg = "has no attribute \'non_exist\'"
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_sync().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.rpc_async().non_exist()
with self.assertRaisesRegex(AttributeError, msg):
rref.remote().non_exist()
def _test_rref_proxy_tensor(self, dst):
rref = rpc.remote(dst, my_function, args=(torch.ones(2, 2), 1, 3))
expected = torch.ones(2, 2) + 1 + 3
self.assertEqual(expected.size(), rref.rpc_sync().size())
self.assertEqual(expected + 1, rref.rpc_async().add(1).wait())
self.assertEqual(expected.view(1, 4), rref.remote().view(1, 4).to_here())
@dist_init
def test_rref_proxy_tensor(self):
self._test_rref_proxy_tensor(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_tensor_self(self):
self._test_rref_proxy_tensor(rpc.get_worker_info())
@dist_init
def test_rref_proxy_reuse(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
my_function,
args=(torch.ones(2, 2), 1, 3)
)
expected = torch.ones(2, 2) + 1 + 3
proxy_rpc_sync = rref.rpc_sync()
proxy_rpc_async = rref.rpc_async()
proxy_remote = rref.remote()
self.assertEqual(expected.size(), proxy_rpc_sync.size())
self.assertEqual(expected + 1, proxy_rpc_sync.add(1))
self.assertEqual(expected.view(1, 4), proxy_rpc_sync.view(1, 4))
self.assertEqual(expected.size(), proxy_rpc_async.size().wait())
self.assertEqual(expected + 3, proxy_rpc_async.add(3).wait())
self.assertEqual(expected.view(4, 1), proxy_rpc_async.view(4, 1).wait())
self.assertEqual(expected.size(), proxy_remote.size().to_here())
self.assertEqual(expected + 5, proxy_remote.add(5).to_here())
self.assertEqual(expected.view(-1), proxy_remote.view(-1).to_here())
def _test_rref_proxy_class(self, dst):
rref = rpc.remote(dst, MyClass, args=(7,))
expected = MyClass(7)
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
expected.increment_value(3)
self.assertEqual(None, rref.rpc_sync().increment_value(1))
self.assertEqual(None, rref.rpc_async().increment_value(1).wait())
self.assertEqual(None, rref.remote().increment_value(1).to_here())
self.assertEqual(expected.get_value(), rref.rpc_sync().get_value())
self.assertEqual(expected.get_value(), rref.rpc_async().get_value().wait())
self.assertEqual(expected.get_value(), rref.remote().get_value().to_here())
self.assertEqual(
expected.my_instance_method(2),
rref.rpc_sync().my_instance_method(2)
)
self.assertEqual(
expected.my_instance_method(3),
rref.rpc_async().my_instance_method(3).wait()
)
self.assertEqual(
expected.my_instance_method(4),
rref.remote().my_instance_method(4).to_here()
)
self.assertEqual(
expected.my_static_method(9),
rref.rpc_sync().my_static_method(9)
)
self.assertEqual(
expected.my_static_method(10),
rref.rpc_async().my_static_method(10).wait()
)
self.assertEqual(
expected.my_static_method(11),
rref.remote().my_static_method(11).to_here()
)
self.assertEqual(
expected.my_class_method(2, torch.zeros(2, 2)),
rref.rpc_sync().my_class_method(2, torch.zeros(2, 2))
)
self.assertEqual(
expected.my_class_method(2, torch.ones(3, 3)),
rref.rpc_async().my_class_method(2, torch.ones(3, 3)).wait()
)
self.assertEqual(
expected.my_class_method(2, torch.ones(4, 4)),
rref.remote().my_class_method(2, torch.ones(4, 4)).to_here()
)
@dist_init
def test_rref_proxy_class(self):
self._test_rref_proxy_class(worker_name((self.rank + 1) % self.world_size))
@dist_init
def test_rref_proxy_class_self(self):
self._test_rref_proxy_class(rpc.get_worker_info())
@dist_init
def test_self_remote_rref_as_self_remote_arg(self):
self._test_self_remote_rref_as_remote_arg(rpc.get_worker_info())
@mock.patch.object(torch.distributed.autograd, "_init")
@mock.patch.object(torch.distributed.rpc.api, "_set_and_start_rpc_agent")
@dist_init(setup_rpc=False)
def test_register_rpc_backend_and_set_and_start_rpc_backend(
self, mock_rpc_agent, mock_dist_autograd_init
):
backend_name = "stub_backend"
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
with self.assertRaisesRegex(
RuntimeError, "^RPC backend .+: already registered$"
):
backend = rpc.backend_registry.register_backend(
backend_name,
_stub_construct_rpc_backend_options_handler,
_stub_init_rpc_backend_handler,
)
rpc.init_rpc(
name="worker1",
backend=backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
@dist_init(setup_rpc=False)
def test_duplicate_name(self):
with self.assertRaisesRegex(RuntimeError, "is not unique"):
store, _, _ = next(
torch.distributed.rendezvous(
self.init_method, rank=self.rank, world_size=self.world_size
)
)
rpc._init_rpc_backend(
backend=self.rpc_backend,
store=store,
name="duplicate_name",
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
    @dist_init(setup_rpc=False)
    def test_duplicate_name_2(self):
        """Colliding names via the public init_rpc path must also fail.

        The modulo maps the last rank onto an already-taken worker name.
        """
        with self.assertRaisesRegex(RuntimeError, "is not unique"):
            rpc.init_rpc(
                name=worker_name(self.rank % (self.world_size - 1)),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
    @dist_init(setup_rpc=False)
    def test_reinit(self):
        """Calling init_rpc twice on the same process raises."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Wait for all init to complete.
        dist.barrier()

        # TODO: with TCP init, rank 0 raises Address already in use because
        # rank 0 is the start daemon and the store is created before checking if
        # RPC is already initialized in init_rpc.
        if os.environ.get("RPC_INIT_WITH_TCP", None) == "1" and self.rank == 0:
            expected_reinit_err = "Address already in use"
        else:
            expected_reinit_err = "is already initialized"

        with self.assertRaisesRegex(RuntimeError, expected_reinit_err):
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
        rpc.shutdown()
def test_world_size_one(self):
if self.rank == 0:
rpc.init_rpc(
name="me",
backend=self.rpc_backend,
rank=0,
world_size=1,
rpc_backend_options=self.rpc_backend_options,
)
expect = torch.ones(2, 2) * 2
result = rpc.rpc_sync(
"me",
my_tensor_function,
args=(torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(expect, result)
expect = torch.ones(3, 3) * 2
result = rpc.rpc_async(
"me",
my_tensor_function,
args=(torch.ones(3, 3), torch.ones(3, 3))
).wait()
self.assertEqual(expect, result)
expect = torch.ones(4, 4) * 2
result = rpc.remote(
"me",
my_tensor_function,
args=(torch.ones(4, 4), torch.ones(4, 4))
).to_here()
self.assertEqual(expect, result)
rpc.shutdown()
@dist_init(setup_rpc=False)
def test_invalid_names(self):
from torch.distributed.rpc import WorkerInfo
worker_id = 0
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo("abc*", worker_id)
with self.assertRaisesRegex(RuntimeError, "Worker name must match"):
info = WorkerInfo(" ", worker_id)
with self.assertRaisesRegex(RuntimeError, "must be non-empty"):
info = WorkerInfo("", worker_id)
# If the number in the message does not match, it is likely that the
# value of MAX_NAME_LEN in RPC WorkerInfo has changed.
with self.assertRaisesRegex(RuntimeError, "shorter than 128"):
info = WorkerInfo("".join(["a" for i in range(500)]), worker_id)
@dist_init
def test_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
    @staticmethod
    def return_callee_id():
        # Helper RPC target: report the id of the worker executing the call.
        return rpc.get_worker_info().id
    @dist_init
    def test_int_callee(self):
        # A bare integer rank is accepted as the RPC destination; the callee
        # reports its own worker id, which should equal that rank.
        dst_rank = (self.rank + 1) % self.world_size
        ret = rpc.rpc_sync(dst_rank, RpcTest.return_callee_id)
        self.assertEqual(ret, dst_rank)
@dist_init
def test_add_with_id(self):
n = self.rank + 1
dst_rank = n % self.world_size
workder_info = rpc.get_worker_info(worker_name(dst_rank))
ret = rpc.rpc_sync(
workder_info, torch.add, args=(torch.ones(n, n), torch.ones(n, n))
)
self.assertEqual(ret, torch.ones(n, n) * 2)
@dist_init
def test_scalar_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), n)
)
self.assertEqual(ret, (torch.ones(n, n) + n))
@dist_init
def test_async_add(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    @dist_init
    def test_nonzero(self):
        # An op with data-dependent output shape (nonzero) round-trips via RPC.
        n = self.rank + 1
        dst_rank = n % self.world_size
        x = torch.ones(self.world_size, self.world_size)
        # Zero out one entry so the result differs per rank.
        x[self.rank][self.rank] = 0
        ret = rpc.rpc_sync(worker_name(dst_rank), torch.nonzero, args=(x,))
        self.assertEqual(ret, x.nonzero())
@dist_init
def test_multi_rpc(self):
dst_rank = (self.rank + 1) % self.world_size
for i in range(20):
n = i + self.rank + 1
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
    def _run_uneven_workload(self, num_repeat=30):
        """Drive an intentionally uneven workload from rank 0.

        Phase 1 sends work only to worker1, phase 2 only to worker2, so that
        an incorrect join/shutdown implementation would tear down the idle
        worker before its phase begins.
        """
        # worker0 drives and waits for worker1 and worker2
        # throughout the test.
        if self.rank == 0:
            self.assertTrue(self.world_size >= 3)

            # Phase 1: Only worker1 has workload.
            dst = "worker1"
            futs = []
            for _ in range(num_repeat):
                fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
                futs.append(fut)

            for fut in torch.futures.collect_all(futs).wait():
                self.assertEqual(fut.wait(), 0)

            # Phase 2: Only worker2 has workload.
            # If join is not correctly implemented,
            # worker2 should be closed by now.
            dst = "worker2"
            futs = []
            for _ in range(num_repeat):
                fut = rpc.rpc_async(dst, heavy_rpc, args=(torch.ones(100, 100),))
                futs.append(fut)

            for val in torch.futures.wait_all(futs):
                self.assertEqual(val, 0)
    def test_wait_all_workers(self):
        """_wait_all_workers drains outstanding work before ungraceful shutdown."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        self._run_uneven_workload()

        # worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 calls this immediately and has some works after it.
        # worker3 calls this immediately and has no more work.
        rpc.api._wait_all_workers()

        # Wait before proceeding to shutdown to ensure worker0 RPCs make
        # it through to other workers.
        dist.barrier()
        rpc.shutdown(graceful=False)
    def test_wait_all_workers_twice(self):
        """Calling _wait_all_workers twice in a row must be safe (idempotent)."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        self._run_uneven_workload()

        # worker0 calls this at the end after waiting for RPC responses.
        # worker1/2 calls this immediately and has some works after it.
        # worker3 calls this immediately and has no more work.
        rpc.api._wait_all_workers()
        rpc.api._wait_all_workers()

        # Wait before proceeding to shutdown to ensure worker0 RPCs make
        # it through to other workers.
        dist.barrier()
        rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
    @dist_init
    def test_all_gather_timeout(self):
        """_all_gather honors the RPC timeout when a peer pickles too slowly.

        Rank 0 (the gather leader) sees the _all_gather-specific timeout
        message; other ranks see the agent's generic timeout error.
        """
        rpc._set_rpc_timeout(0.1)

        if self.rank == 0:
            with self.assertRaisesRegex(
                RuntimeError,
                "timed out in _all_gather after 0\\.10 seconds"
            ):
                rpc.api._all_gather(SlowPickleClass(0.5))
        else:
            expected_error = self.get_timeout_error_regex()
            with self.assertRaisesRegex(RuntimeError, expected_error):
                rpc.api._all_gather(SlowPickleClass(0.5))
    def _test_barrier_helper(self, info, names, multi_threaded=False):
        """Check that _barrier actually synchronizes the named workers.

        The lexicographically-first worker acts as leader and hosts a shared
        counter; every participant increments it between two barriers, so the
        leader must observe len(names) increments after the second barrier.
        Counter assertions are skipped in multi_threaded mode since concurrent
        callers race on the counter value.
        """
        names = sorted(names)
        leader = names[0]
        rpc.rpc_sync(leader, _reset_count)
        if not multi_threaded and info.name == leader:
            self.assertEqual(_rpc_barrier_count, 0)
        rpc.api._barrier(names)
        rpc.rpc_sync(leader, _increment_count)
        rpc.api._barrier(names)
        if not multi_threaded and info.name == leader:
            self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
    @dist_init
    def test_rpc_barrier_partial_subset(self):
        # Test rpc barrier when some processes are not involved in the barrier.
        # Odd-id workers form one barrier group; each even-id worker enters a
        # trivial single-member barrier containing only itself.
        info = rpc.get_worker_info()
        all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
        if info.id % 2:
            names = [worker.name for worker in all_worker_info if worker.id % 2]
        else:
            names = [f"worker{info.id}"]
        self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
# This tests validates the implementation of barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
    @dist_init
    def test_graceful_shutdown_with_uneven_workload(self):
        """Test graceful termination."""
        # Graceful shutdown (via dist_init teardown) must wait for the
        # staggered worker1/worker2 workloads to finish.
        self._run_uneven_workload()
    @dist_init(setup_rpc=False)
    def test_shutdown_followed_by_rpc(self):
        """After rpc.shutdown(), further RPC calls raise an init error."""
        # Initialize RPC.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        n = self.rank + 1
        dst_rank = n % self.world_size
        # Sanity-check a successful RPC before shutting down.
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        self.assertEqual(ret, torch.ones(n, n) * 2)
        rpc.shutdown()

        with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
            rpc.rpc_sync(
                worker_name(dst_rank),
                torch.add,
                args=(torch.ones(n, n), torch.ones(n, n)),
            )
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
    @dist_init
    def test_profiler_rpc_record_shapes(self):
        """Remote op input shapes recorded over RPC match a local run."""
        if self.rank != 1:
            return
        dst = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst)
        t1, t2 = torch.ones(100), torch.ones(100)
        with _profile(record_shapes=True) as prof:
            rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))

        function_events = prof.function_events
        remote_events = [event for event in function_events if event.is_remote]
        remote_add_event = [
            event for event in remote_events if "aten::add" in event.name
        ][0]
        remote_add_input_shapes = remote_add_event.input_shapes
        # Run profiler on equivalent local op and validate shapes are the same.
        with _profile(record_shapes=True) as prof:
            torch.add(t1, t2)

        local_function_events = prof.function_events
        local_add_event = [
            event for event in local_function_events if "aten::add" in event.name
        ][0]
        local_add_input_shapes = local_add_event.input_shapes
        self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# if cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicates no memory being profiled)
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with _profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with _profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
    @dist_init
    def test_profiler_rpc_key_names(self):
        # tests that remote events are properly prefixed with the RPC profiling key.
        if self.rank != 1:
            return

        # Spawn multiple threads that send RPCs to ensure keys are correctly
        # prefixied when there are multiple RPCs being created/in flight at the
        # same time.
        dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]

        def rpc_with_profiling(dst_worker):
            # Runs on a worker thread: profile one async RPC, then verify that
            # every remote event carries this RPC's profiling-key prefix and
            # that the representative EXPECTED_REMOTE_EVENTS all appear.
            with _profile() as prof:
                fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
                fut.wait()

            events = prof.function_events
            remote_event_names = {
                event.name: event for event in events if event.is_remote
            }
            rpc_profiling_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                udf_with_torch_ops.__qualname__,
                worker_name(self.rank),
                dst_worker,
            )

            remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
            for name, event in remote_event_names.items():
                # Ensure that we have the expected key as part of the remote
                # event.
                self.assertTrue(name.startswith(rpc_profiling_key))
                self.assertTrue(event.is_remote)
                self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
                # Ensure that the remote event name also contains the operator.
                operator_name_substr = name[len(rpc_profiling_key) :]
                # Note: we don't assert that every remote event needs to be
                # in the above set, the set is just a representative set of
                # what we expect to see. The profiler can change and add more
                # events, but we should always expect to see this representative
                # set.
                matching_event = {
                    remote_event_name
                    for remote_event_name in remote_event_name_set
                    if remote_event_name in operator_name_substr
                }
                remote_event_name_set -= matching_event

            # The set should be empty, otherwise its contained elements did
            # not show up in the remote profiler output.
            self.assertTrue(
                remote_event_name_set == set(),
                f"Expected {remote_event_name_set} to be included in remote profiler output.",
            )

        for dst in dst_ranks:
            dst_worker = worker_name(dst)
            num_parallel_rpcs = 2
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=num_parallel_rpcs
            ) as executor:
                futs = [
                    executor.submit(rpc_with_profiling, dst_worker)
                    for _ in range(num_parallel_rpcs)
                ]
                # Wait for workers to finish test
                for fut in futs:
                    fut.result()
    def _run_test_profiler_remote_events_profiled(self):
        # Tests that we can successfully invoke the profiler on a remote node,
        # and collect the remote events back in the local profiler.
        if self.rank != 1:
            return
        dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
        for dst in dst_ranks:
            dst_worker = worker_name(dst)
            with _profile() as prof:
                fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
                ret = fut.wait()

            events = prof.function_events

            rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
            self.check_profiling_info(
                worker_name(self.rank),
                dst_worker,
                udf_with_torch_ops,
                rpc_event,
                RPCExecMode.ASYNC,
            )

            remote_events = {event.name: event for event in events if event.is_remote}
            rpc_profiling_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                udf_with_torch_ops.__qualname__,
                worker_name(self.rank),
                worker_name(dst),
            )

            # Every expected remote op must show up, prefixed with the RPC
            # profiling key, and attributed to the destination node.
            for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
                expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
                self.assertTrue(expected_key in remote_events)
                remote_event = remote_events[expected_key]
                # Remote event should have a node ID corresponding to the worker
                # it ran on.
                self.assertEqual(remote_event.node_id, dst)

            # Validate order remote events show up in profiling output.
            def convert_remote_to_local(event_name):
                # Strip the "<rpc key>#remote_op: " prefix, leaving the op name.
                remote_op_key = rpc_profiling_key + REMOTE_OP_STR
                return event_name[
                    event_name.find(remote_op_key)
                    + len(remote_op_key) :
                ]

            remote_events_list = [
                convert_remote_to_local(event.name)
                for event in events
                if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
            ]
            self.assertEqual(
                set(remote_events_list),
                set(EXPECTED_REMOTE_EVENTS),
                f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
            )
    @dist_init
    def test_profiler_remote_events_profiled(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_remote_events_profiled()
    @dist_init
    def test_profiler_remote_events_profiled_single_threaded(self):
        # Single-threaded request callback variant of the same scenario.
        self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
    def _run_rpc_profiling_async_function(self, device="cpu"):
        """Profile a nested async RPC chain (rank1 -> dst1 -> dst2).

        Verifies that the nested RPC issued by slow_async_add on dst1, and the
        add it runs on dst2, both appear as remote events attributed to the
        correct nodes.
        """
        if self.rank != 1:
            return

        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        x = torch.ones(2)
        y = torch.ones(2)
        with _profile() as prof:
            ret = rpc.rpc_async(
                dst1, slow_async_add, args=(dst2, x, y, device), timeout=20
            )
            out = ret.wait()

        function_events = prof.function_events
        # slow_async_add resulted in an RPC from dst1 -> dst2, so this should be
        # recorded.
        key_prefix = _build_rpc_profiling_key(
            RPCExecMode.ASYNC, slow_async_add.__qualname__, worker_name(self.rank), dst1
        )

        nested_rpc_key_prefix = _build_rpc_profiling_key(
            RPCExecMode.ASYNC, slow_add.__qualname__, dst1, dst2
        )
        expected_key = key_prefix + REMOTE_OP_STR + nested_rpc_key_prefix
        remote_events = [event for event in function_events if event.is_remote]
        rpc_remote_event = [
            event for event in remote_events if event.name == expected_key
        ]
        self.assertEqual(1, len(rpc_remote_event))
        rpc_remote_event = rpc_remote_event[0]
        self.assertEqual(rpc_remote_event.node_id, (self.rank + 1) % self.world_size)
        # slow_async_add's RPC does an add on dst2, which should be reflected as well.
        remote_add_key = (
            expected_key + REMOTE_OP_STR + torch.jit._builtins._find_builtin(torch.add)
        )
        remote_add_event = [
            event for event in remote_events if event.name == remote_add_key
        ]
        self.assertEqual(1, len(remote_add_event))
        remote_add_event = remote_add_event[0]
        # Validate that node_id is dst2.
        self.assertEqual(remote_add_event.node_id, (self.rank + 2) % self.world_size)
    @dist_init
    def test_rpc_profiling_async_function(self):
        # Run on CPU, then on CUDA when available (barrier keeps ranks aligned
        # between the two runs).
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        self._run_rpc_profiling_async_function()
        if torch.cuda.is_available():
            dist.barrier()
            self._run_rpc_profiling_async_function(device="cuda:0")
    @dist_init
    def test_rpc_profiling_async_function_single_threaded(self):
        # Single-threaded request callback variant of
        # test_rpc_profiling_async_function.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        self._run_rpc_profiling_async_function()
        if torch.cuda.is_available():
            dist.barrier()
            self._run_rpc_profiling_async_function(device="cuda:0")
@dist_init
def test_rpc_profiling_remote_record_function(self):
# test that functions run over RPC with record_function show the expected
# profiled block.
if self.rank != 1:
return
dst_ranks = [i for i in range(self.world_size) if i != self.rank]
for dst_rank in dst_ranks:
dst_worker = worker_name(dst_rank)
with _profile() as prof:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=(-1, True))
fut.wait()
function_events = prof.function_events
record_function_remote_event = [
evt for evt in function_events if "##forward##" in evt.name
]
self.assertEqual(1, len(record_function_remote_event))
record_function_remote_event = record_function_remote_event[0]
self.assertEqual(record_function_remote_event.node_id, dst_rank)
# cpu_children only returns direct children, so here we get all
# children recursively.
def get_cpu_children(event):
if not event.cpu_children:
return []
cpu_children = event.cpu_children
for e in event.cpu_children:
cpu_children.extend(get_cpu_children(e))
return cpu_children
remote_children = get_cpu_children(record_function_remote_event)
# Get local children and verify parity.
with _profile() as prof:
udf_with_torch_ops(-1, True)
local_function_events = prof.function_events
local_record_function_event = [
evt for evt in local_function_events if "##forward##" in evt.name
][0]
local_children = get_cpu_children(local_record_function_event)
local_children_names = [
evt.name for evt in local_children
]
REMOTE_OP_STR = "#remote_op: "
def convert_remote_to_local(event_name):
remote_op_key = REMOTE_OP_STR
return event_name[
event_name.find(remote_op_key) + len(remote_op_key) :
]
for evt in remote_children:
local_name = convert_remote_to_local(evt.name)
self.assertTrue(local_name in local_children_names)
    def validate_profiling_workload(self, dst, prof):
        """Check that run_profiling_workload's torch.mul appears in `prof`
        as a remote event attributed to worker `dst`."""

        def convert_remote_to_local(event_name):
            # Strip the "<rpc key>#remote_op: " prefix, leaving the op name.
            return event_name[event_name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR) :]

        events = prof.function_events
        remote_events = {
            convert_remote_to_local(event.name): event
            for event in events
            if event.is_remote
        }
        self.assertTrue("aten::mul" in remote_events)
        remote_mul_event = remote_events["aten::mul"]
        self.assertEqual(remote_mul_event.node_id, dst)
        self.check_profiling_info(
            worker_name(self.rank),
            worker_name(dst),
            torch.mul,
            remote_mul_event,
            RPCExecMode.ASYNC,
        )
    def _run_test_profiler_with_autograd_context(self):
        """Profiler and dist_autograd context nest correctly in either order."""
        dst = (self.rank + 1) % self.world_size
        if self.rank == 1:
            # Cases where we can double wrap messages with profiling information and autograd info.
            with dist_autograd.context() as context_id:
                with _profile() as prof:
                    self.run_profiling_workload(dst)

            self.validate_profiling_workload(dst, prof)

            # Ensure that flipped order of ctx managers results in events being
            # recorded as expected.
            with _profile() as prof:
                with dist_autograd.context() as context_id:
                    self.run_profiling_workload(dst)

            self.validate_profiling_workload(dst, prof)
    @dist_init
    def test_profiler_with_autograd_context_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_autograd_context()
    @dist_init
    def test_profiler_with_autograd_context(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_autograd_context()
    def _profiler_test_with_rpc(self, rpc_exec_mode, func, args, use_record_function=False, dst=None):
        """Shared driver: profile one RPC of `func` in the given exec mode and
        validate the recorded events.

        Checks node attribution of the RPC event and its remote events, the
        caller/callee/function/mode info, and — with use_record_function —
        that the surrounding record_function("foo") scope contains and
        precedes the RPC event.
        """
        dst = dst if dst is not None else (self.rank + 1) % self.world_size

        # only run profiler on rank 1.
        if self.rank == 1:
            with _profile() as prof:
                record_function_ctx_mgr = (
                    contextlib.suppress()
                    if not use_record_function
                    else torch.autograd.profiler.record_function(
                        "foo"
                    )
                )
                with record_function_ctx_mgr as rf:
                    if rpc_exec_mode == RPCExecMode.SYNC:
                        rpc.rpc_sync(worker_name(dst), func, args=args)
                    elif rpc_exec_mode == RPCExecMode.ASYNC:
                        fut = rpc.rpc_async(worker_name(dst), func, args=args)
                        fut.wait()
                    else:
                        self.assertTrue(rpc_exec_mode == RPCExecMode.REMOTE)
                        rref = rpc.remote(worker_name(dst), func, args=args)
                        rref.to_here()
                        # To avoid flakiness, wait for the RRef to be profiled. This
                        # means that we received the acknowledgement of successful
                        # creation on the owner and ran the callbacks responsible
                        # for recording the profiling event.
                        rref._get_profiling_future().wait()

            events = prof.function_events
            rpc_event = get_function_event(events, rpc_exec_mode.value)
            # verify Node ID for this rpc event.
            self.assertEqual(rpc_event.node_id, self.rank)
            # Ensure recording of remote events.
            remote_events = {event for event in events if event.node_id == dst} - {rpc_event}
            self.assertGreaterEqual(len(remote_events), 1)
            for remote_event in remote_events:
                self.assertEqual(remote_event.node_id, dst)

            if use_record_function:
                scope_event = get_function_event(events, "foo")
                # Since RPC call is within the scope, its CPU interval should be
                # contained within foo's interval.
                self.assertLessEqual(scope_event.time_range.start, rpc_event.time_range.start)
                self.assertGreaterEqual(scope_event.time_range.end, rpc_event.time_range.end)
            # the sender, dest worker, function run, and type of RPC should all
            # be recorded.
            self_worker_name = worker_name(self.rank)
            dst_worker_name = worker_name(dst)
            self.check_profiling_info(self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode)
            if use_record_function:
                # verify order by ensuring that the outer context comes
                # before the rpc event.
                foo_event_ix = next(i for i, event in enumerate(events) if "foo" in event.name)
                rpc_event_idx = next(i for i, event in enumerate(events) if rpc_exec_mode.value in event.name)
                self.assertLess(foo_event_ix, rpc_event_idx)
    def _run_test_profiler_with_sync_rpc_udf(self):
        # Sync RPC of a Python UDF, with and without a record_function scope.
        self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(RPCExecMode.SYNC, my_sleep_func, args=(1,),
                                     use_record_function=True)
    @dist_init
    def test_profiler_with_sync_rpc_udf(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_sync_rpc_udf()
    @dist_init
    def test_profiler_with_sync_rpc_udf_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_sync_rpc_udf()
    def _run_test_profiler_with_sync_rpc_builtin(self):
        # Sync RPC of a builtin op, with and without a record_function scope.
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
            use_record_function=True
        )
    @dist_init
    def test_profiler_with_sync_rpc_builtin(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_sync_rpc_builtin()
    @dist_init
    def test_profiler_with_sync_rpc_builtin_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_sync_rpc_builtin()
    def _run_test_profiler_with_async_rpc_udf(self):
        # Async RPC of a Python UDF, with and without a record_function scope.
        self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(RPCExecMode.ASYNC, my_sleep_func, args=(1,),
                                     use_record_function=True)
    @dist_init
    def test_profiler_with_async_rpc_udf(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_async_rpc_udf()
    @dist_init
    def test_profiler_with_async_rpc_udf_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_async_rpc_udf()
    def _run_test_profiler_with_async_rpc_builtin(self):
        # Async RPC of a builtin op, with and without a record_function scope.
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, torch.mul, args=(torch.ones(1), torch.ones(1)),
            use_record_function=True
        )
    @dist_init
    def test_profiler_with_async_rpc_builtin(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_async_rpc_builtin()
    @dist_init
    def test_profiler_with_async_rpc_builtin_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_async_rpc_builtin()
    def _run_test_profiler_with_remote_udf(self):
        # rpc.remote of a Python UDF: plain, with record_function, and to self.
        self._profiler_test_with_rpc(RPCExecMode.REMOTE, my_sleep_func, args=(1,))
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_sleep_func, args=(1,), use_record_function=True
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_sleep_func, args=(1,), dst=self.rank
        )
    @dist_init
    def test_profiler_with_remote_udf(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_remote_udf()
    @dist_init
    def test_profiler_with_remote_udf_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_remote_udf()
    def _run_test_profiler_with_remote_builtin(self):
        # rpc.remote of a builtin op: plain, with record_function, and to self.
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1))
        )
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, torch.mul, args=(torch.ones(1), torch.ones(1)),
            use_record_function=True
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE,
            torch.mul,
            args=(torch.ones(1), torch.ones(1)),
            dst=self.rank,
        )
    @dist_init
    def test_profiler_with_remote_builtin(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_remote_builtin()
    @dist_init
    def test_profiler_with_remote_builtin_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_remote_builtin()
    def _run_test_profiler_with_script_async_rpc(self):
        # Async RPC of a TorchScript function, with and without record_function.
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.ASYNC,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
    @dist_init
    def test_profiler_with_script_async_rpc(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_script_async_rpc()
    @dist_init
    def test_profiler_with_script_async_rpc_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_script_async_rpc()
    def _run_test_profiler_with_script_sync_rpc(self):
        # Sync RPC of a TorchScript function, with and without record_function.
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.SYNC,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
    @dist_init
    def test_profiler_with_script_sync_rpc(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_script_sync_rpc()
    @dist_init
    def test_profiler_with_script_sync_rpc_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_script_sync_rpc()
    def _run_test_profiler_with_script_remote_rpc(self):
        # rpc.remote of a TorchScript function: plain, with record_function,
        # and remote-to-self.
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),)
        )
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE,
            my_script_func,
            args=(torch.tensor(1),),
            use_record_function=True,
        )
        # test remote to self
        self._profiler_test_with_rpc(
            RPCExecMode.REMOTE, my_script_func, args=(torch.tensor(1),), dst=self.rank
        )
    @dist_init
    def test_profiler_with_script_remote_rpc(self):
        # Default (multi-threaded request callback) variant.
        self._run_test_profiler_with_script_remote_rpc()
    @dist_init
    def test_profiler_with_script_remote_rpc_single_threaded(self):
        # Single-threaded request callback variant.
        self._run_test_profiler_with_script_remote_rpc()
def _assert_top_level_events(self, process_global_events, expected_top_level_event_names):
top_level_event_names = []
for thread_local_events in process_global_events:
# Get top-level events from all events happened on a thread.
last_end_time = 0
for event in thread_local_events:
event_name = event.name
time_range = event.time_range
if time_range.start > last_end_time:
top_level_event_names.append(event_name)
last_end_time = time_range.end
top_level_event_names = sorted(top_level_event_names)
expected_top_level_event_names = sorted(expected_top_level_event_names)
self.assertEqual(
top_level_event_names,
expected_top_level_event_names,
f"Expected events {expected_top_level_event_names}, but got {top_level_event_names}",
)
    @dist_init
    def test_server_process_global_profiler(self):
        """Nested server-side (process-global) profilers capture the right ops.

        The inner profiler, active only around torch.sub, must record just
        aten::sub; the outer profiler, spanning both RPCs, records aten::add
        and aten::sub. The profilers are driven remotely through RRef proxies.
        """
        if self.rank != 0:
            return

        dst_rank = (self.rank + 1) % self.world_size
        dst_worker_name = worker_name(dst_rank)

        x = torch.tensor(1)
        y = torch.tensor(2)

        outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
        outer_profile_rref.rpc_sync().__enter__()
        rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
        inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
        inner_profile_rref.rpc_sync().__enter__()
        rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
        inner_profile_rref.rpc_sync().__exit__(None, None, None)
        outer_profile_rref.rpc_sync().__exit__(None, None, None)

        inner_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (inner_profile_rref,))
        expected_inner_events = ['aten::sub']
        expected_outer_events = expected_inner_events + ['aten::add']

        self._assert_top_level_events(inner_events, expected_inner_events)
        outer_events = rpc.rpc_sync(dst_worker_name, get_events_from_profile, (outer_profile_rref,))
        self._assert_top_level_events(outer_events, expected_outer_events)

        # Smoke-check that aggregation still works on the remote profiles.
        inner_profile_rref.rpc_sync().key_averages()
        outer_profile_rref.rpc_sync().key_averages()
    @dist_init
    def test_async_record_function_double_end_callbacks(self):
        """Attaching end callbacks from one record_function to the same future
        twice must raise a RuntimeError."""
        num_sleep_seconds = 1
        if self.rank == 1:
            # Validate that calling the function twice results in an error.
            with _profile() as pf:
                with torch.autograd.profiler.record_function("foo") as rf:
                    fut = rpc.rpc_async(
                        worker_name(0), my_sleep_func, args=(num_sleep_seconds,)
                    )
                    rf._call_end_callbacks_on_future(fut)
                    # Second attachment is rejected.
                    with self.assertRaisesRegex(
                        RuntimeError, "can only be called once."
                    ):
                        rf._call_end_callbacks_on_future(fut)
            fut.wait()
    @dist_init
    def test_async_record_function_cbs_jit_call(self):
        """End callbacks attached via the JIT profiler op
        (_call_end_callbacks_on_jit_fut) must preserve the RPC future's value
        and record the script call under the built profiling key."""
        if self.rank == 1:
            with _profile() as pf:
                key = _build_rpc_profiling_key(
                    RPCExecMode.ASYNC,
                    torch._jit_internal._qualified_name(my_script_func),
                    "worker1",
                    "worker0",
                )
                with torch.autograd.profiler.record_function(key) as rf:
                    fut = rpc.rpc_async(
                        worker_name(0), my_script_func, args=(torch.tensor(1),)
                    )
                    # Intentionally calling record_function internals
                    fut = torch.ops.profiler._call_end_callbacks_on_jit_fut(rf.handle, fut)
                result = fut.wait()
                # Validate that the profiling future returns the same value as the RPC
                # future.
                expected = torch.add(torch.tensor(1), torch.tensor(1))
                self.assertEqual(result, expected)
            events = pf.function_events
            rpc_event = get_function_event(
                events, torch._jit_internal._qualified_name(my_script_func)
            )
            self.assertTrue(torch._jit_internal._qualified_name(my_script_func) in rpc_event.name)
@dist_init
def test_py_class_constructor(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), MyClass, args=(n,))
self.assertEqual(ret.a, n)
@dist_init
def test_py_class_instance_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass(2).my_instance_method, args=(n,)
)
self.assertEqual(ret, MyClass(2).my_instance_method(n))
@dist_init
def test_py_class_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_class_method, args=(n, n + 1)
)
self.assertEqual(ret, MyClass.my_class_method(n, n + 1))
@dist_init
def test_py_class_static_method(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank), MyClass.my_static_method, args=(n + 10,)
)
self.assertEqual(ret, MyClass.my_static_method(n + 10))
@dist_init
def test_py_multi_async_call(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker_info = rpc.get_worker_info(worker_name(dst_rank))
fut1 = rpc.rpc_async(dst_worker_info, MyClass.my_static_method, args=(n + 10,))
fut2 = rpc.rpc_async(dst_worker_info, min, args=(n, n + 1, n + 2))
self.assertEqual(fut1.wait(), MyClass.my_static_method(n + 10))
self.assertEqual(fut2.wait(), min(n, n + 1, n + 2))
@dist_init
def test_py_no_return_result(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), no_result)
self.assertEqual(ret, no_result())
@dist_init
def test_py_tensors(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, my_tensor_function(torch.ones(n, n), torch.ones(n, n)))
@dist_init
def test_py_tensors_multi_async_call(self):
futs = []
n = self.rank + 1
dst_rank = n % self.world_size
for i in range(100):
fut = rpc.rpc_async(
worker_name(dst_rank),
my_tensor_function,
args=(torch.ones(i, i), torch.ones(i, i)),
)
futs.append(fut)
j = 0
for val in torch.futures.wait_all(futs):
self.assertEqual(
val, my_tensor_function(torch.ones(j, j), torch.ones(j, j))
)
j += 1
@dist_init
def test_py_tensors_in_container(self):
n = self.rank + 1
dst_rank = n % self.world_size
a = [torch.ones(n, n), torch.ones(n, n)]
b = TensorClass(build_complex_tensors())
c = {"foo": torch.ones(n, n), "bar": torch.ones(n, n)}
ret = rpc.rpc_sync(
worker_name(dst_rank), my_complex_tensor_function, args=(a, b, c)
)
self.assertEqual(ret, my_complex_tensor_function(a, b, c))
@dist_init
def test_py_nested_pickle(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_nested_pickle,
args=(MyPickleClass(), torch.ones(2, 2)),
)
m = MyPickleClass()
m.set(my_tensor_function(torch.ones(2, 2), torch.ones(2, 2)))
self.assertEqual(ret, run_nested_pickle(m, torch.ones(2, 2)))
@dist_init
def test_py_function_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
with self.assertRaises(TypeError):
ret = rpc.rpc_sync(worker_name(dst_rank), no_result, args=(10,))
    @dist_init
    def test_py_raise_in_user_func(self):
        """A ValueError raised in the remote UDF propagates to fut.wait(),
        and the callee also logs the error text to stderr."""
        with captured_output() as (_, err):
            # This barrier prevents a race condition where the main thread has
            # not entered the context manager when the remote function runs.
            initialize_pg(self.file_init_method, self.rank, self.world_size)
            dist.barrier()
            n = self.rank + 1
            dst_rank = n % self.world_size
            fut = rpc.rpc_async(worker_name(dst_rank), raise_func)
            with self.assertRaisesRegex(ValueError, expected_err):
                fut.wait()
            # This barrier prevents a race condition where the main thread exits
            # context manager before the remote function has ran.
            dist.barrier()
        # Validate that trainers log errors when running functions.
        stderr_lines = err.getvalue()
        self.assertTrue(expected_err in stderr_lines)
@dist_init
def test_py_raise_in_user_func_escaped_str(self):
n = self.rank + 1
dst_rank = n % self.world_size
fut = rpc.rpc_async(worker_name(dst_rank), raise_func_escape)
try:
fut.wait()
except ValueError as e:
msg = str(e)
# Ensure newlines are unescaped to provide a better repr of error.
self.assertEqual(msg, msg.encode("utf-8").decode("unicode_escape"))
else:
self.assertTrue(False, "expected raise_func_escape to raise ValueError.")
@dist_init
def test_nested_rpc(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
nested_rpc,
args=(worker_name(self.rank),),
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
def _stress_test_rpc(self, f, repeat=1000, args=()):
n = self.rank + 1
dst_rank = n % self.world_size
futs = []
tik = time.time()
for _ in range(repeat):
fut = rpc.rpc_async(worker_name(dst_rank), f, args=args)
futs.append(fut)
for val in torch.futures.wait_all(futs):
self.assertEqual(val, 0)
tok = time.time()
print(
"Rank {} finished testing {} times in {} seconds.".format(
self.rank, repeat, tok - tik
)
)
    @dist_init
    def test_stress_light_rpc(self):
        # Default stress profile: 1000 lightweight, argument-free RPCs.
        self._stress_test_rpc(light_rpc)
    @dist_init
    def test_stress_heavy_rpc(self):
        # 20 RPCs each carrying a 100x100 tensor payload.
        self._stress_test_rpc(heavy_rpc, repeat=20, args=(torch.ones(100, 100),))
    @dist_init
    def test_stress_heavy_rpc_torchscript(self):
        # TorchScript flavor of the heavy stress test above.
        self._stress_test_rpc(heavy_rpc_torchscript, repeat=20, args=(torch.ones(100, 100),))
@dist_init
def test_builtin_remote_ret(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(rref.to_here(), torch.ones(n, n) * 2)
    @dist_init
    def test_builtin_remote_self(self):
        # A remote call targeted at this very worker creates an OwnerRRef
        # whose value can be read directly with local_value().
        rref = rpc.remote(
            worker_name(self.rank),
            torch.add,
            args=(torch.ones(2, 2), torch.ones(2, 2)),
        )
        self.assertEqual(rref.local_value(), torch.ones(2, 2) * 2)
def _test_multi_remote_call(self, fn, args_fn=lambda x: (), kwargs_fn=lambda x: {}):
m = 10
n = self.rank + 1
dst_rank = n % self.world_size
rrefs = []
expected = []
for i in range(m):
n = n + i
rrefs.append(
rpc.remote(
worker_name(dst_rank),
fn,
args=args_fn(n),
kwargs=kwargs_fn(n),
)
)
expected.append(fn(*args_fn(n), **kwargs_fn(n)))
for i in range(m):
self.assertEqual(rrefs[i].to_here(), expected[i])
@dist_init
def test_multi_builtin_remote_ret(self):
def args_fn(n):
return (torch.ones(n, n), torch.ones(n, n))
self._test_multi_remote_call(torch.add, args_fn=args_fn)
@dist_init
def test_py_udf_remote(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref = rpc.remote(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(rref.to_here(), my_function(n, n + 1, n + 2))
@dist_init
def test_multi_py_udf_remote(self):
def kwargs_fn(n):
return {"a": torch.ones(n, n), "b": torch.ones(n, n), "c": torch.ones(n, n)}
self._test_multi_remote_call(my_function, kwargs_fn=kwargs_fn)
@dist_init
def test_py_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rref_args_user_share(self):
n = self.rank + 1
owner_rank = n % self.world_size
user_rank = (n + 1) % self.world_size
rref_a = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(owner_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
rref_c = rpc.remote(
worker_name(user_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
@dist_init
def test_py_rpc_rref_args(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 2, 0)
)
rref_b = rpc.remote(
worker_name(dst_rank), my_function, args=(torch.ones(n, n), 1, 0)
)
c = rpc.rpc_sync(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(c, torch.ones(n, n) + 4)
@dist_init
def test_nested_remote(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.remote(
worker_name(dst_rank1),
nested_remote,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 3)
@dist_init
def test_nested_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref_of_rrefs = rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
# Say C has 2 OwnerRRefs.
# B has 2 UserRRefs to those 2 OwnerRRefs, respectively.
# This call is effectively A asking B to share its 2 UserRRefs.
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_nested_rref_stress(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
all_rrefs = []
for _ in range(20):
all_rrefs.append(
rpc.remote(
worker_name(dst_rank1),
nested_rref,
args=(worker_name(dst_rank2),),
)
)
for i in range(20):
rref_of_rrefs = all_rrefs[i]
rrefs = rref_of_rrefs.to_here()
self.assertEqual(len(rrefs), 2)
self.assertEqual(rrefs[0].to_here(), torch.ones(2, 2) + 1)
self.assertEqual(rrefs[1].to_here(), torch.ones(2, 2) + 2)
@dist_init
def test_multi_layer_nested_async_rpc(self):
# This test will exit right away, but there will be a chain of async
# RPCs. The termination algorithm should detect those messages properly.
# Otherwise, some peer could exit early, leaving others to timeout
# errors or connection closed errors.
ttl = 20
n = self.rank + 1
dst_rank = n % self.world_size
multi_layer_nested_async_rpc(dst_rank, self.world_size, ttl)
@dist_init
def test_remote_with_exception(self):
n = self.rank + 1
dst_rank = n % self.world_size
# check ref to other workers
rref = rpc.remote(worker_name(dst_rank), raise_func)
with self.assertRaises(ValueError):
rref.to_here()
# check ref to itself
rref = rpc.remote(worker_name(self.rank), no_result, args=(10,))
with self.assertRaises(TypeError):
rref.to_here()
@dist_init
def test_rpc_return_rref(self):
n = self.rank + 1
dst_rank1 = n % self.world_size
dst_rank2 = (n + 1) % self.world_size
rref = rpc.rpc_sync(
worker_name(dst_rank1),
rpc_return_rref,
args=(worker_name(dst_rank2),),
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
    @dist_init
    def test_rref_forward_chain(self):
        """Forward a UserRRef through a chain of `ttl` workers; unwrap the
        nested single-element lists coming back, then check the final value."""
        ttl = 8
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
        )
        ret_rref = rref_forward_chain(dst_rank, self.world_size, rref, ttl)
        # Each hop wraps its result in a one-element list; peel ttl layers.
        for i in range(ttl):
            self.assertEqual(len(ret_rref), 1)
            ret_rref = ret_rref[0].to_here()
        ret = ret_rref
        self.assertEqual(ret, torch.add(torch.ones(n, n), 1))
    @dist_init
    def test_local_rref_no_fork(self):
        # An RRef created and consumed purely locally resolves via
        # local_value() without ever being forked to another worker.
        local_rref = RRef(35)
        self.assertEqual(local_rref.local_value(), 35)
    @dist_init
    def test_local_value_not_on_owner(self):
        # ensure that an error message is thrown if a user tries to call
        # local_value() on a non-owning node.
        next_rank = (self.rank + 1) % self.world_size
        rref = rpc.remote(
            worker_name(next_rank), torch.add, args=(torch.ones(1), torch.ones(1))
        )
        # The error must identify both the calling (user) worker and the
        # owner; local_id values 0/1 assume this is the first RRef created
        # in this test process.
        with self.assertRaisesRegex(
            RuntimeError, (
                fr"For UserRRef\(rref_id=GloballyUniqueId\(created_on={self.rank}, local_id=0\), "
                fr"fork_id=GloballyUniqueId\(created_on={self.rank}, local_id=1\)\), "
                r"can't call localValue\(\) on user "
                fr"WorkerInfo\(id={self.rank}, name={worker_name(self.rank)}\). "
                fr"Call it on owner WorkerInfo\(id={next_rank}, name={worker_name(next_rank)}\)"
            )
        ):
            rref.local_value()
@dist_init
def test_return_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_list = rpc.rpc_sync(
worker_name(dst_rank), get_rref_list, args=([1, 2, 3],)
)
for rref in rref_list:
rpc.rpc_sync(
rref.owner(),
_call_method_on_rref,
args=(MyClass.increment_value, rref, 10),
)
rets = [
rpc.rpc_sync(
rref.owner(), _call_method_on_rref, args=(MyClass.get_value, rref)
)
for rref in rref_list
]
self.assertEqual(rets, [11, 12, 13])
    @dist_init
    def _test_rref_type(self, blocking):
        """_get_type issues one RPC to the owner, caches the answer, and all
        later calls (blocking or future-based) are served from the cache."""
        def launched_rpc(events):
            # True if the profiler saw the _rref_typeof_on_owner RPC.
            expected_name = f"rpc_{RPCExecMode.ASYNC.value}#_rref_typeof_on_owner"
            return any([e.name.startswith(expected_name) for e in events])
        dst = worker_name((self.rank + 1) % self.world_size)
        rref = rpc.remote(dst, torch.add, args=(torch.ones(2), 1))
        # First call must actually launch the type-query RPC.
        with _profile() as p:
            t = rref._get_type(blocking=blocking)
            if not blocking:
                t = t.wait()
        self.assertTrue(launched_rpc(p.function_events))
        expected_type = type(torch.ones(2))
        self.assertEqual(t, expected_type)
        futs = []
        def verify(fut):
            self.assertEqual(fut.value(), expected_type)
        # Subsequent calls are served from the cache: no new RPC launched.
        with _profile() as p:
            for _ in range(10):
                t = rref._get_type(blocking=blocking)
                if not blocking:
                    futs.append(t)
                    t.add_done_callback(verify)
                    t = t.wait()
                self.assertEqual(t, expected_type)
        if not blocking:
            # Note that cached calls with blocking=False all return the same
            # cached original future.
            first_fut = futs[0]
            for f in futs[1:]:
                self.assertTrue(f is first_fut)
        # Ensure we never launch another RPC, other than for the very
        # first call.
        self.assertFalse(launched_rpc(p.function_events))
        self.assertEqual(t, type(torch.ones(2)))
        # A user-defined class payload reports its Python class as the type.
        rref = rpc.remote(dst, MyClass, args=(0,))
        rref_type = rref._get_type(blocking=blocking)
        if not blocking:
            rref_type = rref_type.wait()
        self.assertEqual(rref_type, MyClass)
    def test_rref_type_blocking(self):
        # Blocking-mode variant of the shared _test_rref_type scenario.
        self._test_rref_type(blocking=True)
    def test_rref_type_non_blocking(self):
        # Future-based variant of the shared _test_rref_type scenario.
        self._test_rref_type(blocking=False)
@dist_init
def _test_rref_type_with_error(self, blocking):
dst = worker_name((self.rank + 1) % self.world_size)
# 10 ms timeout
rref = rpc.remote(dst, raise_func)
# Blocking: error raised inline
if blocking:
with self.assertRaisesRegex(ValueError, "Expected error"):
rref._get_type(blocking=blocking)
else:
# Non-blocking: Immediately return future, block on wait
fut = rref._get_type(blocking=blocking)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
    def test_rref_type_with_error_blocking(self):
        # Blocking-mode variant of the error-propagation scenario.
        self._test_rref_type_with_error(blocking=True)
    def test_rref_type_with_error_non_blocking(self):
        # Future-based variant of the error-propagation scenario.
        self._test_rref_type_with_error(blocking=False)
@dist_init
def _test_rref_type_owner(self, blocking):
rref = RRef(torch.ones(2) + 1)
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, type(torch.ones(2)))
rref = RRef(MyClass(0))
rref_type = rref._get_type(blocking=blocking)
if not blocking:
rref_type = rref_type.wait()
self.assertEqual(rref_type, MyClass)
    def test_rref_type_owner_blocking(self):
        # Blocking-mode variant of the owner-side type query.
        self._test_rref_type_owner(blocking=True)
    def test_rref_type_owner_non_blocking(self):
        # Future-based variant of the owner-side type query.
        self._test_rref_type_owner(blocking=False)
    @staticmethod
    def _slow_add(x, y):
        # Deliberately slow addition (~1s) used by timeout/latency tests.
        time.sleep(1)
        return x + y
    @dist_init
    def test_rref_type_slow_init(self):
        # _get_type must block until the (deliberately slow) remote value
        # creation completes rather than failing on a still-pending RRef.
        dst = worker_name((self.rank + 1) % self.world_size)
        rref = rpc.remote(dst, RpcTest._slow_add, args=(torch.ones(2), 1))
        self.assertEqual(rref._get_type(), type(torch.ones(2)))
    @dist_init
    def test_owner_equality(self):
        """WorkerInfo equality and hashing for RRef owners: local RRefs share
        this worker as owner, remote ones share the peer, and owners work
        correctly as dict keys."""
        a = RRef(40)
        b = RRef(50)
        other_rank = (self.rank + 1) % self.world_size
        other_a = rpc.remote(
            worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
        )
        other_b = rpc.remote(
            worker_name(other_rank), torch.add, args=(torch.ones(1), 1)
        )
        other_a.to_here()  # to ensure clean termination
        other_b.to_here()
        # An owner never equals a non-WorkerInfo value.
        self.assertNotEqual(a.owner(), 23)
        self.assertEqual(other_a.owner(), other_b.owner())
        self.assertNotEqual(a.owner(), other_a.owner())
        self.assertEqual(other_a.owner(), other_a.owner())
        self.assertEqual(other_a.owner(), other_b.owner())
        self.assertEqual(a.owner(), a.owner())
        self.assertEqual(a.owner(), b.owner())
        self.assertEqual(a.owner(), rpc.get_worker_info())
        # Equal owners must hash equally: inserting under an equal key
        # overwrites, leaving exactly two entries (local + remote owner).
        x = dict()
        x[a.owner()] = a
        x[other_a.owner()] = other_a
        self.assertEqual(x[a.owner()], a)
        self.assertEqual(x[b.owner()], a)
        self.assertEqual(x[other_a.owner()], other_a)
        self.assertEqual(x[other_b.owner()], other_a)
        self.assertEqual(len(x), 2)
@dist_init
def test_pass_local_rrefs(self):
n = self.rank + 1
dst_rank = n % self.world_size
dst_worker = worker_name(dst_rank)
rref = RRef(40)
self.assertEqual(
rpc.rpc_sync(dst_worker, add_rref_to_value, args=(rref, 50)), 90
)
self.assertEqual(
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 50)).wait(), 90
)
self.assertEqual(
rpc.remote(dst_worker, add_rref_to_value, args=(rref, 50)).to_here(), 90
)
@dist_init
def test_remote_same_worker(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_a = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 2)
)
rref_b = rpc.remote(
worker_name(dst_rank), torch.add, args=(torch.ones(n, n), 1)
)
rref_c = rpc.remote(
worker_name(dst_rank), my_rref_function, args=(rref_a, rref_b)
)
self.assertEqual(rref_c.to_here(), torch.ones(n, n) + 4)
    @dist_init(setup_rpc=True)
    def test_call_method_on_rref(self):
        """
        Tests that it is possible to call an instance method on a remote object
        by using rref.owner() as destination of the call.

        The remote value is incremented via rpc_sync, rpc_async, and
        rpc.remote in turn, then read back and compared with the sum of
        the initial value and all increments.
        """
        vals = [10, 2, 5, 7]
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst_rank)
        # creates a remote object
        rref = rpc.remote(dst_worker, MyClass, args=(vals[0],))
        # modifies state of the remote object
        rpc.rpc_sync(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[1]),
        )
        rpc.rpc_async(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[2]),
        ).wait()
        rpc.remote(
            rref.owner(),
            _call_method_on_rref,
            args=(MyClass.increment_value, rref, vals[3]),
        ).to_here()
        # queries state of the remote object
        result = rpc.rpc_sync(
            dst_worker, _call_method_on_rref, args=(MyClass.get_value, rref)
        )
        self.assertEqual(result, sum(vals))
    # Notice `rpc.api.shutdown()` accesses
    # `_delete_all_user_and_unforked_owner_rrefs` through
    # `torch.distributed.rpc.api`, so patching
    # `torch.distributed.rpc._delete_all_user_and_unforked_owner_rrefs` will
    # not help.
    @mock.patch.object(torch.distributed.rpc.api, "_delete_all_user_and_unforked_owner_rrefs")
    def _test_rref_leak(self, _mock_delete_all_user_and_unforked_owner_rrefs, ignore_leak):
        """With RRef cleanup mocked out, a UserRRef leaks; graceful shutdown
        must raise "Leaking RRef" unless leak detection is disabled."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Wait for all init to complete.
        dist.barrier()
        # This UserRRef is never cleaned up because deletion is mocked out.
        rref = rpc.remote(
            worker_name((self.rank + 1) % self.world_size),
            torch.add,
            args=(torch.ones(2, 2), 1),
        )
        import torch.distributed.rpc.api as api
        if ignore_leak:
            api._ignore_rref_leak = True
            rpc.shutdown(graceful=True)
        else:
            api._ignore_rref_leak = False
            with self.assertRaisesRegex(RuntimeError, "Leaking RRef"):
                rpc.shutdown(graceful=True)
    @dist_init(setup_rpc=False)
    def test_rref_leak(self):
        # A leaked RRef must make graceful shutdown fail when not ignored.
        self._test_rref_leak(ignore_leak=False)
    @dist_init(setup_rpc=False)
    def test_ignore_rref_leak(self):
        # With api._ignore_rref_leak set, shutdown succeeds despite the leak.
        self._test_rref_leak(ignore_leak=True)
    @dist_init
    def test_rref_str(self):
        """__str__ formats: OwnerRRef for local RRefs, UserRRef (with a fork
        id) for remote ones. The local_id values assume these are the first
        RRefs created in this process."""
        rref1 = RRef(self.rank)
        id_class = "GloballyUniqueId"
        self.assertEqual(
            "OwnerRRef({}(created_on={}, local_id=0))".format(id_class, self.rank), rref1.__str__()
        )
        dst_rank = (self.rank + 1) % self.world_size
        rref2 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        self.assertEqual(
            rref2.__str__(),
            "UserRRef(RRefId = {0}(created_on={1}, local_id=1), ForkId = {0}(created_on={1}, local_id=2))".format(
                id_class, self.rank
            ),
        )
@dist_init
def test_rref_get_future(self):
# Tests that we can obtain the future corresponding to the creation of
# the RRef on remote end
if self.rank == 0:
# Builtin
rref = rpc.remote(worker_name(1), torch.add, args=(1, 1))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# UDF
rref = rpc.remote(worker_name(1), foo_add, args=())
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
# Script
rref = rpc.remote(worker_name(1), my_script_func, args=(torch.tensor(1), ))
rref.to_here()
fut = rref._get_future()
self.assertIsInstance(fut, torch._C.Future)
    @dist_init
    def test_rref_context_debug_info(self):
        """Exercise the _rref_context_get_debug_info counters across three
        scenarios: purely local RRefs, RRefs shared as RPC arguments, and
        RRefs created via rpc.remote."""
        # This test checks local states that are modified by remote workers.
        # This means that we would need barrier before and after every check.
        # The barrier before the check makes sure that all previous states are
        # cleared globally, the barrier after ensures that no following states
        # change gets into the current check.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Check 1: local RRef does not update owners_ map or add a pending user.
        #################################################
        rref1 = RRef(self.rank)
        # don't need a barrier here as local RRef is handled by this thread
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertIn("num_pending_users", info)
        # RRef on local value is not added to context until shared across RPC
        self.assertEqual(0, int(info["num_owner_rrefs"]))
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after the check 1
        dist.barrier()
        # Check 2: Sharing RRef as an arg should update owners_ map
        ###########################################################
        dst_rank = (self.rank + 1) % self.world_size
        rpc.rpc_sync(worker_name(dst_rank), set_global_rref, args=(rref1,))
        # barrier before check 2
        wait_until_pending_futures_and_users_flushed()
        dist.barrier()
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertEqual(1, int(info["num_owner_rrefs"]))
        # no pending users since the fork is finished
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after check 2
        dist.barrier()
        # clear states for check 2
        rpc.rpc_sync(worker_name(dst_rank), clear_global_rref)
        # Wait for owner rref to be cleared.
        while int(info["num_owner_rrefs"]) != 0:
            info = _rref_context_get_debug_info()
            time.sleep(0.1)
        dist.barrier()
        # Check 3: rpc.remote call should update owners_ map
        ####################################################
        rref2 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        rref3 = rpc.remote(
            worker_name(dst_rank), torch.add, args=(torch.ones(2, 2), 1)
        )
        rref2.to_here()
        rref3.to_here()
        # barrier before check 3
        wait_until_pending_futures_and_users_flushed()
        dist.barrier()
        info = _rref_context_get_debug_info()
        self.assertIn("num_owner_rrefs", info)
        self.assertEqual(2, int(info["num_owner_rrefs"]))
        # no pending users since the fork is finished
        self.assertEqual(0, int(info["num_pending_users"]))
        # barrier after check 3
        dist.barrier()
@dist_init
def test_disable_gil_profiling(self):
# test that rpc.enable_gil_profiling(false) will result in
# GIL wait time not being recorded.
# GIL profiling should be disabled by default.
dst_rank = (self.rank + 1) % self.world_size
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertRaises(KeyError, lambda: info["agent.gil_average_wait_time_us"])
rpc.enable_gil_profiling(True)
rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(torch.ones(1), torch.ones(1))
)
info = rpc.api._get_current_rpc_agent().get_debug_info()
self.assertIn("agent.gil_average_wait_time_us", info)
    @dist_init(setup_rpc=False)
    def test_local_shutdown(self):
        """init_rpc followed by an immediate ungraceful shutdown must not
        hang or error, even though no messages were exchanged."""
        # test that we can start RPC and then immediately locally shutdown
        # without sending any messages.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        # pass in graceful=False to ensure that we don't wait for other workers.
        rpc.shutdown(graceful=False)
@dist_init
def test_debug_info(self):
# only test keys in this test case. Values should be covered by
# individual module debug info tests
import torch.distributed.autograd as dist_autograd
info = _get_debug_info()
rref_info = _rref_context_get_debug_info()
agent_info = rpc.api._get_current_rpc_agent().get_debug_info()
autograd_info = dist_autograd._get_debug_info()
common_keys = rref_info.keys() & agent_info.keys() & autograd_info.keys()
self.assertEqual(0, len(common_keys))
expected = {}
expected.update(rref_info)
expected.update(agent_info)
expected.update(autograd_info)
# NB: Key ordering is only preserved in python 3.6+. So here, we
# manually check keys are equal.
for key in expected.keys():
self.assertIn(key, info.keys())
for key in info.keys():
self.assertIn(key, expected.keys())
    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        IS_MACOS,
        "Test is flaky on MacOS since libuv error handling is not as robust as TCP",
    )
    def test_handle_send_exceptions(self):
        # test that if a callee node has gone down, we raise an appropriate
        # exception instead of just crashing.
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        rpc._set_rpc_timeout(10)
        # This barrier is needed to ensure that some workers do not exit before
        # others have been brought up, for non ProcessGroupAgent backends.
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()
        if self.rank == 1:
            dst_rank = (self.rank + 1) % self.world_size
            dst_worker = worker_name(dst_rank)
            # allow destination worker to exit without joining
            error_str = self.get_shutdown_error_regex()
            wait_until_node_failure(dst_rank, error_str)
            # Sending to a dead peer must fail with one of the known
            # shutdown error messages rather than crashing the process.
            fut = rpc.rpc_async(dst_worker, torch.add, args=(torch.ones(1), 3))
            # Shutdown sequence is not very well defined and as a result
            # we can see any of the error messages defined in get_shutdown_error_regex.
            with self.assertRaisesRegex(RuntimeError, error_str):
                fut.wait()
        # exit all workers non-gracefully.
        rpc.shutdown(graceful=False)
    @dist_init
    def test_deadlock(self):
        """Regression test for pytorch/pytorch#45089: a slow self-chain RPC
        followed by gloo process-group initialization must not deadlock."""
        # this test is copied from https://github.com/pytorch/pytorch/issues/45089
        if self.rank == 1:
            dst1 = worker_name((self.rank + 1) % self.world_size)
            x = torch.ones(2)
            y = torch.ones(2)
            # Slow RPC (~1s) with a generous timeout so it completes.
            rpc.rpc_async(dst1, RpcTest._slow_add, args=(x, y), timeout=15).wait()
        dist_initialized = dist.is_initialized()
        if not dist_initialized:
            dist.init_process_group(
                backend="gloo",
                init_method=self.file_init_method,
                rank=self.rank,
                world_size=self.world_size,
            )
@dist_init(setup_rpc=False)
def test_local_shutdown_with_rpc(self):
# test that we can start RPC, send RPCs, and then run local shutdown.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
# A barrier is needed to ensure that all RPCs are processed.
# Otherwise, some RPCs can timeout since the receiving end
# has terminated.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# pass in graceful=False to ensure that we don't wait for other workers.
rpc.shutdown(graceful=False)
@dist_init(setup_rpc=False)
def test_set_and_get_default_rpc_timeout(self):
timeout = 0.5
# A new `RpcBackendOptions` is constructed
# when accessing `self.rpc_backend_options`.
rpc_backend_options = self.rpc_backend_options
rpc_backend_options.rpc_timeout = timeout
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=rpc_backend_options,
)
set_timeout = rpc.get_rpc_timeout()
self.assertEqual(timeout, set_timeout)
rpc.shutdown()
    @dist_init
    def test_default_timeout_used(self):
        """
        Tests that if no timeout is passed into rpc_async and rpc_sync, then the
        default timeout is used.
        """
        dst_rank = (self.rank + 1) % self.world_size
        rpc._set_rpc_timeout(0.001)  # 1 ms
        # futures should time out and be marked with an exception indicating it as such.
        futs = [
            rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=())
            for _ in range(10)
        ]
        expected_error = self.get_timeout_error_regex()
        for fut in futs:
            with self.assertRaisesRegex(RuntimeError, expected_error):
                fut.wait()
        # ensure that if a new timeout is set old futures don't time out but new ones do.
        rpc._set_rpc_timeout(200)  # 200 seconds
        # create a longstanding RPC.
        fut1 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        # now, set a short timeout.
        rpc._set_rpc_timeout(0.001)
        # fut2 should time out, fut1 should not.
        fut2 = rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut2.wait()
        fut1.wait()
        # Zero timeout means infinity, so future should run to completion.
        rpc._set_rpc_timeout(0)
        rpc.rpc_async(worker_name(dst_rank), my_sleep_func, args=()).wait()
        # reset to default timeout so shutdown messages can process cleanly.
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    @dist_init
    def test_rpc_timeouts(self):
        """Per-call `timeout=` overrides the agent default for both rpc_async
        and rpc_sync; timeout=0 disables the timeout entirely."""
        # TODO: enable timeouts for rpc.remote/RRef (https://github.com/pytorch/pytorch/issues/33803)
        dst_rank = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst_rank)
        timeout = 0.1  # 100 ms
        expected_error = self.get_timeout_error_regex()
        # Test async UDF
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,)).wait()
        # Test sync UDF
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=timeout)
        # Ensure run to completion if there is no timeout and we use the default
        # RPC timeout.
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
        # If we set a default timeout for RPCs, it should be respected, though
        # still overridden if we pass in a different timeout to the APIs.
        rpc._set_rpc_timeout(0.001)
        fut = rpc.rpc_async(dst_worker, my_sleep_func, args=(1,))
        with self.assertRaisesRegex(RuntimeError, expected_error):
            fut.wait()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,))
        # The RPCs should run to completion since we override the timeout.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=5).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=5)
        # Passing in a zero timeout should ensure that the RPC won't time out.
        rpc.rpc_async(dst_worker, my_sleep_func, args=(1,), timeout=0).wait()
        rpc.rpc_sync(dst_worker, my_sleep_func, args=(1,), timeout=0)
        # Reset for clean shutdown
        rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
    def test_dist_init_decorator(self):
        """dist_init must work both when called with arguments and when
        applied bare; the deliberate redefinition exercises both forms."""
        @dist_init(setup_rpc=False)
        def test_func(self):
            return "expected result"
        self.assertEqual(test_func(self), "expected result")
        @dist_init
        def test_func(self):
            return "expected result"
        self.assertEqual(test_func(self), "expected result")
def test_use_rpc_pickler(self):
class TestPickler:
pass
test_pickler = TestPickler()
with _use_rpc_pickler(test_pickler):
self.assertTrue(torch.distributed.rpc.api._default_pickler is test_pickler)
self.assertTrue(
torch.distributed.rpc.api._default_pickler is _internal_rpc_pickler
)
    @dist_init
    def test_wait_all(self):
        """_wait_all tracks rpc_async futures and waits for them on exit."""
        with _wait_all():
            self.assertTrue(_thread_local_var.future_list == [])
            dst = worker_name((self.rank + 1) % self.world_size)
            fut = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
            self.assertTrue(len(_thread_local_var.future_list) == 1)
            self.assertTrue(isinstance(_thread_local_var.future_list[0], torch._C.Future))
        # On exit the future was waited on and the tracking list removed.
        self.assertTrue(fut.done())
        self.assertEqual(fut.wait(), torch.ones(2, 2) + 1)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))

    @dist_init
    def test_wait_all_multiple_call(self):
        """Futures from many calls inside a single _wait_all are all tracked."""
        with _wait_all():
            self.assertTrue(_thread_local_var.future_list == [])
            dst = worker_name((self.rank + 1) % self.world_size)
            for i in range(20):
                fut = rpc.rpc_async(dst, torch.add, (torch.ones(i, i), 1))
                res = rpc.rpc_sync(dst, torch.add, (torch.ones(i, i), 1))
                self.assertEqual(res, torch.ones(i, i) + 1)
                self.assertEqual(fut.wait(), torch.ones(i, i) + 1)
            # Only the async calls are tracked (one per loop iteration).
            self.assertTrue(len(_thread_local_var.future_list) == 20)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    @dist_init
    def test_wait_all_timeout(self):
        """A timed-out future inside _wait_all propagates the timeout error."""
        expected_error = self.get_timeout_error_regex()
        with self.assertRaisesRegex(RuntimeError, expected_error):
            with _wait_all():
                self.assertTrue(_thread_local_var.future_list == [])
                dst = worker_name((self.rank + 1) % self.world_size)
                timeout = 0.1  # 100 ms
                fut = rpc.rpc_async(dst, my_sleep_func, args=(1,), timeout=timeout)
        # The tracking list is cleaned up even on failure.
        self.assertFalse(hasattr(_thread_local_var, "future_list"))

    @dist_init
    def test_wait_all_raise_in_user_func(self):
        """An exception raised by the remote UDF propagates out of _wait_all."""
        with self.assertRaises(ValueError):
            with _wait_all():
                self.assertTrue(_thread_local_var.future_list == [])
                dst = worker_name((self.rank + 1) % self.world_size)
                fut = rpc.rpc_async(dst, raise_func)
        self.assertFalse(hasattr(_thread_local_var, "future_list"))

    @dist_init
    def test_wait_all_raise_in_body(self):
        """An exception raised in the body itself propagates out of _wait_all."""
        with self.assertRaises(ValueError):
            with _wait_all():
                raise_func()
        self.assertFalse(hasattr(_thread_local_var, "future_list"))
    # Class-level Event used by timed_out_rpc(); re-created in each subprocess
    # by the tests that use it (see test_wait_all_exit_early_*).
    timed_out_rpc_event = None

    @staticmethod
    def timed_out_rpc():
        # Blocks the server-side RPC thread until the test sets the event.
        RpcTest.timed_out_rpc_event.wait()
@dist_init
def test_wait_all_exit_early_python(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func)
fut3 = rpc.rpc_async(dst, raise_func)
# We should receive the error from fut2
with self.assertRaisesRegex(ValueError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_builtin(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
fut3 = rpc.rpc_async(dst, torch.add, args=(torch.rand(10), torch.rand(5)))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, "size of tensor"):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
@dist_init
def test_wait_all_exit_early_script_function(self):
# Initialize the event in the subprocess.
RpcTest.timed_out_rpc_event = Event()
# Wait for all processes to initialize event.
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
dst = worker_name((self.rank + 1) % self.world_size)
fut1 = rpc.rpc_async(dst, RpcTest.timed_out_rpc)
fut2 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
fut3 = rpc.rpc_async(dst, raise_func_script, args=(expected_err,))
# We should receive the error from fut2
with self.assertRaisesRegex(RuntimeError, expected_err):
torch.futures.wait_all([fut1, fut2, fut3])
# Unblock RPC thread for fut1
RpcTest.timed_out_rpc_event.set()
    @dist_init
    def test_function_not_on_callee(self):
        # test that if a function does not exist on a callee, we don't crash,
        # instead we get an AttributeError indicating that the func does not exist.
        this_module = sys.modules[__name__]
        caller_worker = "worker0"
        callee_worker = "worker1"

        if self.rank == 1:
            # Use delattr to remove the binding of a func on this nodes
            delattr(this_module, "foo_add")
            # notify remote end that we have removed it.
            rpc.rpc_sync(caller_worker, set_value, args=(self.rank,))

        if self.rank == 0:
            # func exists on caller, but not callee.
            # wait for remote end to remove the binding of foo_add func.
            wait_for_value_future()
            # Ensure that we have the attribute on this module. Otherwise, the test could fail due to a caller-side pickling error.
            self.assertTrue(hasattr(this_module, "foo_add"))
            with self.assertRaisesRegex(
                AttributeError, "RPC pickler does not serialize"
            ):
                rpc.rpc_sync(callee_worker, foo_add, args=())
    @dist_init
    def test_non_garbage_collected_user_rref_due_to_local_circular_dependency(self):
        """A user RRef kept alive by a local reference cycle must not break shutdown."""
        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        a = MyClass(1)
        b = MyClass(2)

        # This is to make Python not garbage collect a and b.
        a.other = b
        b.other = a

        n = self.rank
        # The RRef hangs off the cycle, so it is reclaimed only by the cycle GC.
        a.rref = rpc.remote(
            dst_worker_name,
            torch.add,
            args=(torch.ones(n, n), 2)
        )
    @dist_init(setup_rpc=False)
    def test_use_rref_after_shutdown(self):
        """Using a user RRef after a graceful shutdown must raise, not crash."""
        rpc.init_rpc(
            name="worker%d" % self.rank,
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref = rpc.remote(
            worker_name(dst_rank),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n)),
        )
        # pass in graceful=True to ensure that local UserRRefs are deleted.
        rpc.shutdown(graceful=True)

        with self.assertRaisesRegex(
            RuntimeError, "Cannot call to_here\\(\\) on it after deletion."
        ):
            rref.to_here()

        with self.assertRaisesRegex(
            RuntimeError, "Cannot call fork an UserRRef after deletion."
        ):
            import torch.distributed.rpc.internal as internal
            internal.serialize(rref)
    @staticmethod
    def _return_gpu_tensor():
        # Helper UDF: returns a CUDA tensor on device 0.
        return torch.rand(3, 3).cuda(0)

    @staticmethod
    def _return_gpu_tensor_list():
        # Helper UDF: returns CUDA tensors spread across devices 0 and 1.
        return [torch.rand(3, 3).cuda(0), torch.rand(3, 3).cuda(1)]

    @staticmethod
    def _gpu_tensor_list_arg(tensor_list):
        # Helper UDF: accepts a tensor list argument, returns a CPU tensor.
        return torch.rand(3, 3)

    def _create_rref(self):
        # Creates a user RRef owned by the worker two ranks ahead of us.
        owner_rank = (self.rank + 2) % self.world_size
        return rpc.remote(
            worker_name(owner_rank),
            torch.add,
            args=(torch.zeros(2, 2), 1)
        )
@dist_init
def test_user_rrefs_confirmed(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret = rpc.rpc_sync(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret, True)
@dist_init
def test_user_rrefs_confirmed_remote(self):
dst_rank = (self.rank + 1) % self.world_size
rref = self._create_rref()
ret_rref = rpc.remote(
worker_name(dst_rank),
check_rref_confirmed,
args=(rref,)
)
self.assertEqual(ret_rref.to_here(), True)
    @dist_init
    def test_rref_py_pickle_not_supported(self):
        # RRefs must be serialized through the RPC layer, not the generic
        # Python pickler used by torch.save.
        local_rref = RRef(35)
        with TemporaryFileName() as fname:
            with self.assertRaisesRegex(RuntimeError, "Can not pickle rref in python pickler"):
                torch.save(local_rref, fname)

    @dist_init
    def test_remote_throw(self):
        # Exceptions raised by the remote function surface from to_here().
        rref = rpc.remote(worker_name((self.rank + 1) % self.world_size),
                          raise_or_inc,
                          args=(torch.ones(2),))
        with self.assertRaisesRegex(Exception, ".*Expected error.*"):
            rref.to_here()
    @dist_init
    def test_non_cont_tensors(self):
        """Non-contiguous tensors must survive an RPC round trip unchanged."""
        if self.rank == 0:
            # Create a non-contiguous tensor.
            t = torch.rand(5, 5)
            t_view = t.narrow(1, 2, 2)
            self.assertFalse(t_view.is_contiguous())
            t_cont = t_view.contiguous()
            self.assertTrue(t_cont.is_contiguous())
            self.assertEqual(t_view, t_cont)

            # Send non-cont tensor over RPC.
            next_rank = (self.rank + 1) % self.world_size
            t_ret = rpc.rpc_sync(worker_name(next_rank), non_cont_test, args=(t_view, t_cont))

            # Verify the returned tensor.
            self.assertEqual(t_view, t_ret)
            # The returned tensor keeps its non-contiguous layout.
            self.assertFalse(t_ret.is_contiguous())
    @dist_init
    def test_callback_simple(self):
        """A then() callback observes the RPC result; waiting twice is safe."""
        set_by_cb = concurrent.futures.Future()
        n = self.rank + 1

        def callback(fut):
            ret = fut.wait()
            self.assertEqual(ret, torch.ones(n, n) * 2)
            set_by_cb.set_result(ret.clone() + 1)

        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )

        fut.then(callback)

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        # The callback ran and saw the same value (plus one).
        self.assertEqual(set_by_cb.result(), torch.ones(n, n) * 2 + 1)
        # Waiting again returns the cached result.
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
    @dist_init
    def test_callback_wrong_arg_num(self):
        """A callback with the wrong arity fails the derived future only."""
        set_by_cb = concurrent.futures.Future()
        n = self.rank + 1
        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )
        cb_fut = fut.then(my_function)
        # The original future is unaffected by the bad callback.
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
        with self.assertRaisesRegex(
            RuntimeError,
            "my\\_function\\(\\) missing 2 required positional arguments"
        ):
            cb_fut.wait()

    @dist_init
    def test_callback_wrong_arg_type(self):
        """An error raised inside the callback surfaces from the derived future."""
        dst = worker_name((self.rank + 1) % self.world_size)
        fut0 = rpc.rpc_async(dst, torch.add, args=(torch.ones(2, 2), 1))
        # The callback receives a Future, so ``x + 1`` is a type error at runtime.
        fut1 = fut0.then(lambda x: x + 1)
        with self.assertRaisesRegex(
            RuntimeError,
            "unsupported operand type\\(s\\) for \\+"
        ):
            fut1.wait()
    @dist_init
    def test_callback_multi(self):
        """Multiple then() callbacks on one future all observe the same result."""
        num_cbs = 10
        n = self.rank + 1

        def callback(idx, fut):
            ret = fut.wait()
            self.assertEqual(ret, torch.ones(n, n) * 2)
            # Each callback's return value becomes its derived future's value.
            return ret + idx

        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )

        cb_futs = []
        for idx in range(num_cbs):
            cb_futs.append(fut.then(partial(callback, idx)))

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)

        for idx in range(num_cbs):
            self.assertEqual(
                cb_futs[idx].wait(),
                torch.ones(n, n) * 2 + idx
            )

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
dst = worker_name(n % self.world_size)
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size),
torch.add,
args=(torch.ones(n, n), 1)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
    @dist_init
    def test_callback_in_rpc(self):
        """A remote UDF can itself use future callbacks (add_use_future_cb)."""
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        ret = rpc.rpc_sync(
            dst1,
            add_use_future_cb,
            args=(dst2, torch.ones(2, 2), 1, 2)
        )
        self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)

    @dist_init
    def test_callback_with_ret(self):
        """A callback may issue a nested RPC and wait on its chained future."""
        dst = worker_name((self.rank + 1) % self.world_size)

        def callback(fut0):
            fut2 = rpc.rpc_async(
                dst,
                torch.add,
                args=(fut0.wait(), 1)
            ).then(lambda fut1: fut1.wait() + 1)

            return fut2.wait()

        fut3 = rpc.rpc_async(
            dst,
            torch.add,
            args=(torch.ones(2, 2), 1)
        ).then(callback)

        # ones + 1 (outer add) + 1 (nested add) + 1 (lambda) = ones + 3.
        self.assertEqual(fut3.wait(), torch.ones(2, 2) + 3)

    @dist_init
    def test_callback_with_error(self):
        """The callback observes the source error; its own error replaces it."""
        dst = worker_name((self.rank + 1) % self.world_size)

        def callback(fut0):
            with self.assertRaisesRegex(ValueError, "Expected error"):
                fut0.wait()
            raise RuntimeError("Another expected error")

        fut1 = rpc.rpc_async(dst, raise_func).then(callback)
        with self.assertRaisesRegex(RuntimeError, "Another expected error"):
            fut1.wait()

    @dist_init
    def test_callback_none(self):
        """Passing None as a then() callback is rejected with a TypeError."""
        dst = worker_name((self.rank + 1) % self.world_size)
        with self.assertRaisesRegex(
            TypeError,
            "incompatible function arguments."
        ):
            rpc.rpc_async(dst, raise_func).then(None)
    @dist_init
    def test_add_done_callback(self):
        """add_done_callback runs after completion; a then() future is used to order it."""
        set_by_cb = False
        n = self.rank + 1

        def callback(fut):
            nonlocal set_by_cb
            fut.wait()
            set_by_cb = True

        fut = rpc.rpc_async(
            worker_name(n % self.world_size),
            torch.add,
            args=(torch.ones(n, n), torch.ones(n, n))
        )

        fut.add_done_callback(callback)
        fut_then = fut.then(lambda _: True)

        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)

        # We have no guarantee that the add_done_callback fn will execute before the test finishes.
        # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback
        fut_then.wait()
        self.assertTrue(set_by_cb)
        self.assertEqual(fut.wait(), torch.ones(n, n) * 2)
@dist_init
def test_mark_future_twice(self):
fut = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
torch.add,
args=(torch.zeros(2, 2), 1)
)
self.assertEqual(fut.wait(), torch.zeros(2, 2) + 1)
with self.assertRaisesRegex(
RuntimeError,
"Future can only be marked completed once"
):
fut.set_result(1)
@dist_init
def test_pickle_future(self):
fut = torch.futures.Future()
errMsg = "Can not pickle torch.futures.Future"
dst = worker_name((self.rank + 1) % self.world_size)
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_sync(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.rpc_async(dst, fail_on_fut, args=(fut,))
with TemporaryFileName() as fname:
with self.assertRaisesRegex(RuntimeError, errMsg):
rpc.remote(dst, fail_on_fut, args=(fut,))
@dist_init
def test_future_done(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, torch.add, args=(torch.zeros(2), 1))
fut.wait()
self.assertTrue(fut.done())
@dist_init
def test_future_done_exception(self):
dst = worker_name((self.rank + 1) % self.world_size)
fut = rpc.rpc_async(dst, raise_func)
with self.assertRaisesRegex(ValueError, "Expected error"):
fut.wait()
self.assertTrue(fut.done())
    def _test_future_cb(self, func):
        # Shared driver: run ``func`` remotely and verify the nested add result.
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        ret = rpc.rpc_sync(
            dst1,
            func,
            args=(dst2, torch.ones(2, 2), 1, 2)
        )
        self.assertEqual(ret, torch.ones(2, 2) + 1 + 2)

    @dist_init
    def test_future_in_rpc(self):
        self._test_future_cb(add_use_future_set_result)

    @dist_init
    def test_future_nested_callback(self):
        self._test_future_cb(add_use_future_nested_cb)
def _run_func_in_mode(self, to, fn, mode, args=None, kwargs=None):
if mode == RPCExecMode.SYNC:
return rpc.rpc_sync(to, fn, args=args, kwargs=kwargs)
elif mode == RPCExecMode.ASYNC:
return rpc.rpc_async(to, fn, args=args, kwargs=kwargs).wait()
elif mode == RPCExecMode.REMOTE:
return rpc.remote(to, fn, args=args, kwargs=kwargs).to_here()
    def _test_async_function_raise(self, mode):
        # An async-execution function that raises should surface the error in
        # every execution mode (sync / async / remote).
        with self.assertRaisesRegex(RuntimeError, "Expected error"):
            self._run_func_in_mode(
                worker_name((self.rank + 1) % self.world_size),
                async_raise_func,
                mode
            )

    @dist_init
    def test_async_function_raise(self):
        self._test_async_function_raise(RPCExecMode.SYNC)

    @dist_init
    def test_async_function_raise_async(self):
        self._test_async_function_raise(RPCExecMode.ASYNC)

    @dist_init
    def test_async_function_raise_remote(self):
        self._test_async_function_raise(RPCExecMode.REMOTE)
    def _test_async_function_wrong_return_type(self, mode):
        # Async-execution functions must return a Future; anything else errors.
        errMsg = (
            "Functions decorated with @rpc\\.async_function must return a "
            "torch\\.futures\\.Future object,"
        )
        with self.assertRaisesRegex(RuntimeError, errMsg):
            self._run_func_in_mode(
                worker_name((self.rank + 1) % self.world_size),
                async_wrong_type,
                mode
            )

    @dist_init
    def test_async_function_wrong_return_type(self):
        self._test_async_function_wrong_return_type(RPCExecMode.SYNC)

    @dist_init
    def test_async_function_wrong_return_type_async(self):
        self._test_async_function_wrong_return_type(RPCExecMode.ASYNC)

    @dist_init
    def test_async_function_wrong_return_type_remote(self):
        self._test_async_function_wrong_return_type(RPCExecMode.REMOTE)

    @dist_init
    def test_async_function_simple(self):
        # Basic async-execution function: nested RPC on dst2 computes x + 1.
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        ret = rpc.rpc_sync(dst1, async_add, args=(dst2, torch.ones(2, 2), 1))
        self.assertEqual(ret, torch.ones(2, 2) + 1)

    def _test_async_function(self, fn, mode=RPCExecMode.SYNC):
        # Shared driver: fn adds 1 and then 2 to ones(2, 2) via nested RPCs.
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        args = (dst2, torch.ones(2, 2), 1, 2)
        ret = self._run_func_in_mode(dst1, fn, mode, args=args)
        self.assertEqual(ret, torch.ones(2, 2) + 3)

    @dist_init
    def test_async_function_with_future_ctor(self):
        self._test_async_function(async_add_with_future_ctor)

    @dist_init
    def test_async_function_with_future_ctor_remote(self):
        self._test_async_function(
            async_add_with_future_ctor,
            RPCExecMode.REMOTE
        )

    @dist_init
    def test_async_function_chained(self):
        self._test_async_function(async_add_chained)

    @dist_init
    def test_async_function_chained_remote(self):
        self._test_async_function(async_add_chained, RPCExecMode.REMOTE)

    @dist_init
    def test_async_function_nested(self):
        self._test_async_function(async_add_nested)

    @dist_init
    def test_async_function_nested_remote(self):
        self._test_async_function(async_add_nested, RPCExecMode.REMOTE)

    @dist_init
    def test_async_static_method(self):
        self._test_async_function(AsyncExecutionClass.static_async_add)

    @dist_init
    def test_async_static_method_remote(self):
        self._test_async_function(
            AsyncExecutionClass.static_async_add,
            RPCExecMode.REMOTE
        )

    @dist_init
    def test_async_class_method(self):
        self._test_async_function(AsyncExecutionClass.class_async_add)

    @dist_init
    def test_async_class_method_remote(self):
        self._test_async_function(
            AsyncExecutionClass.class_async_add,
            RPCExecMode.REMOTE
        )
    def _test_test_async_class_rref_proxy(self, mode=RPCExecMode.SYNC):
        # Exercise async-execution methods through every RRef proxy flavor
        # (rpc_sync / rpc_async / remote) on a remotely created instance.
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        rref = rpc.remote(dst1, AsyncExecutionClass)

        x = torch.ones(2, 2)
        y = torch.ones(2, 2) + 1
        if mode == RPCExecMode.SYNC:
            ret = rref.rpc_sync().static_async_add(dst2, x, x, y)
            ret += rref.rpc_sync().class_async_add(dst2, x, x, y)
            ret += rref.rpc_sync().bound_async_add(dst2, x, x, y)
        elif mode == RPCExecMode.ASYNC:
            ret = rref.rpc_async().static_async_add(dst2, x, x, y).wait()
            ret += rref.rpc_async().class_async_add(dst2, x, x, y).wait()
            ret += rref.rpc_async().bound_async_add(dst2, x, x, y).wait()
        elif mode == RPCExecMode.REMOTE:
            ret = rref.remote().static_async_add(dst2, x, x, y).to_here()
            ret += rref.remote().class_async_add(dst2, x, x, y).to_here()
            ret += rref.remote().bound_async_add(dst2, x, x, y).to_here()

        # Each call yields x + x + y = 4x (x is all ones); summed over 3 calls.
        self.assertEqual(ret, 3 * 4 * x)

    @dist_init
    def test_async_class_rref_proxy(self):
        self._test_test_async_class_rref_proxy()

    @dist_init
    def test_async_class_rref_proxy_async(self):
        self._test_test_async_class_rref_proxy(mode=RPCExecMode.ASYNC)

    @dist_init
    def test_async_class_rref_proxy_remote(self):
        self._test_test_async_class_rref_proxy(mode=RPCExecMode.REMOTE)
    def _test_async_function_multi(self, fn, mode=RPCExecMode.SYNC):
        # Shared driver for chained/fanout async functions: fn applies ``step``
        # a total of ``num`` times to ones(2, 2).
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)
        num = 20
        step = 3
        args = (dst2, torch.ones(2, 2), num, step)
        ret = self._run_func_in_mode(dst1, fn, mode, args=args)
        self.assertEqual(ret, torch.ones(2, 2) + num * step)

    @dist_init
    def test_async_function_multi_chained(self):
        self._test_async_function_multi(async_add_chained_multi)

    @dist_init
    def test_async_function_multi_chained_async(self):
        self._test_async_function_multi(
            async_add_chained_multi,
            RPCExecMode.ASYNC
        )

    @dist_init
    def test_async_function_multi_chained_remote(self):
        self._test_async_function_multi(
            async_add_chained_multi,
            RPCExecMode.REMOTE
        )

    @dist_init
    def test_async_function_multi_fanout(self):
        self._test_async_function_multi(async_add_multi_fanout)

    @dist_init
    def test_async_function_multi_fanout_async(self):
        self._test_async_function_multi(
            async_add_multi_fanout,
            RPCExecMode.ASYNC
        )

    @dist_init
    def test_async_function_multi_fanout_remote(self):
        self._test_async_function_multi(
            async_add_multi_fanout,
            RPCExecMode.REMOTE
        )
    def _test_return_future(self, mode):
        # Returning a raw Future from a plain (non-async_execution) RPC is a
        # pickling error in every execution mode.
        with self.assertRaisesRegex(
            RuntimeError,
            "Can not pickle torch.futures.Future"
        ):
            self._run_func_in_mode(
                worker_name((self.rank + 1) % self.world_size),
                return_future,
                mode
            )

    @dist_init
    def test_return_future(self):
        self._test_return_future(RPCExecMode.SYNC)

    @dist_init
    def test_return_future_async(self):
        self._test_return_future(RPCExecMode.ASYNC)

    @dist_init
    def test_return_future_remote(self):
        self._test_return_future(RPCExecMode.REMOTE)
@dist_init
def test_rref_timeout(self):
# This test is similar to ones in FaultyProcessGroupTest, but is meant to be
# run with other backends besides ProcessGroup.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, my_sleep_func, args=(2, ), timeout=0.01)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
wait_until_owners_and_forks_on_rank(1, 1, rank=1)
    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
        "init_pg_then_rpc does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
    )
    def test_init_pg_then_rpc(self):
        """Initializing the process group before RPC must leave both usable."""
        dist.init_process_group(
            backend="gloo",
            init_method=self.init_method,
            rank=self.rank,
            world_size=self.world_size,
        )

        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        # Test RPC.
        next_rank = (self.rank + 1) % self.world_size
        ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
        self.assertEqual(ret, torch.ones(2, 2) + 1)

        # Test PG
        dist.barrier()

        rpc.shutdown()

    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
        "init_rpc_then_pg does not work with TCP init, see https://github.com/pytorch/pytorch/issues/41614."
    )
    def test_init_rpc_then_pg(self):
        """Initializing RPC before the process group must leave both usable."""
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        dist.init_process_group(
            backend="gloo",
            init_method=self.init_method,
            rank=self.rank,
            world_size=self.world_size,
        )

        # Test RPC.
        next_rank = (self.rank + 1) % self.world_size
        ret = rpc.rpc_sync(worker_name(next_rank), torch.add, args=(torch.ones(2, 2), 1))
        self.assertEqual(ret, torch.ones(2, 2) + 1)

        # Test PG
        dist.barrier()

        rpc.shutdown()
@dist_init
def test_wait_all_with_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
@dist_init
def test_wait_all_with_partial_exception(self):
futs = []
dst = worker_name((self.rank + 1) % self.world_size)
for _ in range(10):
futs.append(rpc.rpc_async(dst, torch.add, args=(torch.ones(2), 1)))
futs.append(rpc.rpc_async(dst, raise_func))
with self.assertRaisesRegex(ValueError, "Expected error"):
ret = torch.futures.wait_all(futs)
    @dist_init(setup_rpc=False)
    @sandcastle_skip_if(
        os.environ.get("RPC_INIT_WITH_TCP", None) == "1",
        "Test does not work with TCP init, see https://github.com/pytorch/pytorch/issues/46491",
    )
    def test_init_rpc_twice(self):
        """RPC can be shut down and re-initialized within the same process."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)

        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )
        rpc.shutdown()

        # Wait for all init to complete.
        dist.barrier()

        # Ensure rpc initialization works again.
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        # Verify RPCs work after re-init.
        dst = worker_name((self.rank + 1) % self.world_size)
        rpc.rpc_sync(dst, torch.add, args=(torch.ones(2, 2), 1))
        rpc.rpc_sync(dst, foo_add, args=())

        rpc.shutdown()
    def test_wrong_types(self):
        """init_rpc validates the types of ``backend`` and ``rpc_backend_options``."""
        with self.assertRaisesRegex(
            TypeError,
            "Argument backend must be a member of BackendType",
        ):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                backend="TENSORPIPE",
            )

        with self.assertRaisesRegex(
            TypeError,
            "Argument rpc_backend_options must be an instance of RpcBackendOptions",
        ):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                backend=self.rpc_backend,
                rpc_backend_options={"init_method": self.init_method}
            )

    def test_cannot_infer_backend_from_options(self):
        # An exception should be raised if the backend isn't specified but
        # options are given which are not an instance of any of the known
        # agents' option classes.
        rpc_backend_options = FooBackendOptions(self.init_method)

        with self.assertRaisesRegex(TypeError, "Could not infer backend for options"):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                # Do _not_ pass backend.
                rpc_backend_options=rpc_backend_options,
            )
    @dist_init
    def test_owner_rref_backward(self):
        """Owner RRef.backward: local grads, dist_autograd contexts, double backward, errors."""
        dst = worker_name((self.rank + 1) % self.world_size)
        t1 = torch.rand(10, 10, requires_grad=True)
        # Local (no dist_autograd context) backward accumulates into t1.grad.
        rref = rpc.RRef(t1.sum() + t1.sum())
        rref.backward()
        expected_grad = torch.ones_like(t1) * 2
        self.assertEqual(expected_grad, t1.grad)

        # Backward within a dist_autograd context stores grads in the context.
        with dist_autograd.context() as context_id:
            t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
            rref = rpc.RRef(t2.sum())
            rref.backward(context_id)
            self.assertEqual(expected_grad, dist_autograd.get_gradients(context_id)[t1])

        # Double backward.
        with dist_autograd.context() as context_id:
            t2 = rpc.rpc_sync(dst, torch.add, args=(t1, t1))
            rref = rpc.RRef(t2.sum())
            rref.backward(context_id, retain_graph=True)
            rref.backward(context_id)
            self.assertEqual(expected_grad * 2, dist_autograd.get_gradients(context_id)[t1])

        # Test errors.
        with self.assertRaisesRegex(RuntimeError, "tensors does not require grad and does not have a grad_fn"):
            rpc.RRef(torch.rand(10)).backward()

        with self.assertRaisesRegex(RuntimeError, "grad can be implicitly created only for scalar outputs"):
            rpc.RRef(torch.rand(10, requires_grad=True)).backward()

        with self.assertRaisesRegex(RuntimeError, "Could not find autograd context with id: 100"):
            rpc.RRef(torch.rand(10, requires_grad=True).sum()).backward(100)

        with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
            rpc.RRef("foo").backward()

    @staticmethod
    def _sum(x):
        # Helper UDF used by the backward tests.
        return x.sum()

    @staticmethod
    def _identity(x):
        # Helper UDF: returns its argument unchanged.
        return x
    @dist_init
    def test_user_rref_backward(self):
        """User RRef.backward requires a dist_autograd context and a tensor value."""
        dst = worker_name((self.rank + 1) % self.world_size)
        t = torch.rand(10, requires_grad=True)
        with dist_autograd.context() as context_id:
            rref = rpc.remote(dst, RpcTest._sum, args=(t,))
            rref.backward(context_id, retain_graph=True)
            rref.backward(context_id)
            # Two backward passes accumulate to 2 * ones.
            self.assertEqual(torch.ones_like(t) * 2, dist_autograd.get_gradients(context_id)[t])

        with dist_autograd.context() as context_id:
            rref = rpc.remote(dst, RpcTest._identity, args=("foo",))
            with self.assertRaisesRegex(RuntimeError, "RRef should contain a tensor for .backward()"):
                rref.backward(context_id)

            with self.assertRaisesRegex(RuntimeError, "User RRefs require 'dist_autograd_ctx_id' to be specified"):
                rref.backward()
    @dist_init(setup_rpc=False)
    def test_shutdown_errors(self):
        """Errors raised during graceful shutdown's _all_gather/barrier are surfaced."""
        initialize_pg(self.file_init_method, self.rank, self.world_size)

        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options,
        )

        if self.rank != 0:
            og_func = rpc.api._broadcast_to_followers
            og_rref_func = rpc.api._delete_all_user_and_unforked_owner_rrefs

            # Monkey-patch _broadcast_to_followers to fail, which would ensure
            # _all_gather on leader raises an exception.
            def raise_error(sequence_id, objects_map):
                og_func(sequence_id, objects_map)
                raise RuntimeError('simulation')

            # Monkey-patch _delete_all_user_and_unforked_owner_rrefs to fail,
            # which would ensure barrier is not called on followers.
            def rref_error():
                raise RuntimeError('simulation rref')

            try:
                rpc.api._broadcast_to_followers = raise_error
                rpc.api._delete_all_user_and_unforked_owner_rrefs = rref_error
                with self.assertRaisesRegex(RuntimeError, 'simulation rref'):
                    rpc.shutdown()
            finally:
                # Always restore the patched functions so later tests see the
                # real implementations.
                rpc.api._broadcast_to_followers = og_func
                rpc.api._delete_all_user_and_unforked_owner_rrefs = og_rref_func
        else:
            with self.assertRaisesRegex(RuntimeError, 'timed out in _all_gather'):
                rpc.shutdown()

        dist.barrier()
class CudaRpcTest(RpcAgentTestFixture):
    """RPC tests that require CUDA devices on the participating workers."""

    @skip_if_lt_x_gpu(2)
    @dist_init
    def test_profiler_remote_cuda(self):
        """Profiling remote CUDA ops records kernel events on the right devices."""
        if self.rank != 1:
            return

        dst_cuda_0 = (self.rank + 1) % self.world_size
        dst_cuda_1 = (self.rank + 2) % self.world_size
        dst_worker_cuda_0 = worker_name(dst_cuda_0)
        dst_worker_cuda_1 = worker_name(dst_cuda_1)

        with _profile(use_cuda=True) as p:
            fut1 = rpc.rpc_async(dst_worker_cuda_0, udf_with_torch_ops, args=(0, ))
            fut2 = rpc.rpc_async(dst_worker_cuda_1, udf_with_torch_ops, args=(1, ))
            fut1.wait()
            fut2.wait()

        def get_name(event):
            # Strip the remote-op prefix to recover the bare op name.
            return event.name[event.name.find(REMOTE_OP_STR) + len(REMOTE_OP_STR):]

        function_events = p.function_events
        for event in function_events:
            if event.is_async:
                # Async (caller-side) events carry no CUDA timing or kernels.
                self.assertEqual(0, event.cuda_time_total)
                self.assertEqual([], event.kernels)
                self.assertEqual(0, event.cuda_time)
            else:
                if event.node_id == 1:
                    continue
                self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1])
                if get_name(event) in EXPECTED_REMOTE_EVENTS:
                    self.assertGreater(event.cuda_time_total, 0)
                    self.assertEqual(1, len(event.kernels))
                    kernel = event.kernels[0]
                    # The kernel must run on the device the UDF was asked to use.
                    if event.node_id == dst_cuda_0:
                        self.assertEqual(kernel.device, 0)
                    if event.node_id == dst_cuda_1:
                        self.assertEqual(kernel.device, 1)
                    self.assertGreater(event.cuda_time, 0)

        # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled
        # events.
        remote_events = [event for event in function_events if event.is_remote]
        remote_event_names = [get_name(event) for event in remote_events if get_name(event) in EXPECTED_REMOTE_EVENTS]
        self.assertEqual(set(remote_event_names), set(EXPECTED_REMOTE_EVENTS))
class FaultyAgentRpcTest(RpcAgentTestFixture):
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
    @dist_init(messages_to_delay={})
    def test_check_failed_messages(self):
        """RRef creation/forking still succeed when retryable messages fail."""
        if self.rank == 0:
            dst_worker_b = worker_name((self.rank + 1) % self.world_size)
            dst_worker_c = worker_name((self.rank + 2) % self.world_size)

            # Worker0 sends RPC to Worker1 and creates an RRef there
            rref = rpc.remote(dst_worker_b, torch.add, args=(torch.ones(2, 2), torch.ones(2, 2)))
            # Worker0 sends an RPC to Worker2 with the RRef as an arg
            rpc.remote(dst_worker_c, add_rref_to_value, args=(rref, torch.ones(2, 2)))
            # check if the output is as expected
            self.assertEqual(rref.to_here(), torch.add(torch.ones(2, 2), torch.ones(2, 2)))
        # explicitly delete all User RRefs
        _delete_all_user_and_unforked_owner_rrefs()

    @dist_init
    def test_verify_backend_options(self):
        """The faulty fixture's backend options match their documented defaults."""
        self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
        self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
        self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
        self.assertEqual(len(self.rpc_backend_options.messages_to_delay), 2)
        self.assertEqual(self.rpc_backend_options.rpc_timeout, rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)

    @dist_init(faulty_messages=["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"])
    def test_custom_faulty_messages(self):
        # faulty_messages passed to dist_init must flow into the backend options.
        self.assertEqual(
            set(["RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT"]),
            set(self.rpc_backend_options.messages_to_fail),
        )

    @dist_init(faulty_messages=[])
    def test_no_faulty_messages(self):
        # An empty list disables message-failure injection entirely.
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 0)

    @dist_init(messages_to_delay={"SCRIPT_CALL": 1.5})
    def test_custom_messages_to_delay(self):
        # messages_to_delay passed to dist_init must flow into the backend options.
        self.assertEqual(self.rpc_backend_options.messages_to_delay, {"SCRIPT_CALL": 1.5})
def _test_remote_message_dropped_pickle(self, dst=None):
if self.rank != 0:
return
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, my_sleep_func, args=(1,))
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Attempt to fork the RRef should raise an error indicating the rpc.remote timeout.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref._serialize()
# Test that using RRef as arg over RPC (which forks) results in the same
# error
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_async(dst_worker, add_rref_to_value, args=(rref, 1))
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle(self):
self._test_remote_message_dropped_pickle()
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_remote_message_dropped_pickle_to_self(self):
self._test_remote_message_dropped_pickle(self.rank)
def _test_remote_message_dropped_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# test the case where rpc.remote() message creation is completely dropped.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# Since we fail python_remote_call messages synchronously, the future
# corresponding to this remote call will be marked with an error when
# this function returns.
rref = rpc.remote(dst_worker, func, args=args)
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Note: during shutdown, logs will indicate "Could not find OwnerRRef..."
# on the owning nodes, this is expected because the OwnerRRef was never
# successfully created. Therefore, delAllUsers will work as expected.
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_builtin_remote_message_dropped_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_dropped_timeout(func, args, dst=0)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args)
@dist_init(faulty_messages=["PYTHON_REMOTE_CALL"])
def test_udf_remote_message_dropped_timeout_to_self(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_dropped_timeout(func, args, dst=0)
def _test_remote_message_delay_timeout(self, func, args, dst=None):
if self.rank != 0:
return
# Test the case where remote message is eventually processed on the owner,
# but the future on the creator times out before the response comes back.
dst_rank = dst if dst is not None else (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
# 10 ms timeout
rref = rpc.remote(dst_worker, func, args=args, timeout=0.001)
# Future corresponding to the remote creation should time out.
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref._get_future().wait()
# Call to ensure pending callbacks are run.
wait_until_pending_futures_and_users_flushed()
# to_here() should now pick up that rpc.remote() creation has failed.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref.to_here()
# Test the case where rpc.remote() times out, but to_here() has already
# started blocking before.
# NOTE: we only test this when not sending to self, as to_here() calls
# calls localValue(), which does not send an RPC and thus does not have
# a timeout. This can be supported by allowing future.wait() to
# take in an optional timeout (https://github.com/pytorch/pytorch/issues/39280)
if dst_rank != self.rank:
slow_rref = rpc.remote(dst_worker, func, args=args, timeout=2)
with self.assertRaisesRegex(RuntimeError, expected_error):
# to_here() should raise timeout error, since it does not know about the
# status of rpc.remote().
slow_rref.to_here(0.001)
# Note: If we proceed with shutdown, UserRRef will send out a RRefUserDelete
# but this can be a noop since it may not exist on the owner yet. Later,
# the owner can process the RRef creation and wait for the delete message,
# thus leading to a timeout.
# Therefore, we wait until we get notification that pending owners have
# been confirmed before sending out RRefUserDeletes.
if dst_rank != self.rank:
wait_until_owners_and_forks_on_rank(2, 2, rank=dst_rank)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout(self):
func = my_sleep_func
args = (2,)
self._test_remote_message_delay_timeout(func, args)
@dist_init(faulty_messages=[], messages_to_delay={"PYTHON_REMOTE_CALL": 2})
def test_udf_remote_message_delay_timeout_to_self(self):
func = my_sleep_func
args = (1,)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_builtin_delay_timeout_to_self(self):
func = torch.add
args = (torch.tensor(1), torch.tensor(1))
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args)
@dist_init(
faulty_messages=[],
messages_to_delay={"SCRIPT_REMOTE_CALL": 2, "SCRIPT_RREF_FETCH_CALL": 1},
)
def test_remote_message_script_delay_timeout_to_self(self):
func = my_script_func
args = (torch.tensor(1),)
self._test_remote_message_delay_timeout(func, args, dst=0)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = "worker{}".format(dst_rank)
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref.to_here(0.01)
rref.to_here()
@dist_init(faulty_messages=[])
def test_rpc_builtin_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
# PYTHON_CALL message types which correspond to Python UDF over RPC
# by default get a delay (see faulty_rpc_agent_test_fixture)
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(
dst_worker,
torch.add,
args=(torch.tensor(1), torch.tensor(1)),
timeout=1,
)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=1
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
fut = rpc.rpc_async(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1)), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_rpc_script_timeout(self):
next_rank = (self.rank + 1) % self.world_size
dst_worker = worker_name(next_rank)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.rpc_sync(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
fut = rpc.rpc_async(dst_worker, my_script_func, args=(torch.tensor(1),), timeout=1)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure that the currently set default timeout is large enough such
# that RPCs with delays still complete.
self.assertEqual(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC, rpc.get_rpc_timeout())
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
fut.wait()
# Ensure timeout if we set a new default and don't override
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),)
)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if we specify timeout of 0
rpc._set_rpc_timeout(0.001)
fut = rpc.rpc_async(
dst_worker, my_script_func, args=(torch.tensor(1),), timeout=0
)
fut.wait()
# Reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
    """Tests specific to the TensorPipe-based RPC agent: backend-option
    validation, backend inference, timeouts, and RRef type/proxy timeouts."""
    def test_mismatched_type_for_options(self):
        # An exception should be raised if the options are not an instance of
        # TensorPipeRpcBackendOptions.
        rpc_backend_options = FooBackendOptions(self.init_method)
        with self.assertRaisesRegex(
            TypeError, "`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`"
        ):
            rpc.init_rpc(
                name=worker_name(self.rank),
                rank=self.rank,
                world_size=self.world_size,
                backend=rpc.BackendType.TENSORPIPE,
                rpc_backend_options=rpc_backend_options,
            )
    def test_infer_backend_from_options(self):
        # When no backend is passed, init_rpc should infer TENSORPIPE from the
        # type of the options object.
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
            init_method=self.init_method
        )
        rpc.init_rpc(
            name=worker_name(self.rank),
            rank=self.rank,
            world_size=self.world_size,
            # Do _not_ pass backend.
            rpc_backend_options=rpc_backend_options,
        )
        self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
    # FIXME Merge this test with the corresponding one in RpcTest.
    @dist_init(setup_rpc=False)
    def test_set_and_get_num_worker_threads(self):
        # The configured thread-pool size should be reflected in the agent's
        # debug info.
        NUM_THREADS = 27
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
            init_method=self.rpc_backend_options.init_method,
            num_worker_threads=NUM_THREADS
        )
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc_backend_options,
        )
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        self.assertEqual(int(info["agent.thread_pool_size"]), NUM_THREADS)
        rpc.shutdown()
    # FIXME Merge this test with the corresponding one in RpcTest.
    @dist_init(setup_rpc=False)
    def test_tensorpipe_set_default_timeout(self):
        # rpc_timeout passed via the backend options should become the
        # process-wide default timeout.
        timeout = 0.5
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
            init_method=self.rpc_backend_options.init_method,
            num_worker_threads=self.rpc_backend_options.num_worker_threads,
            rpc_timeout=timeout
        )
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc_backend_options,
        )
        default_timeout = rpc.get_rpc_timeout()
        self.assertEqual(default_timeout, timeout)
        rpc.shutdown()
    # FIXME Merge this test with the corresponding one in RpcTest.
    @dist_init(setup_rpc=False)
    def test_tensorpipe_options_throw_on_timedelta_timeout(self):
        from datetime import timedelta
        timeout = timedelta()
        # Ensure that constructing TensorPipeRpcBackendOptions with timedelta fails
        # (rpc_timeout must be a float of seconds, not a timedelta).
        with self.assertRaisesRegex(TypeError, "incompatible constructor arguments"):
            rpc_backend_options = rpc.TensorPipeRpcBackendOptions(
                init_method=self.rpc_backend_options.init_method,
                num_worker_threads=self.rpc_backend_options.num_worker_threads,
                rpc_timeout=timeout,
            )
    # NOTE: @dist_init sits on this helper (not on the two undecorated
    # test_* wrappers below), so RPC is initialized exactly once per test run.
    @dist_init
    def _test_rref_get_type_timeout(self, blocking):
        # Test where we try to get the type of a RRef from an owner, but RRef
        # creation is slower than timeout passed into _get_type.
        dst_rank = (self.rank + 1) % self.world_size
        dst = worker_name(dst_rank)
        slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
        timeout = 0.5
        expected_err = self.get_timeout_error_regex()
        # Blocking: blocks on inline call
        if blocking:
            with self.assertRaisesRegex(RuntimeError, expected_err):
                slow_rref._get_type(timeout=timeout, blocking=blocking)
        # Non-blocking: blocks on wait
        else:
            fut = slow_rref._get_type(timeout=timeout, blocking=blocking)
            with self.assertRaisesRegex(RuntimeError, expected_err):
                fut.wait()
        # FIXME We wait until the remote completed creating the OwnerRRef
        # because there's currently a race if we shut down RPC before that.
        slow_rref.to_here()
    def test_rref_get_type_timeout_blocking(self):
        self._test_rref_get_type_timeout(blocking=True)
    def test_rref_get_type_timeout_non_blocking(self):
        self._test_rref_get_type_timeout(blocking=False)
    @dist_init
    def test_op_with_invalid_args(self):
        # Calling an overloaded torch operator with no args should fail schema
        # matching on the remote side.
        dst = worker_name((self.rank + 1) % self.world_size)
        # NOTE(review): "failed to many any schema" reads like a typo for
        # "match any schema", but it presumably mirrors the C++ error text
        # verbatim — if the C++ message is ever fixed, update this regex too.
        with self.assertRaisesRegex(
            RuntimeError, "Overloaded torch operator invoked from Python failed to many any schema"
        ):
            rpc.rpc_sync(dst, torch.add, args=())
    def _test_rref_proxy_timeout(self, rref_proxy_api):
        # rref_proxy_api: one of "rpc_sync", "rpc_async", "remote".
        dst_rank = (self.rank + 1) % self.world_size
        dst = worker_name(dst_rank)
        rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), ))
        # Ensure RRef is created on remote node.
        rref.to_here()
        rref_api = getattr(rref, rref_proxy_api)
        self.assertTrue(rref_api is not None, f"Failed to get RRef proxy api: {rref_proxy_api}")
        expected_error = self.get_timeout_error_regex()
        timeout = 2
        # Case 1: the proxied method itself is slower than the timeout.
        with self.assertRaisesRegex(RuntimeError, expected_error):
            result = rref_api(timeout=timeout).my_slow_method(torch.ones(2, 2))
            if rref_api == rref.rpc_async:
                result.wait()
            elif rref_api == rref.remote:
                result._get_future().wait()
        # Case where rpc.remote() is stuck and exceeds timeout
        slow_rref = rpc.remote(dst, MyClass, args=(torch.ones(2, 2), True))
        timeout = 0.01
        rref_api = getattr(slow_rref, rref_proxy_api)
        # Note that even when we call rref.rpc_async() in this case, we
        # time out in future creation, not waiting for future. This is because
        # rref proxy function calls rref._get_type before returning future,
        # which blocks on the RRef being created on owner node, until the
        # specified timeout.
        with self.assertRaisesRegex(RuntimeError, expected_error):
            rref_api(timeout=timeout).my_instance_method(torch.ones(2, 2))
        # FIXME We wait until the remote completed creating the OwnerRRef
        # because there's currently a race if we shut down RPC before that.
        slow_rref.to_here()
    @dist_init
    def test_rref_proxy_timeout(self):
        for rpc_api in ["rpc_sync", "rpc_async", "remote"]:
            self._test_rref_proxy_timeout(rpc_api)
class MyConvNetForMNIST(nn.Module):
    """A small conv net for MNIST-sized inputs, placed entirely on ``device``.

    ``forward`` deliberately sleeps on the device's current CUDA stream so
    that stream-synchronization bugs in the RPC/CUDA tests surface as wrong
    results rather than races.
    """
    def __init__(self, device):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 16, 3, 1),
            nn.ReLU(),
            nn.Conv2d(16, 32, 3, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Flatten(1),
            # assumes 28x28 single-channel input: 32 * 12 * 12 = 4608 features
            # after the two convs and pooling — TODO confirm against caller.
            nn.Linear(4608, 128),
            nn.ReLU(),
            nn.Linear(128, 10),
        ).to(device)
        # device: where the net lives and whose stream the forward-pass
        # sleep is issued on.
        self.device = device
    def forward(self, x, is_rref=False):
        # x may arrive as an RRef when invoked via rpc.remote; materialize it.
        x = x.to_here() if is_rref else x
        with torch.cuda.stream(torch.cuda.current_stream(self.device)):
            # intentionally adding delay to current CUDA stream
            torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
            return self.net(x)
    def __getstate__(self):
        # return an empty dict to avoid inspecting the model contents on the
        # owner
        return {}
class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture):
    """CUDA-specific TensorPipe agent tests: device-map validation, GPU/CPU
    tensor transfer under device maps, and custom-stream interactions."""
    def _test_device_maps(self, options, errMsg):
        # init_rpc must reject the invalid device map with errMsg and leave
        # no RPC agent installed.
        with self.assertRaisesRegex(ValueError, errMsg):
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=options,
            )
        self.assertFalse(rpc.api._is_current_rpc_agent_set())
    @skip_if_lt_x_gpu(2)
    def test_device_maps_wrong_worker_name(self):
        options = self.rpc_backend_options
        # "none_exist" is not a worker in this group.
        options.set_device_map("none_exist", {0: 1})
        self._test_device_maps(
            options,
            errMsg="Node worker0 has invalid target node names in its device maps"
        )
    @skip_if_lt_x_gpu(1)
    def test_device_maps_invalid_max_local_device(self):
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        # Source device index == device_count() is one past the last valid index.
        options.set_device_map(dst, {torch.cuda.device_count(): 0})
        self._test_device_maps(
            options,
            errMsg="Node worker0 has source devices with invalid indices in its device map for worker1"
        )
    @skip_if_lt_x_gpu(1)
    def test_device_maps_invalid_max_remote_device(self):
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        # Target device index out of range on the remote side.
        options.set_device_map(dst, {0: torch.cuda.device_count()})
        self._test_device_maps(
            options,
            errMsg="Node worker0 has target devices with invalid indices in its device map for worker1"
        )
    @skip_if_lt_x_gpu(2)
    def test_device_maps_many_to_one(self):
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        # Two local devices mapping onto remote device 0 is rejected at init.
        options.set_device_map(dst, {1: 0})
        options.set_device_map(dst, {0: 0})
        self._test_device_maps(
            options,
            errMsg="Node worker0 has duplicated target devices in its device map for worker1"
        )
    @skip_if_lt_x_gpu(2)
    def test_device_maps_one_to_many(self):
        # Mapping one local device to two remote devices fails immediately in
        # set_device_map (no init_rpc needed); only rank 0 exercises it.
        if self.rank == 0:
            options = self.rpc_backend_options
            dst = worker_name((self.rank + 1) % self.world_size)
            options.set_device_map(dst, {0: 1})
            with self.assertRaisesRegex(
                ValueError, "`set_device_map` only supports 1-to-1 mapping"
            ):
                options.set_device_map(dst, {0: 0})
    @skip_if_lt_x_gpu(1)
    def test_device_maps_invalid_min_device(self):
        # Negative device indices are rejected on either side of the map.
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        with self.assertRaisesRegex(
            RuntimeError, "Device index must not be negative"
        ):
            options.set_device_map(dst, {-1: 0})
        with self.assertRaisesRegex(
            RuntimeError, "Device index must not be negative"
        ):
            options.set_device_map(dst, {0: -1})
    @staticmethod
    def _gpu_add(x, y):
        # Remote-side check: with device map {0: 1, 1: 0}, tensors sent from
        # local cuda:0 must arrive on remote cuda:1.
        if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 1]):
            return (x + y).to(0)
        else:
            raise ValueError("Wrong device affinity")
    @skip_if_lt_x_gpu(2)
    def test_device_maps_gpu(self):
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        options.set_device_map(dst, {0: 1, 1: 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        ret = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._gpu_add,
            args=(torch.zeros(2).to(0), torch.ones(2).to(0))
        )
        # Result computed on remote cuda:0 maps back to local cuda:1.
        self.assertEqual(ret.device, torch.device(1))
        self.assertEqual(ret, (torch.zeros(2) + torch.ones(2)).to(1))
        rpc.shutdown()
    @staticmethod
    def _gpu_add_given_devices(x, y, x_to, y_to, z_to):
        # Remote-side helper: verify x/y landed on the expected devices
        # (x_to/y_to) and return the sum on z_to. "cpu" stands in for CPU
        # tensors; GPU tensors are compared by index.
        x_device = "cpu" if x.device.type == "cpu" else x.device.index
        y_device = "cpu" if y.device.type == "cpu" else y.device.index
        if x_device == x_to and y_device == y_to:
            return x.to(z_to) + y.to(z_to)
        else:
            raise ValueError("Wrong device affinity")
    def _test_device_maps_gpu(self, x_from, y_from, z_to, device_map, dst=None, fn=None):
        # Round-trip a pair of tensors through rpc_sync under device_map and
        # verify both the device and the value of the returned sum. The
        # response direction uses the inverse of device_map.
        fn = TensorPipeAgentCudaRpcTest._gpu_add_given_devices if fn is None else fn
        x_to = device_map[x_from]
        y_to = device_map[y_from]
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size) if dst is None else dst
        options.set_device_map(dst, device_map)
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        x = torch.zeros(2).to(x_from)
        y = torch.ones(2).to(y_from)
        ret = rpc.rpc_sync(dst, fn, args=(x, y, x_to, y_to, z_to))
        # The response is mapped back through the inverted device map.
        reverse_device_map = {device_map[k] : k for k in device_map}
        z_from = reverse_device_map[z_to]
        ret_device = "cpu" if ret.device.type == "cpu" else ret.device.index
        self.assertEqual(ret_device, z_from)
        self.assertEqual(ret, torch.ones(2).to(z_from))
        rpc.shutdown()
    # The following tests enumerate source/target device permutations for
    # _test_device_maps_gpu: CPU<->GPU, default<->non-default GPU, identity
    # and swapped maps, and (the *_self_* variants) self-RPC to this worker.
    def test_device_map_cpu(self):
        self._test_device_maps_gpu(
            x_from="cpu",
            y_from="cpu",
            z_to="cpu",
            device_map={"cpu" : "cpu"},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(1)
    def test_device_map_cpu_to_gpu_default(self):
        self._test_device_maps_gpu(
            x_from="cpu",
            y_from="cpu",
            z_to=0,
            device_map={"cpu" : 0},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_cpu_to_gpu_non_default(self):
        self._test_device_maps_gpu(
            x_from="cpu",
            y_from="cpu",
            z_to=1,
            device_map={"cpu" : 1},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(1)
    def test_device_map_gpu_to_cpu_default(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=0,
            z_to="cpu",
            device_map={0 : "cpu"},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_to_cpu_non_default(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=1,
            z_to="cpu",
            device_map={1 : "cpu"},
            fn=TensorPipeAgentCudaRpcTest._gpu_add_given_devices,
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_default(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=0,
            z_to=0,
            device_map={0 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_non_default(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=1,
            z_to=1,
            device_map={1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_default_to_non_default(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=0,
            z_to=1,
            device_map={0 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_non_default_to_default(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=1,
            z_to=0,
            device_map={1 : 0}
        )
    # mixed_1..8: arguments originate on different GPUs, with identity
    # ({0: 0, 1: 1}) and swapped ({0: 1, 1: 0}) maps and both result devices.
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_1(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=0,
            device_map={0 : 0, 1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_2(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=1,
            device_map={0 : 0, 1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_3(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=0,
            device_map={0 : 0, 1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_4(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=1,
            device_map={0 : 0, 1 : 1}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_5(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=0,
            device_map={0 : 1, 1 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_6(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=1,
            device_map={0 : 1, 1 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_7(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=0,
            device_map={0 : 1, 1 : 0}
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_8(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=1,
            device_map={0 : 1, 1 : 0}
        )
    # mixed_self_1..8: same permutations, but the RPC targets this worker
    # itself (dst=worker_name(self.rank)).
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_1(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=0,
            device_map={0 : 0, 1 : 1},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_2(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=1,
            device_map={0 : 0, 1 : 1},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_3(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=0,
            device_map={0 : 0, 1 : 1},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_4(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=1,
            device_map={0 : 0, 1 : 1},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_5(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=0,
            device_map={0 : 1, 1 : 0},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_6(self):
        self._test_device_maps_gpu(
            x_from=0,
            y_from=1,
            z_to=1,
            device_map={0 : 1, 1 : 0},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_7(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=0,
            device_map={0 : 1, 1 : 0},
            dst=worker_name(self.rank)
        )
    @skip_if_lt_x_gpu(2)
    def test_device_map_gpu_mixed_self_8(self):
        self._test_device_maps_gpu(
            x_from=1,
            y_from=0,
            z_to=1,
            device_map={0 : 1, 1 : 0},
            dst=worker_name(self.rank)
        )
    @staticmethod
    def _gpu_add_multi_gpu(x, y):
        # Remote-side check for map {0: 1, 1: 0}: x (sent from local cuda:0)
        # must arrive on remote cuda:1, y (from local cuda:1) on remote cuda:0.
        # Returns two tensors on different remote devices.
        if all([x.is_cuda, x.device.index == 1, y.is_cuda, y.device.index == 0]):
            return x.to(0) + y, x - y.to(1)
        else:
            raise ValueError("Wrong device affinity")
    def _test_device_maps_multi_gpu(self, dst):
        # Incremental set_device_map calls accumulate into one map {0: 1, 1: 0};
        # the two returned tensors must map back through its inverse.
        options = self.rpc_backend_options
        options.set_device_map(dst, {0: 1})
        options.set_device_map(dst, {1: 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        x = torch.zeros(2).to(0)
        y = torch.ones(2).to(1)
        rets = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
            args=(x, y)
        )
        self.assertEqual(rets[0].device, torch.device(1))
        self.assertEqual(rets[1].device, torch.device(0))
        self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
        self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
        rpc.shutdown()
    @skip_if_lt_x_gpu(2)
    def test_device_maps_multi_gpu(self):
        dst = worker_name((self.rank + 1) % self.world_size)
        self._test_device_maps_multi_gpu(dst)
    @skip_if_lt_x_gpu(2)
    def test_device_maps_multi_gpu_self(self):
        # Same as above but via self-RPC.
        dst = worker_name(self.rank)
        self._test_device_maps_multi_gpu(dst)
    @staticmethod
    def _gpu_add_return_to_gpu(x, y):
        # Remote-side helper taking CPU inputs and returning four results,
        # one per GPU 0-3 (requires 4 visible devices on the callee).
        if x.device.type == 'cpu' and y.device.type == 'cpu':
            return (x + y).to(0), (x - y).to(1), (x * y).to(2), (x / y).to(3)
        else:
            raise ValueError("Wrong device affinity")
    @skip_if_lt_x_gpu(2)
    def test_device_maps_in_options(self):
        # device_maps can also be passed directly to the
        # TensorPipeRpcBackendOptions constructor instead of set_device_map.
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=rpc.TensorPipeRpcBackendOptions(
                init_method=options.init_method,
                num_worker_threads=options.num_worker_threads,
                device_maps={dst: {0: 1, 1: 0}}
            )
        )
        rets = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._gpu_add_multi_gpu,
            args=(torch.zeros(2).to(0), torch.ones(2).to(1))
        )
        self.assertEqual(rets[0].device, torch.device(1))
        self.assertEqual(rets[1].device, torch.device(0))
        self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(1))
        self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
        rpc.shutdown()
    def _test_device_maps_return_to_gpu(self, dst):
        # Map rotates devices by one ({0:1, 1:2, 2:3, 3:0}); remote results on
        # devices 0..3 therefore come back on local devices 3,0,1,2 — i.e.
        # (3 + i) % 4 for rets[i].
        options = self.rpc_backend_options
        options.set_device_map(dst, {0: 1})
        options.set_device_map(dst, {1: 2})
        options.set_device_map(dst, {2: 3})
        options.set_device_map(dst, {3: 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        rets = rpc.rpc_sync(
            dst,
            TensorPipeAgentCudaRpcTest._gpu_add_return_to_gpu,
            args=(torch.zeros(2), torch.ones(2))
        )
        for i in range(len(rets)):
            self.assertEqual(rets[i].device, torch.device((3 + i) % 4))
        self.assertEqual(rets[0], (torch.zeros(2) + torch.ones(2)).to(3))
        self.assertEqual(rets[1], (torch.zeros(2) - torch.ones(2)).to(0))
        self.assertEqual(rets[2], (torch.zeros(2) * torch.ones(2)).to(1))
        self.assertEqual(rets[3], (torch.zeros(2) / torch.ones(2)).to(2))
        rpc.shutdown()
    @skip_if_lt_x_gpu(4)
    def test_device_maps_return_to_gpu(self):
        dst = worker_name((self.rank + 1) % self.world_size)
        self._test_device_maps_return_to_gpu(dst)
    @skip_if_lt_x_gpu(4)
    def test_device_maps_return_to_gpu_self(self):
        dst = worker_name(self.rank)
        self._test_device_maps_return_to_gpu(dst)
    @staticmethod
    def _add_to_gpu(x, y):
        # Remote-side helper: result lands on the callee's cuda:0, forcing the
        # response path to need a device map.
        return (x + y).to(0)
    def _test_device_maps_missing_config(self, mode):
        # Sending a CUDA tensor without any device map configured must raise
        # immediately (not hang), and RPC must remain usable afterwards.
        dst = worker_name((self.rank + 1) % self.world_size)
        errMsg = (
            "TensorPipe RPC backend only supports CPU tensors by default.*"
            "`set_device_map` on `TensorPipeRpcBackendOptions`"
        )
        with self.assertRaisesRegex(RuntimeError, errMsg):
            if mode == RPCExecMode.SYNC:
                rpc.rpc_sync(dst, torch.add, args=(torch.zeros(2).to(0), 1))
            elif mode == RPCExecMode.REMOTE:
                rpc.remote(dst, torch.add, args=(torch.zeros(2).to(0), 1)).to_here()
            else:
                raise ValueError(f"unexpected mode {mode}")
        # make sure RPC is still functioning
        ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
        self.assertEqual(ret, torch.ones(2) + 1)
    def _test_device_maps_missing_config_response(self, mode):
        # Same as above, but the CUDA tensor appears in the *response*
        # (_add_to_gpu returns a cuda:0 tensor), so the error fires on the
        # reply path instead of the request path.
        dst = worker_name((self.rank + 1) % self.world_size)
        errMsg = "Response device mapping is not available"
        with self.assertRaisesRegex(RuntimeError, errMsg):
            if mode == RPCExecMode.SYNC:
                rpc.rpc_sync(
                    dst,
                    TensorPipeAgentCudaRpcTest._add_to_gpu,
                    args=(torch.zeros(2), 1)
                )
            elif mode == RPCExecMode.REMOTE:
                rpc.remote(
                    dst,
                    TensorPipeAgentCudaRpcTest._add_to_gpu,
                    args=(torch.zeros(2), 1)
                ).to_here()
            else:
                raise ValueError(f"unexpected mode {mode}")
        # make sure RPC is still functioning
        ret = rpc.rpc_sync(dst, torch.add, args=(torch.ones(2), 1))
        self.assertEqual(ret, torch.ones(2) + 1)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config(self):
        self._test_device_maps_missing_config(RPCExecMode.SYNC)
    @skip_if_lt_x_gpu(1)
    def test_device_maps_missing_config_not_timeout(self):
        # The missing-config failure must be an immediate error, not a timeout.
        dst = worker_name((self.rank + 1) % self.world_size)
        # NOTE(review): `options` and `dst` are unused here;
        # self.rpc_backend_options is passed directly below.
        options = self.rpc_backend_options
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=self.rpc_backend_options
        )
        timeout = rpc.get_rpc_timeout()
        tik = time.time()
        self._test_device_maps_missing_config(RPCExecMode.SYNC)
        rpc.shutdown()
        tok = time.time()
        self.assertTrue(tok - tik < timeout)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_loop(self):
        # Repeat more times than there are worker threads to ensure the error
        # path does not leak/poison threads.
        for _ in range(self.rpc_backend_options.num_worker_threads + 5):
            self._test_device_maps_missing_config(RPCExecMode.SYNC)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_response(self):
        self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_response_loop(self):
        for _ in range(self.rpc_backend_options.num_worker_threads + 5):
            self._test_device_maps_missing_config_response(RPCExecMode.SYNC)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_remote(self):
        self._test_device_maps_missing_config(RPCExecMode.REMOTE)
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_device_maps_missing_config_remote_response(self):
        self._test_device_maps_missing_config_response(RPCExecMode.REMOTE)
    @skip_if_lt_x_gpu(2)
    def test_device_maps_remote(self):
        # rpc.remote + to_here() with a device map on the response path.
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        options.set_device_map(dst, {1: 0})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        rref = rpc.remote(
            dst,
            TensorPipeAgentCudaRpcTest._add_to_gpu,
            args=(torch.zeros(2), 1)
        )
        # Remote result on cuda:0 maps back to local cuda:1 ({1: 0} inverted).
        self.assertEqual(rref.to_here().device.index, 1)
        self.assertEqual(rref.to_here(), torch.ones(2).to(1))
        rpc.shutdown()
@staticmethod
def _slow_add_on_user_stream(x, y):
s0 = torch.cuda.current_stream(x.device)
s1 = torch.cuda.Stream(device=x.device)
s1.wait_stream(s0)
x.record_stream(s1)
y.record_stream(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
z = x + y
s0.wait_stream(s1)
z.record_stream(s0)
return z
def _test_custom_stream(self, fn, device_map):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
options.set_device_map(dst, device_map)
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
fn(dst)
rpc.shutdown()
def _test_stream_sync(self, dst):
x = torch.ones(2, 2).to(0)
ret = rpc.rpc_sync(
dst,
TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
args=(x, x)
)
self.assertEqual(ret, 2 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream(self):
self._test_custom_stream(self._test_stream_sync, {"cuda:0": "cuda:1"})
def _test_stream_multi_async(self, dst):
    """Fire 20 async slow adds and verify each future's doubled input."""
    futures = []
    for i in range(20):
        x = torch.ones(2, 2).to(0) * i
        fut = rpc.rpc_async(
            dst,
            TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
            args=(x, x),
        )
        futures.append(fut)
    for i, fut in enumerate(futures):
        self.assertEqual(fut.wait(), 2 * torch.ones(2, 2).to(0) * i)
@skip_if_lt_x_gpu(2)
def test_custom_stream_multi(self):
    """Many concurrent async RPCs with a cuda:0 -> cuda:1 device map."""
    self._test_custom_stream(
        self._test_stream_multi_async,
        {"cuda:0": "cuda:1"}
    )
@staticmethod
def _nested_slow_add_on_user_stream(dst, x, y, z):
    """Compute (x + y) remotely on *dst*, then add z locally, both via
    the slow user-stream add."""
    partial_sum = rpc.rpc_sync(
        dst,
        TensorPipeAgentCudaRpcTest._slow_add_on_user_stream,
        args=(x, y),
    )
    return TensorPipeAgentCudaRpcTest._slow_add_on_user_stream(partial_sum, z)
def _test_stream_nested_sync(self, dst):
    """Nested sync RPC: dst computes x+y via a third worker, then +z."""
    x = torch.ones(2, 2).to(0)
    y = torch.ones(2, 2).to(0) * 2
    z = torch.ones(2, 2).to(0) * 3
    relay = worker_name((self.rank + 2) % self.world_size)
    result = rpc.rpc_sync(
        dst,
        TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
        args=(relay, x, y, z),
    )
    self.assertEqual(result, 6 * x)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested(self):
    """Nested RPC with device maps in both directions."""
    self._test_custom_stream(
        self._test_stream_nested_sync,
        {"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
    )
def _test_stream_nested_multi_async(self, dst):
    """Rank 0 fires several nested async adds and checks each total."""
    if self.rank != 0:
        return
    relay = worker_name((self.rank + 2) % self.world_size)
    futures = []
    operands = []
    for i in range(5):
        x = torch.ones(2, 2).to(0) * (i - 1)
        y = torch.ones(2, 2).to(0) * i
        z = torch.ones(2, 2).to(0) * (i + 1)
        operands.append((x, y, z))
        futures.append(
            rpc.rpc_async(
                dst,
                TensorPipeAgentCudaRpcTest._nested_slow_add_on_user_stream,
                args=(relay, x, y, z),
            )
        )
    for fut, (x, y, z) in zip(futures, operands):
        self.assertEqual(fut.wait(), x + y + z)
@skip_if_lt_x_gpu(2)
def test_custom_stream_nested_multi(self):
    """Many concurrent nested RPCs with bidirectional device maps."""
    self._test_custom_stream(
        self._test_stream_nested_multi_async,
        {"cuda:0": "cuda:1", "cuda:1": "cuda:0"}
    )
@staticmethod
def _gpu_add_wrong_gpus(x, y):
if x.is_cuda and y.is_cuda:
return x.cpu() + y.cuda()
else:
raise ValueError("Wrong device affinity")
@skip_if_lt_x_gpu(1)
def test_device_mismatch(self):
    """A remote function mixing CPU and GPU operands must surface the
    standard cross-device RuntimeError to the caller."""
    dst = worker_name((self.rank + 1) % self.world_size)
    options = self.rpc_backend_options
    options.set_device_map(dst, {0: 0})
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=options,
    )
    x = torch.zeros(2).to(0)
    y = torch.ones(2).to(0)
    with self.assertRaisesRegex(
        RuntimeError,
        "Expected all tensors to be on the same device, but found at least two devices"
    ):
        # The return value is irrelevant; only the raise matters.
        rets = rpc.rpc_sync(  # noqa: F841
            dst,
            TensorPipeAgentCudaRpcTest._gpu_add_wrong_gpus,
            args=(x, y)
        )
    rpc.shutdown()
def _test_rref_synchronization(self, local_device, remote_device):
    """Compare rref.remote().forward(x).to_here() against
    rref.rpc_sync().forward(x); identical results prove to_here() is
    synchronized with the remote forward pass."""
    dst = worker_name((self.rank + 1) % self.world_size)
    options = self.rpc_backend_options
    options.set_device_map(dst, {local_device : remote_device})
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=options,
    )
    if self.rank == 1:
        # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
        # If to_here() is properly synchronized with forward(x) the results must be identical
        # This test needs multiple iterations and significant batch size to simulate real
        # training of a CNN of MNIST-like data.
        # see https://github.com/pytorch/pytorch/issues/54771
        rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
        for _ in range(10):
            x = torch.randn(200, 1, 28, 28).to(local_device)
            actual = rref.remote().forward(x).to_here()
            expected = rref.rpc_sync().forward(x)
            self.assertEqual(actual, expected)
    rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_to_here_synchronization1(self):
    """to_here() sync, same device on both sides (cuda:0)."""
    self._test_rref_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization2(self):
    """to_here() sync, local cuda:1 mapped to remote cuda:0."""
    self._test_rref_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization3(self):
    """to_here() sync, both sides on cuda:1."""
    self._test_rref_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_to_here_synchronization4(self):
    """to_here() sync, local cuda:0 mapped to remote cuda:1."""
    self._test_rref_synchronization("cuda:0", "cuda:1")
def _test_rref_as_arg_synchronization(
    self,
    local_device,
    remote_device,
    devicesOptions=None
):
    """Like _test_rref_synchronization, but the input is itself wrapped
    in an RRef, exercising the to_here() path inside the remote forward.

    devicesOptions, if given, is a per-rank list of device lists passed
    to options.set_devices().
    """
    dst = worker_name((self.rank + 1) % self.world_size)
    options = self.rpc_backend_options
    options.set_device_map(dst, {local_device: remote_device})
    # The callee fetches the RRef argument back from us, so the reverse
    # direction needs a device map too.
    input_src = worker_name((self.rank - 1 + self.world_size) % self.world_size)
    options.set_device_map(input_src, {remote_device: local_device})
    if devicesOptions is not None:
        options.set_devices(devicesOptions[self.rank])
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=options,
    )
    if self.rank == 1:
        # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
        # If to_here() is properly synchronized with forward(x) the results must be identical
        # This test needs multiple iterations and significant batch size to simulate real
        # training of a CNN of MNIST-like data.
        # see https://github.com/pytorch/pytorch/issues/54771
        rref = rpc.remote(dst, MyConvNetForMNIST, args=(remote_device,))
        for _ in range(10):
            rref_x = RRef(torch.randn(200, 1, 28, 28).to(local_device))
            actual = rref.remote().forward(rref_x, True).to_here()
            expected = rref.rpc_sync().forward(rref_x, True)
            self.assertEqual(actual, expected)
    rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization1(self):
    """RRef-argument sync, same device on both sides (cuda:0)."""
    self._test_rref_as_arg_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization2(self):
    """RRef-argument sync, local cuda:1 mapped to remote cuda:0."""
    self._test_rref_as_arg_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization3(self):
    """RRef-argument sync, both sides on cuda:1."""
    self._test_rref_as_arg_synchronization("cuda:1", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_as_arg_synchronization4(self):
    """RRef-argument sync, local cuda:0 mapped to remote cuda:1."""
    self._test_rref_as_arg_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(1)
def test_rref_as_arg_synchronization5(self):
    """RRef-argument sync with an explicit devices list for every rank."""
    self._test_rref_as_arg_synchronization(
        "cuda:0",
        "cuda:0",
        [["cuda:0"] for _ in range(4)],  # devicesOptions
    )
@staticmethod
def _rref_relay(rref):
    """Materialize the RRef's value on this worker and return it."""
    value = rref.to_here()
    return value
def _test_rref_forward_synchronization(self, local_device, remote_device):
    """Three-worker RRef forward sync: worker0 owns the input, worker1
    hosts the model, worker2 relays the output RRef, forcing an extra
    device copy that must still be stream-synchronized."""
    options = self.rpc_backend_options
    input_src = worker_name(0)
    model_dst = worker_name(1)
    out_relay = worker_name(2)
    if self.rank == 0:
        # for 1) model construction 2) forward execution
        options.set_device_map(model_dst, {local_device: remote_device})
        # Forward output will be first copied to the relay node before
        # returning to the worker. This is intentional, to test RRef
        # forward CUDA stream synchronizations.
        options.set_device_map(out_relay, {local_device: local_device})
    elif self.rank == 1:
        # worker1 hosts the model and runs forward. The forward functions
        # calls RRef.to_here(), hence needs to configure the device map
        options.set_device_map(input_src, {remote_device: local_device})
    elif self.rank == 2:
        # worker2 will get the out RRef and call to_here() and hence, needs
        # to configure device map.
        options.set_device_map(model_dst, {local_device: remote_device})
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=options,
    )
    if self.rank == 0:
        # This test compares rref.rpc_sync().forward(x) vs rref.remote().forward(x).to_here()
        # If to_here() is properly synchronized with forward(x) the results must be identical
        # This test needs multiple iterations and significant batch size to simulate real
        # training of a CNN of MNIST-like data.
        # see https://github.com/pytorch/pytorch/issues/54771
        rref = rpc.remote(model_dst, MyConvNetForMNIST, args=(remote_device,))
        for _ in range(10):
            rref_input = RRef(torch.randn(200, 1, 28, 28).to(local_device))
            rref_out = rref.remote().forward(rref_input, True)
            out = rpc.remote(
                out_relay,
                TensorPipeAgentCudaRpcTest._rref_relay,
                args=(rref_out,)
            ).to_here()
            expected = rref.rpc_sync().forward(rref_input, True)
            self.assertEqual(out, expected)
    rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_rref_forward_synchronization1(self):
    """Relayed RRef forward sync, cuda:0 on both sides."""
    self._test_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization2(self):
    """Relayed RRef forward sync, local cuda:0 to remote cuda:1."""
    self._test_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization3(self):
    """Relayed RRef forward sync, local cuda:1 to remote cuda:0."""
    self._test_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_rref_forward_synchronization4(self):
    """Relayed RRef forward sync, cuda:1 on both sides."""
    self._test_rref_forward_synchronization("cuda:1", "cuda:1")
def _test_owner_rref_forward_synchronization(self, local_device, remote_device):
    """Owner-side RRef sync: compares RRef(output).remote().sum() (the
    RRef CUDA-sync path) against a plain output.sum() on self-RPC.

    NOTE(review): single-worker setup -- "w0" sends every call to itself.
    """
    if self.rank == 0:
        options = self.rpc_backend_options
        options.set_device_map("w0", {local_device: remote_device})
        rpc.init_rpc(
            "w0",
            rank=0,
            world_size=1,
            rpc_backend_options=options
        )
        model = rpc.remote(
            "w0", torch.nn.Linear, (2048, 20000)
        ).remote().to(remote_device)
        for _ in range(30):
            data = torch.rand(2048, 2048).to(local_device)
            output = model.rpc_sync().forward(data)
            # to_here() internally calls localValue as the caller is
            # the owner of the RRef.
            v0 = rpc.RRef(output).remote().sum().to_here().item()
            v1 = output.sum().item()
            self.assertEqual(v0, v1)
        rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_owner_rref_forward_synchronization1(self):
    """Owner-RRef sync, cuda:0 on both sides."""
    self._test_owner_rref_forward_synchronization("cuda:0", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization2(self):
    """Owner-RRef sync, local cuda:0 to remote cuda:1."""
    self._test_owner_rref_forward_synchronization("cuda:0", "cuda:1")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization3(self):
    """Owner-RRef sync, local cuda:1 to remote cuda:0."""
    self._test_owner_rref_forward_synchronization("cuda:1", "cuda:0")
@skip_if_lt_x_gpu(2)
def test_owner_rref_forward_synchronization4(self):
    """Owner-RRef sync, cuda:1 on both sides."""
    self._test_owner_rref_forward_synchronization("cuda:1", "cuda:1")
@staticmethod
def _return_tensor_view(i):
    """Return a 100x200 view into a freshly computed CUDA tensor, after
    a long device-side sleep that delays the producing kernel."""
    x = torch.ones(1000, 200).cuda(0) * i
    torch.cuda._sleep(10 * FIFTY_MIL_CYCLES)
    # serialization of the return value will create a new tensor from the
    # view, which is done outside of the user function.
    return x.split(100)[0]
@skip_if_lt_x_gpu(1)
def test_tensor_view_as_return_value(self):
    """Returning a tensor view from a remote function must serialize
    correctly: each future yields the expected 100x200 slice."""
    dst = worker_name((self.rank + 1) % self.world_size)
    options = self.rpc_backend_options
    options.set_device_map(dst, {0: 0})
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=options,
    )
    futures = [
        rpc.rpc_async(
            dst,
            TensorPipeAgentCudaRpcTest._return_tensor_view,
            args=(i,),
        )
        for i in range(5)
    ]
    for i, fut in enumerate(futures):
        self.assertEqual(torch.ones(100, 200) * i, fut.wait())
    rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch(self):
    """Declaring devices=[1] while the device map only uses device 0
    must be rejected at init time with a source-device mismatch error."""
    with self.assertRaisesRegex(
        ValueError,
        "Node worker0 has unexpected source devices in its device map for worker1"
    ):
        dst = worker_name((self.rank + 1) % self.world_size)
        options = self.rpc_backend_options
        options.set_device_map(dst, {0 : 0})
        options.set_devices([1])
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        rpc.shutdown()
@skip_if_lt_x_gpu(2)
def test_devices_option_mismatch_reverse(self):
    """Mirror of test_devices_option_mismatch: the device map targets
    device 1 but devices=[0], so init must fail on target devices."""
    with self.assertRaisesRegex(
        ValueError,
        "Node worker0 has unexpected target devices in its device map for worker1"
    ):
        dst = worker_name((self.rank + 1) % self.world_size)
        # Build fresh options here since device_maps/devices are passed
        # directly to the constructor in this variant.
        options = rpc.TensorPipeRpcBackendOptions(
            init_method=self.rpc_backend_options.init_method,
            num_worker_threads=self.rpc_backend_options.num_worker_threads,
            device_maps={dst: {0 : 1}},
            devices=[0]
        )
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_int(self):
    """Future accepts a bare int device index; construction must not raise."""
    fut = Future(devices=[0])  # noqa: F841 -- constructing is the assertion
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_str(self):
    """Future accepts a device string; construction must not raise."""
    fut = Future(devices=["cuda:0"])  # noqa: F841 -- constructing is the assertion
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_as_device(self):
    """Future accepts a torch.device object; construction must not raise."""
    fut = Future(devices=[torch.device("cuda", 0)])  # noqa: F841 -- constructing is the assertion
@skip_if_lt_x_gpu(1)
def test_cuda_future_device_not_cuda(self):
    """A CPU device in the devices list must be rejected."""
    with self.assertRaisesRegex(
        ValueError, "Expected devices to have indices, got cpu"
    ):
        fut = Future(devices=["cpu"])  # noqa: F841 -- only the raise matters
def _test_cuda_future_extraction(self, wrapper, unwrapper, sparse_tensor):
    """Set a Future's result on one CUDA stream and read it on another;
    a correct value proves the future synchronizes the consumer stream.

    wrapper/unwrapper embed and extract the tensor from the result
    container; sparse_tensor toggles dense vs. sparse COO tensors.
    """
    # We check proper CUDA stream synchronization by adding to the tensor
    # in one stream to get the expected value, and reading it from another stream.
    future = Future(devices=["cuda:0"])
    with torch.cuda.device("cuda:0"):
        stream = torch.cuda.Stream()
        another_stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            if sparse_tensor:
                tensor = build_sparse_tensor().to("cuda:0")
                add_tensor = build_sparse_tensor().to("cuda:0")
                expected_tensor = (tensor + add_tensor).coalesce()
            else:
                tensor = torch.zeros((100,), device="cuda:0")
                add_tensor = torch.ones((100,), device="cuda:0")
                expected_tensor = tensor + add_tensor
            # Delay the add so an unsynchronized reader would see stale data.
            torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
            tensor += add_tensor
            if sparse_tensor:
                tensor = tensor.coalesce()
            future.set_result(wrapper(tensor))
        with torch.cuda.stream(another_stream):
            tensor = unwrapper(future.wait())
            if sparse_tensor:
                self.assertTrue(torch.eq(tensor.indices(), expected_tensor.indices()).all().item())
                self.assertTrue(torch.eq(tensor.values(), expected_tensor.values()).all().item())
                self.assertEqual(tensor.size(), expected_tensor.size())
            else:
                self.assertTrue(torch.eq(tensor, expected_tensor).all().item())
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_tensor(self):
    """Dense CUDA tensor returned bare from the future."""
    self._test_cuda_future_extraction(
        wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=False
    )
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
    """Dense CUDA tensor nested inside a list is still found."""
    self._test_cuda_future_extraction(
        wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
    )
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_tensor(self):
    """Dense CUDA tensor inside a custom wrapper object is still found."""
    self._test_cuda_future_extraction(
        wrapper=TensorWrapper,
        unwrapper=lambda v: v.tensor,
        sparse_tensor=False,
    )
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_cuda_sparse_tensor(self):
    """Sparse CUDA tensor returned bare from the future."""
    self._test_cuda_future_extraction(
        wrapper=lambda t: t, unwrapper=lambda v: v, sparse_tensor=True
    )
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
    """Sparse CUDA tensor nested inside a list is still found."""
    self._test_cuda_future_extraction(
        wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
    )
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_custom_class_with_cuda_sparse_tensor(self):
    """Sparse CUDA tensor inside a custom wrapper object is still found."""
    self._test_cuda_future_extraction(
        wrapper=TensorWrapper,
        unwrapper=lambda v: v.tensor,
        sparse_tensor=True,
    )
@skip_if_lt_x_gpu(2)
def test_cuda_future_callback_changes_devices(self):
    """A then-callback may move the value to another GPU; the child
    future must synchronize the consumer on the destination device."""
    # We check proper CUDA stream synchronization by filling the tensor with
    # the expected value in one stream, and reading it from another stream.
    tensor0 = torch.zeros((100,), device="cuda:0")
    tensor1 = torch.zeros((100,), device="cuda:1")
    parent_future = Future(devices=["cuda:0", "cuda:1"])

    def cb(fut):
        # Cross-device copy inside the callback.
        t0 = fut.value()
        tensor1.copy_(t0, non_blocking=True)
        return tensor1

    child_future = parent_future.then(cb)
    with torch.cuda.device("cuda:0"):
        stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
            tensor0.fill_(1)
            parent_future.set_result(tensor0)
    with torch.cuda.device("cuda:1"):
        another_stream = torch.cuda.Stream()
        with torch.cuda.stream(another_stream):
            self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(2)
def test_cuda_future_value_on_bad_device(self):
    """Setting a result on a device not in the future's devices list
    must make wait() raise, while callbacks still run."""
    tensor0 = torch.zeros((100,), device="cuda:0")
    tensor1 = torch.zeros((100,), device="cuda:1")
    parent_future = Future(devices=["cuda:1"])

    # As a plus, we test that futures still invoke callbacks even in case of
    # error, and that the child futures are successful if those callbacks
    # don't access the parent future.
    def cb(fut):
        with torch.cuda.device("cuda:1"):
            torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
            tensor1.fill_(1)
            return tensor1

    child_future = parent_future.then(cb)
    with torch.cuda.device("cuda:0"):
        stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            torch.cuda._sleep(int(1000 * get_cycles_per_ms()))
            tensor0.fill_(1)
            # tensor0 lives on cuda:0, which is NOT in devices=["cuda:1"].
            parent_future.set_result(tensor0)
    with self.assertRaisesRegex(
        ValueError,
        r"The result contained tensors residing on device\(s\) cuda:0 "
        r"which are not among the expected device\(s\) cuda:1",
    ):
        parent_future.wait()
    with torch.cuda.device("cuda:1"):
        another_stream = torch.cuda.Stream()
        with torch.cuda.stream(another_stream):
            self.assertTrue(torch.eq(child_future.wait(), 1).all().item())
@skip_if_lt_x_gpu(1)
def test_async_execution_with_cuda_future(self):
    """An @async_execution remote function returning a CUDA tensor must
    be properly synchronized when waited on from another stream."""
    dst = worker_name((self.rank + 1) % self.world_size)
    options = self.rpc_backend_options
    options.set_device_map(dst, {"cuda:0": "cuda:0"})
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=options,
    )
    t = torch.zeros((100,), device="cuda:0")
    fut = rpc.rpc_async(dst, async_cuda_sleep_and_set_to_one, args=(t,))
    another_stream = torch.cuda.Stream("cuda:0")
    with torch.cuda.stream(another_stream):
        self.assertTrue(torch.eq(fut.wait(), 1).all().item())
    rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_async_execution_nested_with_cuda_future(self):
    """A nested @async_execution call chain (via a third worker) summing
    three CUDA tensors must yield a synchronized result of 3s."""
    dst = worker_name((self.rank + 1) % self.world_size)
    nested_dst = worker_name((self.rank + 2) % self.world_size)
    options = self.rpc_backend_options
    options.set_device_map(dst, {"cuda:0": "cuda:0"})
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=options,
    )
    a = torch.ones((100,), device="cuda:0")
    b = torch.ones((100,), device="cuda:0")
    c = torch.ones((100,), device="cuda:0")
    fut = rpc.rpc_async(dst, async_cuda_nested_add, args=(nested_dst, a, b, c))
    another_stream = torch.cuda.Stream("cuda:0")
    with torch.cuda.stream(another_stream):
        self.assertTrue(torch.eq(fut.wait(), 3).all().item())
    rpc.shutdown()
@skip_if_lt_x_gpu(1)
def test_cuda_future_modify_tensor_inplace(self):
    """In-place mutation of a completed future's tensor must not crash."""
    tensor = torch.zeros((100,), device="cuda:0")
    future = Future(devices=["cuda:0"])
    future.set_result(tensor)
    # It's weird to modify the value of a future once it's complete, but
    # technically possible. Currently this is considered undefined behavior
    # (in practice the future will ignore the modification and still
    # synchronize with the original value). We could one day add logic to
    # detect and warn or throw in such cases, but for now we just check that
    # this doesn't crash.
    tensor.fill_(1)
    future.wait()
@skip_if_lt_x_gpu(1)
def test_cuda_future_replace_tensor(self):
    """Replacing (and thus freeing) the tensor inside a completed
    future's container must not crash the future's caching logic."""
    tensor_list = [torch.zeros((100,), device="cuda:0")]
    future = Future(devices=["cuda:0"])
    future.set_result(tensor_list)
    # It's weird to modify the value of a future once it's complete, but
    # technically possible. Currently this is considered undefined behavior
    # (in practice the future will ignore the modification and still
    # synchronize with the original value). We could one day add logic to
    # detect and warn or throw in such cases, but for now we just check that
    # this doesn't crash.
    # We set things up so that the original tensor contained in the list
    # gets deleted once we replace it with the other one. This will
    # invalidate any cached information held by the future.
    tensor_list[0] = torch.ones((100,), device="cuda:0")
    future.wait()
@skip_if_lt_x_gpu(1)
def test_rref_with_unpickleable_attributes(self):
    """RRef method calls must work even when the wrapped object holds
    attributes that cannot be pickled."""
    peer = worker_name((self.rank + 1) % self.world_size)
    opts = self.rpc_backend_options
    opts.set_device_map(peer, {"cuda:0": "cuda:0"})
    rpc.init_rpc(
        name=worker_name(self.rank),
        backend=self.rpc_backend,
        rank=self.rank,
        world_size=self.world_size,
        rpc_backend_options=opts,
    )
    wrapper_rref = rpc.remote(
        peer, TensorWrapper, args=(torch.zeros(42, device="cuda:0"),)
    )
    wrapper_rref.rpc_sync().increase(1)
    self.assertEqual(wrapper_rref.rpc_sync().sum(), 42)
    rpc.shutdown()
|
# File: test_mturk_agent.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import time
import threading
from unittest import mock
from parlai.mturk.core.agents import MTurkAgent
from parlai.mturk.core.shared_utils import AssignState
from parlai.mturk.core.mturk_manager import MTurkManager
from parlai.core.params import ParlaiParser
import parlai.mturk.core.worker_manager as WorkerManagerFile
import parlai.mturk.core.data_model as data_model
parent_dir = os.path.dirname(os.path.abspath(__file__))
WorkerManagerFile.DISCONNECT_FILE_NAME = 'disconnect-test.pickle'
WorkerManagerFile.MAX_DISCONNECTS = 1
WorkerManagerFile.parent_dir = os.path.dirname(os.path.abspath(__file__))
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_MESSAGE
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
active_statuses = [
AssignState.STATUS_NONE,
AssignState.STATUS_ONBOARDING,
AssignState.STATUS_WAITING,
AssignState.STATUS_IN_TASK,
]
complete_statuses = [
AssignState.STATUS_DONE,
AssignState.STATUS_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
AssignState.STATUS_EXPIRED,
AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
class TestAssignState(unittest.TestCase):
    """
    Various unit tests for the AssignState class.
    """

    def setUp(self):
        """Create two assign states (default and in-task) plus a manager."""
        self.agent_state1 = AssignState()
        self.agent_state2 = AssignState(status=AssignState.STATUS_IN_TASK)
        argparser = ParlaiParser(False, False)
        argparser.add_parlai_data_path()
        argparser.add_mturk_args()
        self.opt = argparser.parse_args(print_args=False)
        self.opt['task'] = 'unittest'
        self.opt['assignment_duration_in_seconds'] = 6
        mturk_agent_ids = ['mturk_agent_1']
        self.mturk_manager = MTurkManager(opt=self.opt, mturk_agent_ids=mturk_agent_ids)
        self.worker_manager = self.mturk_manager.worker_manager

    def tearDown(self):
        self.mturk_manager.shutdown()

    def test_assign_state_init(self):
        """
        Test proper initialization of assignment states.
        """
        self.assertEqual(self.agent_state1.status, AssignState.STATUS_NONE)
        self.assertEqual(len(self.agent_state1.messages), 0)
        self.assertEqual(len(self.agent_state1.message_ids), 0)
        self.assertIsNone(self.agent_state1.last_command)
        self.assertEqual(self.agent_state2.status, AssignState.STATUS_IN_TASK)
        # Fix: the following three checks previously re-tested agent_state1
        # (a copy-paste slip); they are meant to verify agent_state2's fields.
        self.assertEqual(len(self.agent_state2.messages), 0)
        self.assertEqual(len(self.agent_state2.message_ids), 0)
        self.assertIsNone(self.agent_state2.last_command)

    def test_message_management(self):
        """
        Test message management in an AssignState.
        """
        # Ensure message appends succeed and are idempotent
        self.agent_state1.append_message(MESSAGE_1)
        self.assertEqual(len(self.agent_state1.get_messages()), 1)
        self.agent_state1.append_message(MESSAGE_2)
        self.assertEqual(len(self.agent_state1.get_messages()), 2)
        self.agent_state1.append_message(MESSAGE_1)
        self.assertEqual(len(self.agent_state1.get_messages()), 2)
        self.assertEqual(len(self.agent_state2.get_messages()), 0)
        self.assertIn(MESSAGE_1, self.agent_state1.get_messages())
        self.assertIn(MESSAGE_2, self.agent_state1.get_messages())
        self.assertEqual(len(self.agent_state1.message_ids), 2)
        self.agent_state2.append_message(MESSAGE_1)
        self.assertEqual(len(self.agent_state2.message_ids), 1)
        # Ensure command interactions work as expected
        self.agent_state1.set_last_command(COMMAND_1)
        self.assertEqual(self.agent_state1.get_last_command(), COMMAND_1)
        # Ensure clearing messages acts as intended and doesn't clear agent2
        self.agent_state1.clear_messages()
        self.assertEqual(len(self.agent_state1.messages), 0)
        self.assertEqual(len(self.agent_state1.message_ids), 0)
        self.assertIsNone(self.agent_state1.last_command)
        self.assertEqual(len(self.agent_state2.message_ids), 1)

    def test_state_handles_status(self):
        """
        Ensures status updates and is_final are valid.
        """
        for status in statuses:
            self.agent_state1.set_status(status)
            self.assertEqual(self.agent_state1.get_status(), status)
        for status in active_statuses:
            self.agent_state1.set_status(status)
            self.assertFalse(self.agent_state1.is_final())
        for status in complete_statuses:
            self.agent_state1.set_status(status)
            self.assertTrue(self.agent_state1.is_final())
        # TODO update the below once bonus is default
        for status in complete_statuses:
            self.agent_state1.set_status(status)
            text, command = self.agent_state1.get_inactive_command_text()
            self.assertIsNotNone(text)
            self.assertIsNotNone(command)
class TestMTurkAgent(unittest.TestCase):
    """
    Various unit tests for the MTurkAgent class.
    """

    def setUp(self):
        """Build a manager plus a single MTurkAgent under test."""
        argparser = ParlaiParser(False, False)
        argparser.add_parlai_data_path()
        argparser.add_mturk_args()
        self.opt = argparser.parse_args(print_args=False)
        self.opt['task'] = 'unittest'
        self.opt['assignment_duration_in_seconds'] = 6
        mturk_agent_ids = ['mturk_agent_1']
        self.mturk_manager = MTurkManager(
            opt=self.opt.copy(), mturk_agent_ids=mturk_agent_ids
        )
        self.worker_manager = self.mturk_manager.worker_manager
        self.turk_agent = MTurkAgent(
            self.opt.copy(),
            self.mturk_manager,
            TEST_HIT_ID_1,
            TEST_ASSIGNMENT_ID_1,
            TEST_WORKER_ID_1,
        )

    def tearDown(self):
        """Shut the manager down and clear the on-disk disconnect record."""
        self.mturk_manager.shutdown()
        disconnect_path = os.path.join(parent_dir, 'disconnect-test.pickle')
        if os.path.exists(disconnect_path):
            os.remove(disconnect_path)

    def test_init(self):
        """
        Test initialization of an agent.
        """
        self.assertIsNotNone(self.turk_agent.creation_time)
        self.assertIsNone(self.turk_agent.id)
        self.assertIsNone(self.turk_agent.message_request_time)
        self.assertIsNone(self.turk_agent.conversation_id)
        self.assertFalse(self.turk_agent.some_agent_disconnected)
        self.assertFalse(self.turk_agent.hit_is_expired)
        self.assertFalse(self.turk_agent.hit_is_abandoned)
        self.assertFalse(self.turk_agent.hit_is_returned)
        self.assertFalse(self.turk_agent.hit_is_complete)
        self.assertFalse(self.turk_agent.disconnected)
        # 'alived' is the attribute name used by parlai's MTurkAgent itself.
        self.assertTrue(self.turk_agent.alived)

    def test_state_wrappers(self):
        """
        Test the mturk agent wrappers around its state.
        """
        for status in statuses:
            self.turk_agent.set_status(status)
            self.assertEqual(self.turk_agent.get_status(), status)
        for status in [AssignState.STATUS_DONE, AssignState.STATUS_PARTNER_DISCONNECT]:
            self.turk_agent.set_status(status)
            self.assertTrue(self.turk_agent.submitted_hit())
        for status in active_statuses:
            self.turk_agent.set_status(status)
            self.assertFalse(self.turk_agent.is_final())
        for status in complete_statuses:
            self.turk_agent.set_status(status)
            self.assertTrue(self.turk_agent.is_final())
        # Message appends are idempotent per message_id.
        self.turk_agent.append_message(MESSAGE_1)
        self.assertEqual(len(self.turk_agent.get_messages()), 1)
        self.turk_agent.append_message(MESSAGE_2)
        self.assertEqual(len(self.turk_agent.get_messages()), 2)
        self.turk_agent.append_message(MESSAGE_1)
        self.assertEqual(len(self.turk_agent.get_messages()), 2)
        self.assertIn(MESSAGE_1, self.turk_agent.get_messages())
        self.assertIn(MESSAGE_2, self.turk_agent.get_messages())
        # Ensure command interactions work as expected
        self.turk_agent.set_last_command(COMMAND_1)
        self.assertEqual(self.turk_agent.get_last_command(), COMMAND_1)
        self.turk_agent.clear_messages()
        self.assertEqual(len(self.turk_agent.get_messages()), 0)
        # In task checks: 't_' conversation ids mark task conversations.
        self.turk_agent.conversation_id = 't_12345'
        self.assertTrue(self.turk_agent.is_in_task())
        self.turk_agent.conversation_id = 'b_12345'
        self.assertFalse(self.turk_agent.is_in_task())

    def test_connection_id(self):
        """
        Ensure the connection_id hasn't changed.
        """
        connection_id = "{}_{}".format(
            self.turk_agent.worker_id, self.turk_agent.assignment_id
        )
        self.assertEqual(self.turk_agent.get_connection_id(), connection_id)

    def test_inactive_data(self):
        """
        Ensure data packet generated for inactive commands is valid.
        """
        for status in complete_statuses:
            self.turk_agent.set_status(status)
            data = self.turk_agent.get_inactive_command_data()
            self.assertIsNotNone(data['text'])
            self.assertIsNotNone(data['inactive_text'])
            self.assertEqual(data['conversation_id'], self.turk_agent.conversation_id)
            self.assertEqual(data['agent_id'], TEST_WORKER_ID_1)

    def test_status_change(self):
        """wait_for_status must block until the target status is set."""
        has_changed = False
        self.turk_agent.set_status(AssignState.STATUS_ONBOARDING)

        def wait_for_status_wrap():
            nonlocal has_changed  # noqa 999 we don't use python2
            self.turk_agent.wait_for_status(AssignState.STATUS_WAITING)
            has_changed = True

        t = threading.Thread(target=wait_for_status_wrap, daemon=True)
        t.start()
        self.assertFalse(has_changed)
        time.sleep(0.07)
        # Still blocked: status has not reached STATUS_WAITING yet.
        self.assertFalse(has_changed)
        self.turk_agent.set_status(AssignState.STATUS_WAITING)
        time.sleep(0.07)
        self.assertTrue(has_changed)

    def test_message_queue(self):
        """
        Ensure observations and acts work as expected.
        """
        self.mturk_manager.send_message = mock.MagicMock()
        self.turk_agent.observe(ACT_1)
        self.mturk_manager.send_message.assert_called_with(
            TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1, ACT_1
        )
        # First act comes through the queue and returns properly
        self.assertTrue(self.turk_agent.msg_queue.empty())
        self.turk_agent.id = AGENT_ID
        self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
        # 'recieved_packets' is the (misspelled) attribute name in parlai.
        self.assertTrue(self.turk_agent.recieved_packets[MESSAGE_ID_1])
        self.assertFalse(self.turk_agent.msg_queue.empty())
        returned_act = self.turk_agent.get_new_act_message()
        self.assertEqual(returned_act, ACT_1)
        # Repeat act is ignored
        self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
        self.assertTrue(self.turk_agent.msg_queue.empty())
        for i in range(100):
            self.turk_agent.put_data(str(i), ACT_1)
        self.assertEqual(self.turk_agent.msg_queue.qsize(), 100)
        self.turk_agent.flush_msg_queue()
        self.assertTrue(self.turk_agent.msg_queue.empty())
        # Test non-act messages
        blank_message = self.turk_agent.get_new_act_message()
        self.assertIsNone(blank_message)
        self.turk_agent.disconnected = True
        disconnect_message = self.turk_agent.get_new_act_message()
        self.turk_agent.disconnected = False
        self.assertEqual(
            disconnect_message['text'], self.turk_agent.MTURK_DISCONNECT_MESSAGE
        )
        self.turk_agent.hit_is_returned = True
        return_message = self.turk_agent.get_new_act_message()
        self.assertEqual(return_message['text'], self.turk_agent.RETURN_MESSAGE)
        self.turk_agent.hit_is_returned = False
        # Reduce state
        self.turk_agent.reduce_state()
        self.assertIsNone(self.turk_agent.msg_queue)
        self.assertIsNone(self.turk_agent.recieved_packets)

    def test_message_acts(self):
        """Exercise act() in non-blocking, non-blocking-timeout, and
        blocking-timeout modes against a mocked manager."""
        self.mturk_manager.send_command = mock.MagicMock()
        self.mturk_manager.handle_turker_timeout = mock.MagicMock()
        # non-Blocking check
        self.assertIsNone(self.turk_agent.message_request_time)
        returned_act = self.turk_agent.act(blocking=False)
        self.assertIsNotNone(self.turk_agent.message_request_time)
        self.assertIsNone(returned_act)
        self.turk_agent.id = AGENT_ID
        self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
        returned_act = self.turk_agent.act(blocking=False)
        self.assertIsNone(self.turk_agent.message_request_time)
        self.assertEqual(returned_act, ACT_1)
        self.mturk_manager.send_command.assert_called_once()
        # non-Blocking timeout check
        self.mturk_manager.send_command = mock.MagicMock()
        returned_act = self.turk_agent.act(timeout=0.07, blocking=False)
        self.assertIsNotNone(self.turk_agent.message_request_time)
        self.assertIsNone(returned_act)
        while returned_act is None:
            returned_act = self.turk_agent.act(timeout=0.07, blocking=False)
        self.mturk_manager.send_command.assert_called_once()
        self.mturk_manager.handle_turker_timeout.assert_called_once()
        self.assertEqual(returned_act['text'], self.turk_agent.TIMEOUT_MESSAGE)
        # Blocking timeout check
        self.mturk_manager.send_command = mock.MagicMock()
        self.mturk_manager.handle_turker_timeout = mock.MagicMock()
        returned_act = self.turk_agent.act(timeout=0.07)
        self.mturk_manager.send_command.assert_called_once()
        self.mturk_manager.handle_turker_timeout.assert_called_once()
        self.assertEqual(returned_act['text'], self.turk_agent.TIMEOUT_MESSAGE)
if __name__ == '__main__':
unittest.main(buffer=True)
|
# File: app-2020-0803-0500.py
#!/usr/bin/env python3
##jwc o #!/usr/bin/env python
# Key Notes
#
# jwc 2020-0519 Convert from Crickit_Adafruit to RoboHat_4tronix
# jwc 2020-0519 Use 'gpiozero' for noise-free servo-control
# jwc Add 'robohat.init()'
# jwc Make sure 'servod' copied in from 'RoboHat' dir
# jwc Using 'robohat's' servo cause pan to jitter/cool and tilt to get hot
from importlib import import_module
import os
import sys
import signal
import threading
import time
import json
from flask import Flask, render_template, request, Response
# jwc n from gevent.wsgi import WSGIServer
# jwc from gevent.pywsgi import WSGIServer
import config as cfg
##jwc o import io_wrapper as hw
import io_wrapper_dummy as hw
## jwc o import RPi.GPIO as GPIO
##jwc y from gpiozero import Servo
##jwc o from adafruit_crickit import crickit
##jwc y import robohat
##jwc y robohat.init()
import AutoPHat_SparkFun_Driver
AutoPHat_SparkFun_Driver.init()
AutoPHat_SparkFun_Driver.runTest()
##jwc o # make two variables for the motors to make code shorter to type
##jwc o # Right-Side
##jwc o motor_1 = crickit.dc_motor_1
##jwc o # Left-Side
##jwc o motor_2 = crickit.dc_motor_2
##jwc 'crickit' o servo_1 = crickit.servo_1
##jwc 'crickit' o servo_2 = crickit.servo_2
#jwc Due to 'robohat.py' using 'GPIO.setmode(GPIO.BOARD)', then convert from BCM Pin# to Board Pin#
##jwc o myGPIO=24
##jwc o myGPIO_02=25
##jwc o Board#: myGPIO = 18
##jwc o Board#: myGPIO_02 = 22
# jwc: Convert from Board# to BCM#
# BCM pin numbers for the two camera servos (pan / tilt), converted from the
# board-numbering scheme the earlier RoboHat code used.
myGPIO = 24
myGPIO_02 = 25
# gpiozero.exc.GPIOPinInUse: pin 6 is already in use by <gpiozero.PWMOutputDevice object on pin GPIO6, active_high=True, is_active=True>
# gpiozero.exc.GPIOPinInUse: pin 18 is already in use by <gpiozero.PWMOutputDevice object on pin GPIO18, active_high=True, is_active=True>
##jwc bcm# y myGPIO = 6
##jwc bcm#24y myGPIO_02 = 18
##jwc ? myGPIO = 24
##jwc n myGPIO_02 = 25 ## Became hot after 5'
##jwc n myGPIO_02 = 6 ## hot also
## jwc ? myGPIO_02 = 18
##jwc o myCorrection=0.45
#jwc remove correction for smaller range
# Servo pulse-width bounds in seconds: nominal 1.0-2.0 ms widened by
# myCorrection ms on each end (correction currently disabled).
myCorrection=0.00
maxPW=(2.0+myCorrection)/1000
minPW=(1.0-myCorrection)/1000
##jwc y servo = Servo(myGPIO)
##jwc y servo_02 = Servo(myGPIO_02)
##jwc y servo = Servo(myGPIO,min_pulse_width=minPW,max_pulse_width=maxPW)
##jwc y servo_02 = Servo(myGPIO_02,min_pulse_width=minPW,max_pulse_width=maxPW)
##jwc n servo.mid()
##jwc n servo_02.mid()
##jwc 'crickit' ##jwc o GPIO.setmode(GPIO.BCM)
##jwc 'crickit' ##jwc o GPIO.setwarnings(False)
##jwc 'crickit'
##jwc 'crickit'
##jwc 'crickit' ##jwc o servo = 22, 22 is Board Pin #
##jwc 'crickit' ##jwc y servo = 22 # jwc bcm=25
##jwc 'crickit' ##jwc o servo_Pin = 18 # jwc bcm=24
##jwc 'crickit' ##jwc o servo_02_Pin = 22 # jwc bcm=25
##jwc 'crickit'
##jwc 'crickit' ##jwc o bug: GPIO.setmode(GPIO.BOARD).py
##jwc 'crickit' ##jwc o GPIO.setmode(GPIO.BOARD)
##jwc 'crickit' ##TODO jwc jittery: GPIO.setup(servo_Pin, GPIO.OUT)
##jwc 'crickit' ##jwc o GPIO.setup(servo_02_Pin, GPIO.OUT)
##jwc 'crickit'
##jwc 'crickit' # jwc: 5ms = 0.005s -> 1 / 200 = 200Hz
##jwc 'crickit' ##TODO jwc jittery: servo = GPIO.PWM(servo_Pin, 200) # frequency is 500Hz, so each pulse is 5ms wide
##jwc 'crickit' ##jwc o servo_02 = GPIO.PWM(servo_02_Pin, 200) # frequency is 500Hz, so each pulse is 5ms wide
##jwc 'crickit'
##jwc 'crickit' # servos will be fully left at 0.5ms, centred at 1.5ms and fully servoPwm_PositionMin at 2.5ms
##jwc 'crickit' #
##jwc 'crickit' servoPwm_PositionMax = 50/5
##jwc 'crickit' servoPwm_PositionMid = 150/5
##jwc 'crickit' servoPwm_PositionMin = 250/5
##jwc 'crickit'
##jwc 'crickit' ##TODO jwc jitter: servo.start(servoPwm_PositionMid) # start it at 50% - should be servoPwm_PositionMid of servo
##jwc 'crickit' servo_02.start(servoPwm_PositionMid) # start it at 50% - should be servoPwm_PositionMid of servo
##jwc 'crickit' #p.ChangeDutyCycle(100)
# import camera driver
# The CAMERA environment variable selects camera_<name>.Camera dynamically;
# otherwise fall back to the OpenCV-based driver.
if os.environ.get('CAMERA'):
    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
    ##jwc o from camera import Camera
    # Default to most sophisticated tech
    from camera_opencv import Camera
    # Raspberry Pi camera module (requires picamera package)
    # from camera_pi import Camera

import logging

# Keep werkzeug request logging at INFO (ERROR would hide access logs).
log = logging.getLogger('werkzeug')
##jwc o log.setLevel(logging.ERROR)
log.setLevel(logging.INFO)

##jwc yo app = Flask(__name__)
# Serve static assets from the /static URL path.
app = Flask(__name__, static_url_path='/static')
# Immobilizes sytem (chocks on) after 'timeout' seconds
def watchdog_timer():
    """Background watchdog loop.

    Polls every 5 seconds while cfg.watchdog_Alive_Bool is set.  When the
    heartbeat counter exceeds the configured maximum the chocks are engaged
    (bot immobilized); once heartbeats resume the chocks are released.
    """
    while cfg.watchdog_Alive_Bool:
        time.sleep(5)
        if not cfg.watchdog_Start_On_Bool:
            # Watchdog is administratively paused; don't count cycles.
            continue
        cfg.watchdog_Cycles_Now += 1
        ##jwc print("*** DEBUG: cfg.watchdog_Cycles_Now: " + str(cfg.watchdog_Cycles_Now))
        timed_out = cfg.watchdog_Cycles_Now > cfg.timeout_Cycles_MAX
        # A stale heartbeat means the client disconnected badly: chock the
        # bot.  A fresh heartbeat while chocked means it reconnected.
        if timed_out and not cfg.chocks:
            chocks_on()
        elif not timed_out and cfg.chocks:
            chocks_off()
# Handler for a clean shutdown when pressing Ctrl-C
def signal_handler(signal, frame):
hw.light_blue_blink(0.1)
cfg.watchdog_Alive_Bool = False
cfg.camera_active = False
brakes_on()
# jwc: Wait until thread terminates
watchDog.join()
##jwc o http_server.close()
hw.light_blue_off()
sys.exit(0)
# Handler for explorer-hat touchpads
def touch_handler(channel, event):
if channel == 1:
cfg.blue = not cfg.blue
if cfg.blue:
hw.light_blue_on()
hw.output_one_on()
else:
hw.light_blue_off()
hw.output_one_off()
if channel == 2:
cfg.yellow = not cfg.yellow
if cfg.yellow:
hw.light_yellow_on()
hw.output_two_on()
else:
hw.light_yellow_off()
hw.output_two_off()
if channel == 3:
cfg.chocks = not cfg.chocks
# jwc: Chocks set to True: Admin Lock
if cfg.chocks:
# jwc: Since Motors not free to operate, Watchdog not needed
cfg.watchdog_Start_On_Bool = False
chocks_on()
# jwc: Chocks set to False: Admin Unlock
else:
# jwc: Since Motors are free to operate, Watchdog is needed
cfg.watchdog_Start_On_Bool = True
chocks_off()
if channel == 4:
hw.light_green_blink(0.1)
cfg.green = True
time.sleep(5)
if cfg.chocks:
hw.light_green_on()
##jwc o os.system("sudo -s shutdown -h now")
else:
hw.light_green_off()
cfg.green = False
def brakes_on():
    """Engage the brakes: flag them on and zero both motor speed setpoints."""
    cfg.brakes = True
    cfg.right_motor = 0
    cfg.left_motor = 0
    ##jwc o hw.motor_one_speed(cfg.right_motor)
    ##jwc o hw.motor_two_speed(cfg.left_motor)
# jwc: Motors free to operate: Lo-Level: User-Level
# jwc: Motors free to operate: Lo-Level: User-Level
def brakes_off():
    """Release the brakes and reset the watchdog heartbeat counter."""
    cfg.watchdog_Cycles_Now = 0
    cfg.brakes = False
def chocks_on():
    """Admin-level lock-out: set the chocks flag, brake, blink red."""
    cfg.chocks = True
    brakes_on()
    hw.light_red_blink(0.2)
# jwc: Motors free to operate: Hi-Level: Admin-Level ~ Overrides User-Level for Security/Safety
# jwc: Motors free to operate: Hi-Level: Admin-Level ~ Overrides User-Level for Security/Safety
def chocks_off():
    """Clear the admin lock: release brakes and turn the red LED off."""
    cfg.chocks = False
    brakes_off()
    hw.light_red_off()
@app.route('/')
def index():
    """Video streaming home page."""
    page = render_template('index.html')
    return page
def gen(camera):
    """Video streaming generator.

    Yields an endless multipart/x-mixed-replace sequence where each part is
    one JPEG frame pulled from *camera*.
    """
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield header + camera.get_frame() + b'\r\n'
""" jwc y
# URL for motor control - format: /motor?l=[speed]&r=[speed]
@app.route('/motor')
def motor():
left = request.args.get('l')
##o if left and not cfg.chocks:
if left:
left = int(left)
if left >= -100 and left <= 100:
##o cfg.left_motor = left
##o hw.motor_two_speed(cfg.left_motor)
left_normalized = (left / 100 )
motor_1.throttle = left_normalized
time.sleep(3)
motor_1.throttle = -1 * left_normalized
time.sleep(3)
motor_1.throttle = 0
time.sleep(3)
servo_1.angle = 90
time.sleep(3)
servo_1.angle = 135
time.sleep(3)
servo_1.angle = 45
time.sleep(3)
print("motor-left: " + str(servoPwm_PositionMax) + " " + str(left_normalized))
return 'ok'
"""
# URL for motor control - format: /motor?l=[speed]&r=[speed]
@app.route('/motor')
def motor():
    """Set left/right motor speeds from query args 'l' and 'r' (-100..100).

    Commands are ignored while the chocks (admin lock) are engaged.  As in
    the original test rig, a forward command also drives the matching camera
    servo to one end position and a reverse command to the other.
    Returns 'ok'.
    """
    left = request.args.get('l')
    right = request.args.get('r')
    # Initialize so the debug print below is always defined.  The original
    # printed undefined names left_int/right_int, raising NameError on every
    # request (and left_Int/right_Int were unbound when an arg was missing).
    left_Int = 0
    right_Int = 0
    if left and not cfg.chocks:
        left_Int = int(left)
        cfg.left_motor = left_Int
        if -100 <= left_Int <= 100:
            AutoPHat_SparkFun_Driver.motorLeft_Fn( left_Int )
            # Pan servo: forward -> 0 degrees, reverse -> 180 degrees.
            if left_Int >= 0:
                AutoPHat_SparkFun_Driver.servo_Cam_01_Pan_Fn( 0 )
            else:
                AutoPHat_SparkFun_Driver.servo_Cam_01_Pan_Fn( 180 )
        else:
            print("*** Error: Invalid Value: left_Int: ", left_Int)
    if right and not cfg.chocks:
        right_Int = int(right)
        cfg.right_motor = right_Int
        if -100 <= right_Int <= 100:
            AutoPHat_SparkFun_Driver.motorRight_Fn( right_Int )
            # Tilt servo: forward -> 0 degrees, reverse -> 180 degrees.
            if right_Int >= 0:
                AutoPHat_SparkFun_Driver.servo_Cam_02_Tilt_Fn( 0 )
            else:
                AutoPHat_SparkFun_Driver.servo_Cam_02_Tilt_Fn( 180 )
        else:
            print("*** Error: Invalid Value: right_Int: ", right_Int)
    print("*** DEBUG: motor: l" + str(left_Int) + " r" + str(right_Int))
    return 'ok'
""" jwc o
# URL for joystick input - format: /joystick?x=[x-axis]&y=[y-axis]
@app.route('/joystick')
def joystick():
cfg.watchdog_Cycles_Now = 0
x_axis = int(request.args.get('x'))
y_axis = int(request.args.get('y'))
x_axis = -1 * max( min(x_axis, 100), -100)
y_axis = max( min(y_axis, 100), -100)
v = (100-abs(x_axis)) * (y_axis/100) + y_axis
w = (100-abs(y_axis)) * (x_axis/100) + x_axis
r = int((v+w) / 2)
l = int((v-w) / 2)
if not cfg.chocks:
cfg.right_motor = r
cfg.left_motor = l
hw.motor_one_speed(cfg.right_motor)
hw.motor_two_speed(cfg.left_motor)
return 'ok'
"""
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    stream = gen(Camera())
    return Response(stream, mimetype='multipart/x-mixed-replace; boundary=frame')
# URL to remote control touchpads 1-4 on explorer-hat
@app.route('/touchpad')
def touchpad():
    """Simulate a touchpad press via /touchpad?pad=<1-4>.  Returns 'ok'."""
    pad = request.args.get('pad')
    if pad:
        try:
            touch_handler(int(pad), True)
        except ValueError:
            # Non-numeric pad id previously crashed the request with a 500;
            # ignore it instead.
            pass
    return 'ok'
# URL for heartbeat requests (resets watchdog timer)
# Returns JSON object with status data
@app.route('/heartbeat')
def heartbeat():
    """Reset the watchdog counter and report current status as JSON."""
    cfg.watchdog_Cycles_Now = 0
    status = {
        'b': cfg.blue,
        'y': cfg.yellow,
        'c': cfg.chocks,
        'g': cfg.green,
        'f': cfg.video_fps,
        'v': cfg.video_status,
        'l': cfg.left_motor,
        'r': cfg.right_motor,
        'i1': hw.input_one_read(),
        'i2': hw.input_two_read(),
        'i3': hw.input_three_read(),
        'i4': hw.input_four_read(),
    }
    return json.dumps(status)
if __name__ == '__main__':
    print("*** DEBUG: __main__")
    # Blink green briefly at startup so the operator can see the app boot.
    hw.light_green_blink(0.1)
    time.sleep(1)
    hw.light_green_off()
    # register signal handler for a clean exit
    signal.signal(signal.SIGINT, signal_handler)
    ##jwc o # register handler for touchpads
    ##jwc o if hw.explorerhat:
    ##jwc o     hw.xhat.touch.released(touch_handler)
    # prepare and start watchdog
    # jwc since watchdog happens so much (seems infinite loop and recursive) and interferes w/ debug, thus turn off
    #
    watchDog = threading.Thread(name='watchdog_timer', target=watchdog_timer)
    watchDog.start()
##jwc o app.run(host='0.0.0.0', debug=False, threaded=True)
##
##jwc n app.run(host='192.168.1.80', debug=False, threaded=True)
##jwc to not conflict with other apps
##jwc y app.run(host='0.0.0.0', port=5001, debug=False, threaded=True)
## jwc NameError: name 'WSGIServer' is not defined
##jwc on http_server = WSGIServer(('', 5001), app)
##jwc on http_server.serve_forever()
##jwc y app.run(host='0.0.0.0', port=5001, debug=False, threaded=True)
##jwc n seems to cause rpi crash and video-stream not work: app.run(host='0.0.0.0', port=5001, debug=True, threaded=True)
##jwc y app.run(host='0.0.0.0', port=5001, debug=True, threaded=False)
##jwc y app.run(host='0.0.0.0', port=5001, debug=True, threaded=True)
# jwc: Does 'debug=False' prevents two instances of 'main()'
# jwc: TYJ camera seems to work now, via 'run: start debugging', esp. after rpi reboot
##jwc yo app.run(host='0.0.0.0', threaded=True)
## y app.run(host='0.0.0.0', debug=True, threaded=True)
##jwc y app.run(host='0.0.0.0', threaded=True)
##jwc y app.run(host='0.0.0.0', port=5000, debug=False, threaded=True)
##jwc n app.run(host='0.0.0.0', port=80, debug=False, threaded=True)
app.run(host='0.0.0.0', port=5000, debug=False, threaded=True)
##jwc app.run(host='0.0.0.0', port=8888, debug=False, threaded=True)
|
SignatureDPI.py | import socket
import threading
from scapy.all import *
# Each signature is (exact_tcp_segment_length_or_None, (offset, pattern)):
# a packet matches when its TCP segment has the given length (None = any)
# and carries the byte pattern at the 1-based offset within the segment.
signatureTable = [(71, (70, b'\x90\xf6')), (None, (60, b'\x07\x03\x04')), (346, (54, b'\xf9\xa0\xba\xa8\x81\x8d\xdc'))]
# Bridged interfaces and, per direction, the source MACs whose frames are
# accepted for forwarding.
firstIface = 'eth0'
firstIfaceFlows = ['52:54:00:00:00:65']
secondIface = 'eth1'
secondIfaceFlows = ['52:54:00:a1:54:c0']
def inOutServer():
    """Forward frames from the first interface to the second, marking
    signature-matching HTTPS packets with an IP Record-Route option
    (a simple signature-based DPI tap)."""
    global signatureTable
    global firstIface
    global secondIface
    # Raw capture socket; htons(3) == ETH_P_ALL (all ethertypes).
    inSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3))
    inSocket.bind((firstIface, 0))
    outSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
    outSocket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1514)
    outSocket.bind((secondIface, 0))
    while True:
        pkt = inSocket.recvfrom(1514)
        try:
            et = Ether(bytes(pkt[0]))
            # Only forward frames originating from the tracked MACs.
            if not et.src in firstIfaceFlows:
                continue
        except:
            continue
        if TCP in et:
            if et[IP][TCP].dport == 443:
                for signature in signatureTable:
                    # Length filter (None means "any segment length").
                    if signature[0] != None:
                        if len(et[IP][TCP]) != signature[0]:
                            continue
                    if signature[1] != None:
                        # Pattern must fit entirely inside the segment.
                        if len(et[IP][TCP]) < (signature[1][0] + len(signature[1][1])):
                            continue
                        if len(signature[1][1]) > 1:
                            if bytes(et[IP][TCP])[signature[1][0]-1:(signature[1][0] - 1 + len(signature[1][1]))] != signature[1][1]:
                                continue
                        else:
                            if bytes(et[IP][TCP])[signature[1][0]-1] != signature[1][1][0]:
                                continue
                    # Match: drop the cached length/checksum fields so scapy
                    # recomputes them, and stamp a Record-Route option as the
                    # "detected" marker.
                    del et[IP].ihl
                    del et[IP].len
                    del et[IP].chksum
                    et[IP].options = IPOption_RR()
                    # show2 forces a rebuild of the packet (filling in the
                    # deleted fields); the dumped text itself is discarded.
                    et.show2(dump=True)
                    break
        outSocket.send(bytes(et))
def outInServer():
    """Forward frames from the second interface back to the first, filtered
    by allowed source MACs.  No inspection happens on this return path."""
    global firstIface
    global secondIface
    # Raw capture socket on eth1; htons(3) == ETH_P_ALL.
    rxSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3))
    rxSocket.bind((secondIface, 0))
    txSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
    txSocket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1514)
    txSocket.bind((firstIface, 0))
    while True:
        raw = rxSocket.recvfrom(1514)
        try:
            frame = Ether(bytes(raw[0]))
            if frame.src not in secondIfaceFlows:
                continue
        except:
            continue
        txSocket.send(bytes(frame))
# Run both bridge directions concurrently; the process lives until killed
# (joining inOut blocks the main thread forever).
inOut = threading.Thread(target=inOutServer,args=())
outIn = threading.Thread(target=outInServer,args=())
outIn.start()
inOut.start()
inOut.join()
|
91porn_cookie.py | import requests
import os,re,time,random,threading
def Handler(start, end, url, filename):
    """Download bytes [start, end] of *url* into *filename* at the matching
    offset — one shard of the multi-threaded download."""
    headers = {'Range': 'bytes=%d-%d' % (start, end)}
    with requests.get(url, headers=headers, stream=True) as r:
        with open(filename, "r+b") as fp:
            fp.seek(start)
            # Stream in chunks: the original read r.content, which buffered
            # the whole shard in memory and defeated stream=True (it also
            # stored fp.tell() in an unused variable).
            for chunk in r.iter_content(chunk_size=64 * 1024):
                if chunk:
                    fp.write(chunk)
def download(url, tittle, num_thread = 10):
    """Download *url* into file *tittle* using *num_thread* ranged threads.

    Bails out with a message when the server does not report a usable
    content-length (ranged download is then impossible).
    """
    r = requests.head(url)
    try:
        file_name = tittle
        file_size = int(r.headers['content-length'])
    except (KeyError, ValueError):
        # Narrowed from a bare except: only missing/non-numeric length.
        print("检查URL,或不支持对线程下载")
        return
    # Pre-size the file so every thread can seek and write its own slice.
    with open(file_name, "wb") as fp:
        fp.truncate(file_size)
    part = file_size // num_thread
    for i in range(num_thread):
        start = part * i
        # The last shard absorbs the division remainder.
        end = file_size if i == num_thread - 1 else start + part
        t = threading.Thread(target=Handler,
                             kwargs={'start': start, 'end': end,
                                     'url': url, 'filename': file_name})
        # Thread.setDaemon() is deprecated; set the attribute instead.
        t.daemon = True
        t.start()
    # 等待所有线程下载完成 -> wait for every downloader thread to finish.
    main_thread = threading.current_thread()
    for t in threading.enumerate():
        if t is main_thread:
            continue
        t.join()
    print('%s 下载完成' % file_name)
def random_ip():
    """Return a random dotted-quad IP string; every octet is in 1..255."""
    octets = [str(random.randint(1, 255)) for _ in range(4)]
    return '.'.join(octets)
def get_cookie():
    """Parse cookie.txt ('name=value; name2=value2; ...') into a dict."""
    with open('cookie.txt', 'r') as fh:
        raw = fh.read()
    jar = {}
    for item in raw.split(';'):
        # Split on the first '=' only, so values may themselves contain '='.
        key, value = item.strip().split('=', 1)
        jar[key] = value
    return jar
# Crawl listing pages 1..100; for every video key found, fetch the view page
# with spoofed headers + cookies, extract the .mp4 URL and title, and
# download anything not already on disk.
flag=1
while flag<=100:
    tittle=[]
    base_url='http://91porn.com/view_video.php?viewkey='
    page_url='http://91porn.com/v.php?next=watch&page='+str(flag)
    get_page=requests.get(url=page_url)
    # Extract the per-video viewkeys from the listing HTML.
    viewkey=re.findall(r'<a target=blank href="http://91porn.com/view_video.php\?viewkey=(.*)&page=.*&viewtype=basic&category=.*?">\n <img ',str(get_page.content,'utf-8',errors='ignore'))
    for key in viewkey:
        # Random X-Forwarded-For per request to dodge per-IP view limits.
        headers={'Accept-Language':'zh-CN,zh;q=0.9','User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36','X-Forwarded-For':random_ip(),'referer':page_url,'Content-Type': 'multipart/form-data; session_language=cn_CN'}
        video_url=[]
        img_url=[]
        s = requests.Session()
        base_req=s.get(url=base_url+key,headers=headers,cookies=get_cookie(),verify=False)
        # Scrape the mp4 source, the title, and the poster image URL.
        video_url=re.findall(r'<source src="(.*?)" type=\'video/mp4\'>',str(base_req.content,'utf-8',errors='ignore'))
        tittle=re.findall(r'<div id="viewvideo-title">(.*?)</div>',str(base_req.content,'utf-8',errors='ignore'),re.S)
        img_url=re.findall(r'poster="(.*?)"',str(base_req.content,'utf-8',errors='ignore'))
        try:
            # Normalize the title: strip newlines and spaces.
            t=tittle[0]
            tittle[0]=t.replace('\n','')
            t=tittle[0].replace(' ','')
        except IndexError:
            pass
        # NOTE(review): if the title regex matched nothing on the very first
        # iteration, 't' is unbound here (NameError); on later iterations it
        # silently reuses the previous title — confirm and guard.
        if os.path.exists(str(t))==False:
            try:
                download(str(video_url[0]),str(t)+'.mp4')
            except:
                pass
        else:
            print('已存在文件夹,跳过')
        time.sleep(2)
    flag=flag+1
    print('此页已下载完成,下一页是'+str(flag))
|
plot-data.py | """
This demo demonstrates how to draw a dynamic matplotlib
plot in a wxPython application.
This code is based on Eli Bendersky's code found here:
http://eli.thegreenplace.net/files/prog_code/wx_mpl_dynamic_graph.py.txt
"""
import os
import random
import wx
import serial
import threading
import json
import time
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
import numpy as np
import pylab
import math
class DataSource(object):
    """Produces a dummy sine-wave signal on a background thread and pushes
    each sample into the attached graph frame."""

    # Run flag for the reader thread; close() shadows it per instance.
    read_data = True

    def __init__(self, graph):
        self.graph = graph
        # Generate samples off the GUI thread so the frame stays responsive.
        worker = threading.Thread(target=self.read_loop)
        worker.start()

    def close(self):
        """Ask the reader thread to stop after its current sleep."""
        self.read_data = False

    def read_loop(self):
        """Emit sin(t) every 50 ms until read_data is cleared."""
        phase = 0
        while self.read_data:
            time.sleep(0.05)
            sample = math.sin(phase)
            phase += 0.2
            # Only push into real frames; wx.CallAfter marshals the redraw
            # back onto the GUI thread.
            if isinstance(self.graph, wx.Frame):
                self.graph.update_data(sample)
                wx.CallAfter(self.graph.draw_plot)
wx.CallAfter(self.graph.draw_plot)
class GraphFrame(wx.Frame):
    """ The main frame of the application

    Hosts a matplotlib canvas showing a rolling 50-sample window of the
    sensor signal pushed in by DataSource.
    """
    title = 'Demo'

    def __init__(self):
        wx.Frame.__init__(self, None, -1, self.title)
        # handle window close event
        self.Bind(wx.EVT_CLOSE, self.on_exit)
        # set data source
        self.source = DataSource(self)
        # Accumulated samples; draw_plot shows the last 50.
        self.data = []
        self.create_main_panel()

    def create_main_panel(self):
        """Build the panel and embed the matplotlib canvas in a sizer."""
        self.panel = wx.Panel(self)
        self.init_plot()
        self.canvas = FigureCanvasWxAgg(self.panel, -1, self.fig)
        self.vbox = wx.BoxSizer(wx.VERTICAL)
        self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
        self.panel.SetSizer(self.vbox)
        self.vbox.Fit(self)

    def init_plot(self):
        """Create the figure/axes and the (initially empty) line series."""
        self.fig = Figure((6.0, 3.0), dpi=100)
        self.axes = self.fig.add_subplot(111)
        self.axes.set_axis_bgcolor('black')
        self.axes.set_title('Sensor data', size=12)
        pylab.setp(self.axes.get_xticklabels(), fontsize=8)
        pylab.setp(self.axes.get_yticklabels(), fontsize=8)
        # plot the data as a line series, and save the reference
        # to the plotted line series
        self.plot_data = self.axes.plot(self.data, linewidth=1, color=(1, 1, 0))[0]

    def update_data(self, sensor):
        # Called from the DataSource thread; list.append is atomic enough
        # for this demo.
        self.data.append(sensor)

    def draw_plot(self):
        """ Redraws the plot
        """
        # Show a sliding 50-sample window; y-bounds track the data +/- 1.
        xmax = len(self.data) if len(self.data) > 50 else 50
        xmin = xmax - 50
        ymin = round(min(self.data), 0) - 1
        ymax = round(max(self.data), 0) + 1
        self.axes.set_xbound(lower=xmin, upper=xmax)
        self.axes.set_ybound(lower=ymin, upper=ymax)
        self.axes.grid(True, color='gray')
        pylab.setp(self.axes.get_xticklabels(), visible=True)
        self.plot_data.set_xdata(np.arange(len(self.data)))
        self.plot_data.set_ydata(np.array(self.data))
        self.canvas.draw()

    def on_exit(self, event):
        # Stop the data thread before tearing down the window.
        self.source.close()
        self.Destroy()
if __name__ == '__main__':
    # wx.PySimpleApp was deprecated in wxPython 2.9 and removed in Phoenix
    # (wxPython 4); wx.App(False) is the documented replacement and, like
    # PySimpleApp, does not redirect stdout/stderr.
    app = wx.App(False)
    app.frame = GraphFrame()
    app.frame.Show()
    app.MainLoop()
|
session.py | from os import wait
import threading
import requests
import time
from .common import *
from .synguar_algo import synguar_3H, synguar_singleH
# Keepalive intervals (seconds) sent to each synthesizer backend; 0 disables
# keepalive for StrSTUN.
KEEPALIVE_STRPROSE = 15
KEEPALIVE_STRSTUN = 0


def is_number(x):
    """Return True for plain ints and floats.

    Uses type() comparison (PEP 8 prefers a def over an assigned lambda, so
    this replaces the original lambda).  Note bools are deliberately NOT
    numbers here: type(True) is bool, so the original lambda rejected them
    too — isinstance would not.
    """
    return type(x) == int or type(x) == float
class Session():
    """One synthesis-guarantee run.

    Spawns a worker thread that repeatedly polls the synthesizer API for
    results at increasing sample sizes and feeds them to the synguar
    algorithm until the (epsilon, delta, k) guarantee is satisfied.
    """

    def __init__(self, synthesizer, example_file, epsilon, delta, k, api_endpoint, full_trace_ref):
        self.synthesizer = synthesizer
        self.example_file = example_file
        assert(is_number(epsilon))
        assert(epsilon > 0 and epsilon < 1)
        assert(is_number(delta))
        # BUG FIX: the original re-checked epsilon here instead of delta,
        # so an out-of-range delta was silently accepted.
        assert(delta > 0 and delta < 1)
        assert(is_number(k))
        assert(k > 0 and k == int(k))
        self.epsilon = epsilon
        self.delta = delta
        self.k = k
        self.full_trace_ref = full_trace_ref
        # WAITING -> RUNNING (start) -> DONE (_run_session finished).
        self.running_status = "WAITING"
        self.session_thread = None
        self.api_endpoint = api_endpoint

    def start(self):
        """Launch the worker thread and mark the session RUNNING."""
        self.session_thread = threading.Thread(target=self._run_session)
        print(f"# [session] start: {self.synthesizer} | {self.example_file} | e={self.epsilon} | d={self.delta} | k={self.k} ...")
        self.running_status = "RUNNING"
        self.session_thread.start()

    def wait_for_api_result(self, synthesizer, example_file, example_size, cache_only, no_counting):
        """Poll the API endpoint until the synthesis task for
        (example_file, example_size) reports DONE, then return its result.

        Retries failed/incomplete requests with exponential backoff capped
        at 4 seconds.
        """
        request_data = {
            "synthesizer": synthesizer,
            "example_file": example_file,
            "example_size": example_size,
            "keepalive": None,
            "no_counting": no_counting,
            "_cache_only": cache_only
        }
        if synthesizer == "StrSTUN":
            request_data["keepalive"] = KEEPALIVE_STRSTUN
        elif synthesizer == "StrPROSE":
            request_data["keepalive"] = KEEPALIVE_STRPROSE
        else:
            assert(False)
        resp_data = None
        result_data = None
        wait_time = 0.0625
        while(True):
            resp_data = None
            try:
                resp = requests.post(self.api_endpoint, json=request_data)
                if resp.status_code != 200:
                    # BUG FIX: the original printed resp_data.status_code,
                    # but resp_data is always None here, so the warning path
                    # itself raised AttributeError instead of reporting the
                    # HTTP status.
                    print("# [session] WARNING try send api request failed. status_code:", resp.status_code)
                    raise Exception("Status_Code_Not_200")
                resp_data = resp.json()
            except Exception as e:
                print(bcolors.warn("# [session] WARNING request failed:"), e)
            # check if resp_data is OK
            if resp_data is not None and resp_data["syntask_status"] == "DONE":
                assert(resp_data["syntask_data"]["example_file"] == example_file)
                assert(resp_data["syntask_data"]["example_size"] == example_size)
                result_data = resp_data["result"]
                break
            else:
                time.sleep(wait_time)
                # Exponential backoff, capped at 4 seconds.
                wait_time *= 2
                if wait_time > 4:
                    wait_time = 4
        print(f"# [session] wait_for_api_result Returning {synthesizer} | {example_file} | {example_size} : {result_data['hs']}")
        return result_data

    def _run_session(self):
        """Worker body: dispatch to the synguar variant matching the
        configured synthesizer and record the resulting full trace."""
        def get_result_with_sample_size(sample_size, no_counting=False):
            return self.wait_for_api_result(self.synthesizer, self.example_file, sample_size, cache_only=False, no_counting=no_counting)
        def get_result_with_sample_size_cache_only(sample_size, no_counting=False):
            return self.wait_for_api_result(self.synthesizer, self.example_file, sample_size, cache_only=True, no_counting=no_counting)
        if self.synthesizer == "StrPROSE":
            self.full_trace = synguar_singleH(self.epsilon, self.delta, self.k, self.full_trace_ref, get_result_with_sample_size)
        elif self.synthesizer == "StrSTUN":
            self.full_trace = synguar_3H(self.epsilon, self.delta, self.k, self.full_trace_ref, get_result_with_sample_size)  # get_result_with_sample_size_cache_only
        else:
            print("# [session] ERROR Unknown synthesizer:", self.synthesizer)
            assert(False)
        print(f"# [session] Done. {self.synthesizer} | {self.example_file} | {self.epsilon} | {self.delta} | {self.k}")
        self.running_status = "DONE"
main.py | from flask import Flask, render_template, request, session, redirect, url_for, flash
from threading import Thread
import os
import json
import smtplib
from email.mime.text import MIMEText
app = Flask(__name__)
app.secret_key = os.urandom(24)
@app.route("/")
def home():
    """Landing page."""
    page = render_template("index.html")
    return page
############### OLD
def sendEmail(to):
    """Send the 'switch on but room empty' notification.

    Accepts either a single address string or a list of addresses.  Callers
    in this file pass a bare string; the original indexed to[0], which set
    the To: header to the first *character* of the address.  Returns True
    on success, False on any failure.
    """
    if isinstance(to, str):
        to = [to]
    Email = "no.reply.smart.switch@gmail.com"
    # NOTE(review): PWD is normally the shell's working-directory variable;
    # confirm the deployment really stores the mail password under this key.
    Pwd = os.environ["PWD"]
    sub = "SmartSwitch sensor status update."
    body = f"""
    Your switch is on however the sensor detected that theres no one in the room."""
    msg = MIMEText(body)
    msg["Subject"] = sub
    msg["From"] = Email
    msg["To"] = ", ".join(to)
    try:
        smtp_server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
        smtp_server.ehlo()
        smtp_server.login(Email, Pwd)
        smtp_server.sendmail(Email, to, msg.as_string())
        smtp_server.close()
        print ("Email sent successfully!")
        return True
    except Exception as ex:
        print ("Something went wrong….",ex)
        return False
@app.route("/prototype1/")
def PrototypeHome():
    """Prototype 1 landing page."""
    template = "/prototype1/home/index.html"
    return render_template(template)
# --- prototype1 authentication routes ---
@app.route("/prototype1/login/", methods=["GET", "POST"])
def login():
    """Log a user in against users.json.

    On success the email goes into a permanent session and the user is sent
    to the control panel; on failure the same generic message is flashed
    (no hint whether the email exists).
    """
    if request.method == "POST":
        users = json.load(open("users.json"))
        email = request.form["email"]
        password = request.form["password"]
        account = users["users"].get(email)
        if account is not None and password == account["password"]:
            session.permanent = True
            session["user"] = email
            return redirect(url_for("control"))
        flash("Incorect email or password", "info")
        return redirect(url_for("login"))
    # Already logged in: skip the form.
    if "user" in session:
        return redirect(url_for("control"))
    return render_template("/prototype1/login/index.html")
@app.route("/prototype1/createAccount/", methods=["GET", "POST"])
def createAccount():
    """Register a new account in users.json (plaintext password store)."""
    if request.method == "POST":
        users = json.load(open("users.json"))
        email = request.form["email"]
        password = request.form["password"]
        # Reject duplicate registrations.
        if email in users["users"]:
            flash("Account with this email exists", "info")
            return redirect(url_for("createAccount"))
        users["users"][email] = {"password": password, "switches": {}}
        open("users.json", "w").write(json.dumps(users, indent = 4))
        flash("Account Created", "info")
        return redirect(url_for("login"))
    return render_template("/prototype1/createAccount/index.html")
@app.route("/prototype1/control", methods = ["POST", "GET"])
def control():
    """Switch control panel; requires a logged-in session."""
    if "user" not in session:
        flash("Log in first", "info")
        return redirect(url_for("login"))
    page = render_template("/prototype1/control/index.html")
    return page
@app.route("/switch/<id>/<task>/<e>")
def switch(id, task, e):
    """Switch API used by both the web UI and the hardware.

    Tasks:
      set          -- toggle switch *id* for user *e*, return the new state.
      get          -- return the state of *id*, whichever user owns it.
      add          -- claim an unused hardware id for the logged-in user.
      getAll       -- return the logged-in user's switch dict.
      statusUpdate -- hardware reports "room empty"; email the owner when
                      the switch is still on.
    States are the strings "0"/"1"; persistence is the users.json file.
    """
    if task == "set":
        users = json.load(open("users.json"))
        # Flip "0" <-> "1" and persist.
        state = users["users"][e]["switches"][id]
        if state == "0":
            state = "1"
        elif state == "1":
            state = "0"
        users["users"][e]["switches"][id] = state
        open("users.json", "w").write(json.dumps(users, indent = 4))
        return state
    if task == "get":
        users = json.load(open("users.json"))
        # Scan every user until one owns this switch id.
        for u in users["users"]:
            print(u)
            if id in users["users"][u]["switches"]:
                state = users["users"][u]["switches"][id]
                print("req")
                return state
    if task == "add":
        usableSwitches = json.load(open("usableSwitches.json"))
        if id not in usableSwitches:
            return "ID does not exist"
        users = json.load(open("users.json"))
        # Assign the id to the current session user, default state on.
        users["users"][session["user"]]["switches"][id] = "1"
        usableSwitches.remove(id)
        open("users.json", "w").write(json.dumps(users, indent = 4))
        open("usableSwitches.json", "w").write(json.dumps(usableSwitches, indent = 4))
        return "ID Added"
    if task == "getAll":
        switches = json.load(open("users.json"))["users"][session["user"]]["switches"]
        return switches
    if task == "statusUpdate":
        users = json.load(open("users.json"))
        state = -1
        email = ""
        for u in users["users"]:
            if id in users["users"][u]["switches"]:
                email = u
                state = users["users"][u]["switches"][id]
        if state == "1":
            # NOTE(review): sendEmail is given a single address string here;
            # see sendEmail's handling of its 'to' argument.
            sendEmail(email)
            return "ok"
        return "ok"
    return "This page is not for you."
@app.errorhandler(404)
def not_found(e):
    """SPA-style fallback: serve the index page for unknown URLs."""
    page = render_template('index.html')
    return page
def run():
    # Bind on all interfaces so the hosting environment can route external
    # traffic to the app.
    app.run(host='0.0.0.0')
def keep_alive():
    """Start the web server on a background thread (keep-alive pattern)."""
    server_thread = Thread(target=run)
    server_thread.start()
# Launch the background server when run directly.
if __name__ == "__main__":
    keep_alive()
|
train_multi_gpu.py | """Training IGMC model on the MovieLens dataset."""
import os
import sys
import time
import glob
import random
import argparse
from shutil import copy
import numpy as np
import torch as th
import torch.nn as nn
import torch.optim as optim
import traceback
from functools import wraps
from _thread import start_new_thread
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from model import IGMC
from data import MovieLens
from dataset import MovieLensDataset, collate_movielens
from utils import MetricLogger
os.environ['TZ'] = 'Asia/Shanghai'
time.tzset()
# According to https://github.com/pytorch/pytorch/issues/17199, this decorator
# is necessary to make fork() and openmp work together.
def thread_wrapped_func(func):
    """
    Wraps a process entry point to make it work with OpenMP.

    The wrapped callable executes on a fresh thread; its return value or
    exception (plus the formatted traceback) travels back through a queue
    and is returned / re-raised in the calling thread.
    """
    @wraps(func)
    def decorated_function(*args, **kwargs):
        channel = mp.Queue()

        def _worker():
            outcome, error, tb_text = None, None, None
            try:
                outcome = func(*args, **kwargs)
            except Exception as exc:
                error = exc
                tb_text = traceback.format_exc()
            channel.put((outcome, error, tb_text))

        start_new_thread(_worker, ())
        outcome, error, tb_text = channel.get()
        if error is None:
            return outcome
        assert isinstance(error, Exception)
        # Re-raise the same exception class carrying the worker's traceback.
        raise error.__class__(tb_text)
    return decorated_function
def evaluate(model, loader, device):
    """Return the RMSE of *model* predictions over every batch in *loader*.

    Each batch is (graph_input, labels); the mean squared error is taken
    over the whole dataset before the square root.
    """
    model.eval()
    dev = th.device(device)
    total_sq_err = 0.
    with th.no_grad():
        for batch in loader:
            preds = model(batch[0].to(dev))
            labels = batch[1].to(dev)
            total_sq_err += ((preds - labels) ** 2).sum().item()
    return np.sqrt(total_sq_err / len(loader.dataset))
def adj_rating_reg(model):
    """Adjacent-rating regularizer: sum of squared differences between the
    relation weight matrices of neighboring rating levels, encouraging
    nearby ratings to have similar transformations."""
    if isinstance(model, DistributedDataParallel):
        # Unwrap DDP to reach the underlying convolution layers.
        model = model.module
    arr_loss = 0
    for conv in model.convs:
        # Recompose per-relation weights from the shared basis decomposition:
        # (num_bases, in*out) -> (num_rels, in_feat, out_feat).
        weight = conv.weight.view(conv.num_bases, conv.in_feat * conv.out_feat)
        weight = th.matmul(conv.w_comp, weight).view(conv.num_rels, conv.in_feat, conv.out_feat)
        # Penalize the gap between each rating's weights and the next's.
        arr_loss += th.sum((weight[1:, :, :] - weight[:-1, :, :])**2)
    return arr_loss
def train_epoch(proc_id, n_gpus, model, loss_fn, optimizer, arr_lambda, loader, device, log_interval):
    """Run one training epoch; returns the mean loss over loader.dataset.

    In the multi-GPU case gradients are manually all-reduced (summed, then
    divided by n_gpus) before the optimizer step.  Only process 0 keeps the
    running statistics and prints progress every *log_interval* iterations.
    """
    model.train()
    device = th.device(device)
    epoch_loss = 0.
    iter_loss = 0.
    iter_mse = 0.
    iter_cnt = 0
    iter_dur = []
    for iter_idx, batch in enumerate(loader, start=1):
        t_start = time.time()
        preds = model(batch[0].to(device))
        labels = batch[1].to(device)
        # Task loss plus the adjacent-rating regularizer scaled by arr_lambda.
        loss = loss_fn(preds, labels).mean() + arr_lambda * adj_rating_reg(model)
        optimizer.zero_grad()
        loss.backward()
        if n_gpus > 1:
            # Average gradients across workers before stepping.
            for param in model.parameters():
                if param.requires_grad and param.grad is not None:
                    th.distributed.all_reduce(param.grad.data,
                                              op=th.distributed.ReduceOp.SUM)
                    param.grad.data /= n_gpus
        optimizer.step()
        if proc_id == 0:
            # Accumulate per-sample loss/MSE for logging (rank 0 only).
            epoch_loss += loss.item() * preds.shape[0]
            iter_loss += loss.item() * preds.shape[0]
            iter_mse += ((preds - labels) ** 2).sum().item()
            iter_cnt += preds.shape[0]
            iter_dur.append(time.time() - t_start)
            if iter_idx % log_interval == 0:
                print("Iter={}, loss={:.4f}, mse={:.4f}, time={:.4f}".format(
                    iter_idx, iter_loss/iter_cnt, iter_mse/iter_cnt, np.average(iter_dur)))
                iter_loss = 0.
                iter_mse = 0.
                iter_cnt = 0
    return epoch_loss / len(loader.dataset)
@thread_wrapped_func
def train(proc_id, n_gpus, args, devices, movielens):
    """Worker entry point for (optionally multi-GPU) IGMC training.

    proc_id   -- rank of this worker; rank 0 owns logging and evaluation.
    n_gpus    -- world size; >1 enables NCCL distributed training.
    args      -- parsed CLI namespace produced by config().
    devices   -- list of CUDA device ids indexed by proc_id.
    movielens -- preprocessed MovieLens dataset object.
    """
    # Start up distributed training, if enabled.
    dev_id = devices[proc_id]
    if n_gpus > 1:
        dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
            master_ip='127.0.0.1', master_port='12345')
        world_size = n_gpus
        th.distributed.init_process_group(backend="nccl",
                                          init_method=dist_init_method,
                                          world_size=world_size,
                                          rank=proc_id)
        th.cuda.set_device(dev_id)
    # Set the random seed in each GPU process for reproducibility.
    th.manual_seed(args.seed)
    if th.cuda.is_available():
        th.cuda.manual_seed_all(args.seed)
    # Give each worker an equal contiguous shard of the training ratings.
    train_rating_pairs = th.split(th.stack(movielens.train_rating_pairs),
                                  len(movielens.train_rating_values)//args.n_gpus,
                                  dim=1)[proc_id]
    train_rating_values = th.split(movielens.train_rating_values,
                                   len(movielens.train_rating_values)//args.n_gpus,
                                   dim=0)[proc_id]
    train_dataset = MovieLensDataset(
        train_rating_pairs, train_rating_values, movielens.train_graph,
        args.hop, args.sample_ratio, args.max_nodes_per_hop)
    train_loader = th.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                            num_workers=args.num_workers, collate_fn=collate_movielens)
    # Only rank 0 evaluates, so only it needs the test/validation loader.
    if proc_id == 0:
        if args.testing:
            test_dataset = MovieLensDataset(
                movielens.test_rating_pairs, movielens.test_rating_values, movielens.train_graph,
                args.hop, args.sample_ratio, args.max_nodes_per_hop)
        else:
            # Bug fix: the validation dataset was previously built with
            # valid_rating_pairs passed twice; it must receive the
            # validation rating *values* as the second argument.
            test_dataset = MovieLensDataset(
                movielens.valid_rating_pairs, movielens.valid_rating_values, movielens.train_graph,
                args.hop, args.sample_ratio, args.max_nodes_per_hop)
        test_loader = th.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
                                               num_workers=args.num_workers, collate_fn=collate_movielens)
    model = IGMC(in_feats=(args.hop+1)*2,
                 latent_dim=[32, 32, 32, 32],
                 num_relations=5, #dataset_base.num_rating,
                 # Consistency fix: honor the --num_rgcn_bases flag (its
                 # default of 4 matches the previous hard-coded value).
                 num_bases=args.num_rgcn_bases,
                 regression=True,
                 edge_dropout=args.edge_dropout,
                 # side_features=args.use_features,
                 # n_side_features=n_features,
                 # multiply_by=args.multiply_by
                 ).to(dev_id)
    if n_gpus > 1:
        model = DistributedDataParallel(model, device_ids=[dev_id], output_device=dev_id)
    loss_fn = nn.MSELoss().to(dev_id)
    optimizer = optim.Adam(model.parameters(), lr=args.train_lr, weight_decay=0)
    if proc_id == 0:
        print("Loading network finished ...\n")
        # prepare the logger
        logger = MetricLogger(args.save_dir, args.valid_log_interval)
        best_epoch = 0
        best_rmse = np.inf
        print("Start training ...")
    for epoch_idx in range(1, args.train_epochs+1):
        if proc_id == 0:
            print('Epoch', epoch_idx)
        train_loss = train_epoch(proc_id, n_gpus,
                                 model, loss_fn, optimizer, args.arr_lambda,
                                 train_loader, dev_id, args.train_log_interval)
        if n_gpus > 1:
            th.distributed.barrier()
        if proc_id == 0:
            test_rmse = evaluate(model, test_loader, dev_id)
            eval_info = {
                'epoch': epoch_idx,
                'train_loss': train_loss,
                'test_rmse': test_rmse,
            }
            print('=== Epoch {}, train loss {:.6f}, test rmse {:.6f} ==='.format(*eval_info.values()))
            # Step LR decay on a fixed epoch schedule.
            if epoch_idx % args.train_lr_decay_step == 0:
                for param in optimizer.param_groups:
                    param['lr'] = args.train_lr_decay_factor * param['lr']
            logger.log(eval_info, model, optimizer)
            if best_rmse > test_rmse:
                best_rmse = test_rmse
                best_epoch = epoch_idx
        if n_gpus > 1:
            th.distributed.barrier()
    if proc_id == 0:
        eval_info = "Training ends. The best testing rmse is {:.6f} at epoch {}".format(best_rmse, best_epoch)
        print(eval_info)
        with open(os.path.join(args.save_dir, 'log.txt'), 'a') as f:
            f.write(eval_info)
def config():
    """Parse CLI arguments, create the timestamped run directory, snapshot
    the source files and command line into it, and return the namespace.

    Side effects: creates ``args.save_dir``, copies every ``*.py`` file in
    the working directory into it, and appends the command line to
    ``cmd_input.txt``.
    """
    parser = argparse.ArgumentParser(description='IGMC')
    # general settings
    parser.add_argument('--testing', action='store_true', default=False,
                        help='if set, use testing mode which splits all ratings into train/test;\
                        otherwise, use validation model which splits all ratings into \
                        train/val/test and evaluate on val only')
    parser.add_argument('--gpu', default='0', type=str,
                        help="Comma separated list of GPU device IDs.")
    parser.add_argument('--seed', type=int, default=1234, metavar='S',
                        help='random seed (default: 1234)')
    parser.add_argument('--data_name', default='ml-100k', type=str,
                        help='The dataset name: ml-100k, ml-1m')
    parser.add_argument('--data_test_ratio', type=float, default=0.1)  # for ml-100k the test ration is 0.2
    parser.add_argument('--num_workers', type=int, default=8)
    parser.add_argument('--data_valid_ratio', type=float, default=0.2)
    # parser.add_argument('--ensemble', action='store_true', default=False,
    #                     help='if True, load a series of model checkpoints and ensemble the results')
    parser.add_argument('--train_log_interval', type=int, default=100)
    parser.add_argument('--valid_log_interval', type=int, default=10)
    parser.add_argument('--save_appendix', type=str, default='debug',
                        help='what to append to save-names when saving results')
    # subgraph extraction settings
    # Bug fix: '--hop' had no type=, so a command-line value arrived as a
    # str and broke arithmetic such as (args.hop + 1) in train().
    parser.add_argument('--hop', default=1, type=int, metavar='S',
                        help='enclosing subgraph hop number')
    parser.add_argument('--sample_ratio', type=float, default=1.0,
                        help='if < 1, subsample nodes per hop according to the ratio')
    parser.add_argument('--max_nodes_per_hop', type=int, default=200,
                        help='if > 0, upper bound the # nodes per hop by another subsampling')
    # parser.add_argument('--use_features', action='store_true', default=False,
    #                     help='whether to use node features (side information)')
    # edge dropout settings
    parser.add_argument('--edge_dropout', type=float, default=0.2,
                        help='if not 0, random drops edges from adjacency matrix with this prob')
    parser.add_argument('--force_undirected', action='store_true', default=False,
                        help='in edge dropout, force (x, y) and (y, x) to be dropped together')
    # optimization settings
    parser.add_argument('--train_lr', type=float, default=1e-3)
    parser.add_argument('--train_min_lr', type=float, default=1e-6)
    parser.add_argument('--train_lr_decay_factor', type=float, default=0.1)
    parser.add_argument('--train_lr_decay_step', type=int, default=50)
    parser.add_argument('--train_epochs', type=int, default=80)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--arr_lambda', type=float, default=0.001)
    parser.add_argument('--num_rgcn_bases', type=int, default=4)
    args = parser.parse_args()
    args.devices = list(map(int, args.gpu.split(',')))
    args.n_gpus = len(args.devices)
    ### set save_dir according to localtime and test mode
    file_dir = os.path.dirname(os.path.realpath('__file__'))
    val_test_appendix = 'testmode' if args.testing else 'valmode'
    local_time = time.strftime('%y%m%d%H%M', time.localtime())
    args.save_dir = os.path.join(
        file_dir, 'log/{}_{}_{}_{}'.format(
            args.data_name, local_time, args.save_appendix, val_test_appendix
        )
    )
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    print(args)
    # backup current .py files
    for f in glob.glob(r"*.py"):
        copy(f, args.save_dir)
    # save command line input
    cmd_input = 'python3 ' + ' '.join(sys.argv)
    with open(os.path.join(args.save_dir, 'cmd_input.txt'), 'a') as f:
        f.write(cmd_input)
        f.write("\n")
    print('Command line input: ' + cmd_input + ' is saved.')
    return args
if __name__ == '__main__':
    args = config()
    # Seed the Python and NumPy RNGs here; per-process torch/CUDA seeding
    # happens inside train().
    random.seed(args.seed)
    np.random.seed(args.seed)
    movielens = MovieLens(args.data_name, testing=args.testing,
                          test_ratio=args.data_test_ratio, valid_ratio=args.data_valid_ratio)
    if args.n_gpus == 1:
        # Single GPU: run the worker in-process.
        train(0, args.n_gpus, args, args.devices, movielens)
    else:
        # Multi GPU: spawn one process per device; NCCL init happens in train().
        procs = []
        for proc_id in range(args.n_gpus):
            p = mp.Process(target=train, args=(proc_id, args.n_gpus, args, args.devices, movielens))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()
|
MasterService.py | # app.py
import os, sys, time
import json
from threading import Thread
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from webwhatsapi import WhatsAPIDriver
from pprint import pprint
# from ServiceImporter import *
import requests
# export PATH="$HOME/wholesomegarden/WhatsappReminder:$PATH"
from ServiceLoader import *
# self.runLocal = False
class MasterService(object):
    # Built-in "Master" service for the WhatsApp bot platform: dispatches
    # the platform-level commands (subscribe '=', unsubscribe '-', group
    # creation, service listing) and owns the shared db handle.
    # shares = []
    # db = {
    # "masters":["972512170493", "972547932000"],
    # "users":{"id":{"services":{"groupID":None}}},
    # "services":{"Reminders":{"dbID":None,"incomingTarget":None},"Proxy":{"dbID":None,"incomingTarget":None},"Danilator":{"dbID":None,"incomingTarget":None}},
    # "groups": {"id":"service"},
    # "id":"972547932000-1610379075@g.us"}
    # services = {}
    # Service identity and display metadata shown to users.
    id = "Master"
    name = "✨WhatsappMaster✨"
    welcome = "Welcome to ✨WhatsappMaster✨ \nCheck out our services!"
    help = "send a message to get it back"
    # imageurl = "https://businesstech.co.za/news/wp-content/uploads/2020/09/WhatsApp-logo.png"
    imageurl = "https://p.kindpng.com/picc/s/247-2476548_logo-de-whatsapp-en-colores-hd-png-download.png"
    shortDescription = "Whatsapp Service Platform"
    # Class-level singleton reference; set to the instance in __init__.
    share = None
    # Overwritten with the live command table in __init__.
    examples = {"services":{"text":"Show Public Services","thumbnail":None}}
    # publicServices = ["Danilator", "Reminders", "Music"]
    ''' start master driver and log in '''
    def __init__(self, db, api, master):
        """Bind the shared db, the platform api and the master driver.

        Also publishes this instance as the class-level singleton and
        builds the command-prefix -> handler dispatch table.
        """
        MasterService.share = self
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! MasterService",MasterService.share)
        self.db = db
        self.api = api
        self.runLocal = master.runLocal
        # self.master.db = db
        # self.master.services = services
        # self.master.driver = driver
        self.master = master
        # Dispatch table used by runCommands(); "subscribe" has no handler yet.
        self.commands = {"subscribe":None,"group":self.createGroup,"=":self.subscribeToService,"-":self.unsubscribe, "/":self.findElement, "services":self.showServices}
        MasterService.examples = self.commands
    def findElement(self, data):
        """Debug command ('/'): treat the message text as an XPath and
        click the matching element in the WhatsApp Web UI via selenium.

        data -- [text, chatID, senderID] as packed by runCommands().
        """
        text, chatID, senderID = data
        # if text[0] is "/":
        # "//div[@class='VPvMz']/div/div/span[@data-testid='menu']"
        print("##################################")
        print("##################################")
        print("#####         #######")
        print("##################################")
        print("##################################", text)
        # tryOut wraps the raw selenium lookup; click=True clicks the hit.
        dotsSide = self.master.driver.tryOut(self.master.driver.driver.find_element_by_xpath,text,click=True)
    def showServices(self, data):
        """Command 'services': send the requester one invite message per
        public service, each with its thumbnail.

        data -- [text, chatID, senderID] as packed by runCommands().
        """
        text, chatID, senderID = data
        # self.master.sendMessage(chatID, text)
        # time.sleep(1)
        self.master.sendMessage(chatID, "*List of Public Services:*")
        time.sleep(1.5)
        for service in self.master.publicServices:
            # Small delay between messages to keep ordering in the chat.
            time.sleep(0.2)
            text, thumb = self.master.inviteToService(service=service,fromChat = chatID, public = True)
            print("TTTTTTTTTTTTTTTTTT")
            print(text, thumb)
            self.master.sendMessage(chatID, text, thumbnail = thumb)
    def subscribeToService(self, data):
        """Command '=<service>': subscribe the sender to a service.

        If the sender already has a live group for the service, re-sends
        its invite link; otherwise creates a new group, records it in the
        db under the sender, and posts the service's welcome message.
        Service matching is case-insensitive. Persists the db if changed.

        data -- [text, chatID, senderID] as packed by runCommands().
        """
        text, chatID, senderID = data
        ''' person registering service with ='''
        target = text[1:]
        dbChanged = False
        now = False
        ''' check target service in db '''
        serviceFound = False
        serviceChat = ""
        for service in self.master.services:
            print("______________ ----------"+service)
            print("")
            if not serviceFound and target.lower() == service.lower():
                target = service
                ''' service found '''
                serviceFound = True
                # Lazily create the db buckets on first use.
                if "users" not in self.master.db:
                    self.master.db["users"] = {}
                if "groups" not in self.master.db:
                    self.master.db["groups"] = {}
                if senderID not in self.master.db["users"]:
                    self.master.db["users"][senderID] = {}
                    dbChanged = True
                    ''' first time user '''
                    # self.master.db["users"][senderID] = {'services': {'Reminders': {'groupID': None}}}
                else:
                    pass
                    ''' known user '''
                # Look for an existing group chat for this (user, service) pair.
                foundChat = None
                if service in self.master.db["users"][senderID]:
                    serviceChat = self.master.db["users"][senderID][service]
                    print("#########################################################")
                    # self.master.driver.sendMessage(senderID,"You are already subscirbed to: "+target+" \nYou can unsubscribe with -"+target.lower())
                    if serviceChat is not None:
                        try:
                            foundChat = self.master.driver.get_chat_from_id(serviceChat)
                        except:
                            print('chat could not be found')
                # Prefer the service object's display name / welcome text when available.
                chatName = target
                welcome = "Thank you for Subscribing to "+target
                try:
                    chatName = self.master.services[service]["obj"].name
                    welcome = "Thank you for Subscribing to "+chatName
                    welcome = self.master.services[service]["obj"].welcome
                except:
                    pass
                if foundChat is not None:
                    # check_participents = False
                    # if check_participents:
                    # if senderID in foundChat.get_participants_ids() or True:
                    # '''##### check that user is participant '''
                    # self.master.driver.sendMessage(chatID,"You are already subscirbed to: "+chatName+" \nYou can unsubscribe with -"+target.lower())
                    # self.master.driver.sendMessage(serviceChat,"subscirbed to: "+chatName)
                    # else:
                    # foundChat = None
                    if serviceChat in self.master.db["groups"]:
                        # Existing subscription: resend the stored invite link.
                        gotLink = False
                        groupName = service
                        path = self.download_image()
                        inviteLink = ""
                        print("$$$$$$$$$$$$$$$$$$$$$$$")
                        print(serviceChat, self.master.db["groups"][serviceChat] )
                        if serviceChat in self.master.db["groups"] and self.master.db["groups"][serviceChat] is not None and "invite" in self.master.db["groups"][serviceChat]:
                            if self.master.db["groups"][serviceChat]["invite"] is not None:
                                inviteLink = self.master.db["groups"][serviceChat]["invite"]
                                gotLink = True
                        if service in self.master.services and "obj" in self.master.services[service] and self.master.services[service]["obj"] is not None:
                            groupName = self.master.services[service]["obj"].name
                            imageurl = self.master.services[service]["obj"].imageurl
                            if imageurl is not None:
                                path = self.download_image(service=service,pic_url=imageurl)
                        content = "You are already subscirbed to:\n"+chatName+" \n"
                        if gotLink:
                            content+= inviteLink
                        # content+="\n"+"You can unsubscribe with -"+target.lower()
                        if gotLink:
                            res = self.master.driver.send_message_with_thumbnail(path,chatID,url=inviteLink,title="Open "+groupName,description="xxx",text=content)
                        else:
                            self.master.driver.sendMessage(chatID,content)
                        self.master.driver.sendMessage(serviceChat,"subscirbed to: "+chatName)
                    else:
                        # Group chat exists on WhatsApp but not in our db: rebuild it.
                        foundChat = None
                ''' create new group '''
                if foundChat is None: #NGN
                    print(
                        '''
                        ===============================================
                        ''' + senderID +" CREATING NEW GROUP "+ target +" :D "+'''
                        ===============================================
                        '''
                    )
                    self.master.driver.sendMessage(chatID,"Creating group: "+chatName+" \nPlease wait a moment :)")
                    groupName = service
                    path = self.download_image()
                    obj = None
                    if service in self.master.services and "obj" in self.master.services[service] and self.master.services[service]["obj"] is not None:
                        obj = self.master.services[service]["obj"]
                        groupName = obj.name
                        imageurl = obj.imageurl
                        if imageurl is not None:
                            path = self.download_image(service=service,pic_url=imageurl)
                    imagepath = path
                    newGroupID, groupInvite = self.master.driver.newGroup(newGroupName = groupName, number = "+"+senderID.split("@")[0], local = self.runLocal, image=imagepath)
                    # newGroupID = newGroup.id
                    self.newG = newGroupID
                    # if self.master.db
                    # Record the new group under both the user and groups buckets.
                    link = self.master.newRandomID()
                    self.master.db["users"][senderID][service] = newGroupID
                    self.master.db["groups"][newGroupID] = {"service":target, "invite":groupInvite, "user":senderID, "link":link}
                    dbChanged = True
                    now = True
                    print(
                        '''
                        ===============================================
                        ''' + senderID +" is NOW SUBSCRIBED TO "+ target +" :D "+'''
                        ===============================================
                        '''
                    )
                    res = self.master.driver.send_message_with_thumbnail(path,chatID,url=groupInvite,title="Open "+groupName,description="BBBBBBBB",text="Thank you! you are now subscribed to: "+chatName+" \n"+str(groupInvite)+"\nPlease check your new group :)")
                    # self.master.driver.sendMessage(senderID,"Thank you! you are now subscribed to: "+chatName+" \n"+str(groupInvite)+"\nPlease check your new group :)")
                    # Append the service's clickable usage examples to the welcome text.
                    toAdd = ""
                    if obj is not None:
                        if len(obj.examples) > 0:
                            toAdd += "\n\n"
                            toAdd += "See Examples: (click the link or type)\n"
                            for example in obj.examples:
                                key = example
                                answer = key
                                text = ""
                                if "answer" in obj.examples[key]:
                                    answer = obj.examples[key]["answer"]
                                if "text" in obj.examples[key]:
                                    text = obj.examples[key]["text"]
                                toAdd += "*"+answer+"* : "+text+"\n"
                                toAdd += self.master.baseURL + link +"/"+key + "\n\n"
                    self.master.driver.sendMessage(newGroupID,welcome+toAdd)
                    # self.master.driver.sendMessage(serviceChat,"subscirbed to: "+target)
        if not serviceFound:
            self.master.driver.sendMessage(chatID,target+" : is not recognized as a service "+target)
            print(
                '''
                ===============================================
                SERVICE '''+ target +" IS NOT AVAILABLE"+'''
                ===============================================
                '''
            )
        if dbChanged:
            self.master.backup()
def unsubscribe(self,data):
text, chatID, senderID = data
''' person unsubscribing service with -'''
target = text[1:]
dbChanged = False
now = False
''' check target service in db '''
serviceFound = False
for service in self.master.services:
print("______________ ----------"+service)
print("")
if not serviceFound and target.lower() == service.lower():
target = service
''' service found '''
serviceFound = True
if senderID not in self.master.db["users"]:
self.master.db["users"][chatID] = {}
dbChanged = True
''' first time user '''
# self.master.db["users"][senderID] = {'services': {'Reminders': {'groupID': None}}}
else:
pass
''' known user '''
foundChat = None
if chatID in self.master.db["groups"]:
if "service" in self.master.db["groups"][chatID] and target == self.master.db["groups"][chatID]["service"]:
self.master.db["groups"][chatID]["service"] = None
if service in self.master.db["users"][senderID]:
serviceChat = self.master.db["users"][senderID][service]
# self.master.driver.sendMessage(senderID,"You are already subscirbed to: "+target+" \nYou can unsubscribe with -"+target.lower())
if serviceChat is not None:
try:
oldGroup = self.master.db["users"][senderID].pop(service)
if oldGroup in self.master.db["groups"]:
self.master.db["groups"].pop(oldGroup)
self.master.driver.sendMessage(oldGroup,"Unsubscribing from: *"+service+"*")
self.master.driver.sendMessage(chatID,"Unsubscribing from: *"+service+"*")
print("UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU")
print("UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU")
print("UUUUUUU UNSUBSCRIBING UUUUUUUUUUUU")
print("UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU")
print("UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU")
print("UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU",chatID,service)
dbChanged = True
now = True
except:
print('chat could not be found')
else:
self.master.driver.sendMessage(chatID,"you are not subscirbed to: *"+service+"*"+"\n"+chatID+" / "+senderID)
if not serviceFound:
self.master.driver.sendMessage(chatID,"you are not subscirbed to: *"+service+"*")
if dbChanged:
self.master.backup()
# def createGroup(self, data, service = "Master", masterGroup = True, emptyNumber ="972543610404"):
def createGroup(self, data, service = "Master", masterGroup = True, emptyNumber ="972547932000", removeEmpty = True):
text, chatID, senderID = data
if text is not None and len(text.split("group")) > 1:
check = text.split("group")[1]
print("CCCCCCCCCCCCCCCC",check)
print("CCCCCCCCCCCCCCCC",check)
print("CCCCCCCCCCCCCCCC",check)
print("CCCCCCCCCCCCCCCC",check)
if len(check) > 1:
if "/" is check[0]:
check = check[1:]
foundService = None
for serv in self.master.services:
print(check.lower(), serv.lower())
if check.lower() == serv.lower():
foundService = serv
if foundService is not None:
service = foundService
if masterGroup:
senderID = emptyNumber
target = service
print(
'''
===============================================
''' + senderID +" CREATING NEW GROUP "+ target +" :D "+'''
===============================================
'''
)
groupName = service
path = self.download_image()
obj = None
if service in self.master.services and "obj" in self.master.services[service] and self.master.services[service]["obj"] is not None:
obj = self.master.services[service]["obj"]
groupName = self.master.services[service]["obj"].name
imageurl = self.master.services[service]["obj"].imageurl
if imageurl is not None:
path = self.download_image(service=service,pic_url=imageurl)
imagepath = path
newGroupID, groupInvite = self.master.driver.newGroup(newGroupName = groupName, number = "+"+senderID.split("@")[0], local = self.runLocal, image=imagepath)
# newGroupID = newGroup.id
welcome = "WELCOME TO WHATSAPP MASTER"
self.newG = newGroupID
# if service is not "Master":
# self.master.db["users"][senderID][service] = newGroupID
# self.master.db["groups"][newGroupID] = {"service":target, "invite":groupInvite, "link":self.master.newRandomID(), "user":senderID}
# print(
# '''
# ===============================================
# ''' + senderID +" is NOW SUBSCRIBED TO "+ target +" :D "+'''
# ===============================================
# '''
# )
# self.master.driver.sendMessage(senderID,"Thank you! you are now subscribed to: "+chatName+" \n"+str(groupInvite)+"\nPlease check your new group :)")
if obj is not None:
# print("WAIT 5")
# time.sleep(1)
# imageurl = "https://scontent.ftlv6-1.fna.fbcdn.net/v/t1.0-9/s960x960/90941246_10158370682234287_4145441832110653440_o.jpg?_nc_cat=110&ccb=2&_nc_sid=825194&_nc_ohc=8s_3FhJStQUAX-yKU8c&_nc_ht=scontent.ftlv6-1.fna&tp=7&oh=cc43986a0035414deb90a706d7b7fc2b&oe=602D4239"
#
# time.sleep(5)
# self.master.setGroupIcon(newGroupID, obj.imageurl)
# print("WAIT 5")
pass
if masterGroup:
if "availableChats" not in self.master.db:
self.master.db["availableChats"] = {}
if service not in self.master.db["availableChats"]:
self.master.db["availableChats"][service] = {}
# time.sleep(1)
# self.master.driver.remove_participant_group(newGroupID,senderID+"@c.us")
if removeEmpty:
code = "WAPI.removeParticipantGroup('"+newGroupID+"', '"+senderID+"@c.us"+"')"
self.master.driver.driver.execute_script(script=code)
if obj is not None:
imageurl = obj.imageurl
# imageurl = "https://aux2.iconspalace.com/uploads/whatsapp-flat-icon-256.png"
# imageurl = ""
self.master.setGroupIcon(newGroupID, imageurl)
# time.sleep(1)
# code = "WAPI.getMetadata('"+newGroupName+"', '"+number+"@c.us"+"')"
# time.sleep(10)
self.master.driver.sendMessage(newGroupID,welcome)
# print("##############################")
self.master.db["availableChats"][service][newGroupID] = groupInvite
# print("##############################")
# print(self.master.db["availableChats"])
# self.waitForNewParticipant(newGroupID)
if chatID is not None:
res = self.master.driver.send_message_with_thumbnail(path,chatID,url=groupInvite,title="Open "+groupName,description="BBBBBBBB",text="Creating empty group: "+groupName+" \n"+str(groupInvite)+"\nCheck it out :)")
self.master.backup()
return newGroupID, groupInvite
def runCommands(self, text, chatID, senderID):
foundCommand = False
cmd = ""
if text[0] in self.commands:
cmd = text[0]
else:
cmd = text.split("/")[0]
#
# if "/" in text:
# cmd = text.split("/")[0]
print("RUNNING COMMANDS....")
if cmd in self.commands:
''' RUN COMMAND '''
print("RUNNING COMMANDS....",cmd)
res = self.commands[cmd]([text, chatID, senderID])
foundCommand = True
return foundCommand
    def ProcessChat(self,message):
        """Handle one incoming webwhatsapi message: resolve chat/sender ids
        and route text messages through runCommands().

        NOTE(review): assumes ``message`` is a webwhatsapi message object
        exposing content/chat_id/sender/get_js_obj -- confirm with driver
        version in use.
        """
        print("MMMMMMMMMMX",message.content)
        # Normalize the chat id across driver variants (firefox returns a dict).
        chatID = ""
        if self.runLocal and False: #for firefox
            chatID = message.chat_id["_serialized"]
        else:
            chatID = message.chat_id
        print("!!!!!!!!!!!!!!!!!!!")
        try:
            chat = self.master.driver.get_chat_from_id(chatID)
        except Exception as e:
            print(" ::: ERROR - _serialized chatID ::: "+chatID+" ::: ","\n",e,e.args,"\n")
        ''' incoming from: '''
        ''' Personal Chat '''
        print("!!!!!!!!!!!!!!!!!!!!!")
        senderName = message.get_js_obj()["chat"]["contact"]["formattedName"]
        senderID = message.sender.id
        fromGroup = False
        print("!!!!!!!!!!!!!!!!!!!",chatID)
        # NOTE(review): `or True` makes this branch unconditional -- the
        # personal-vs-group distinction is effectively disabled.
        if "c" in chatID or True:
            print(
                '''
                ===================================
                Incoming Messages from '''+senderID+" "+senderName+'''
                ===================================
                '''
            )
            print("!!!!!!!!!!!!!!!!!!!")
            if message.type == "chat":
                text = message.content
                run = self.runCommands(text, chatID, senderID)
                if not run:
                    print("======== NO COMMANDS FOUND =======", text)
            ''' SENT FROM GROUP CHAT '''
def go(self):
while(False):
time.sleep(1)
def process(self, info):
origin, user, content = None, None, None
if "origin" in info:
origin = info["origin"]
if "user" in info:
user = info["user"]
if "content" in info:
content = info["content"]
print("@@@@@@")
print("@@@@@@")
print("@@@@@@")
run = self.runCommands(content, origin, user)
if not run:
print("======== NO COMMANDS FOUND =======", content)
print("@@@@@@")
print("@@@@@@")
# self.api.send(origin, "WHATSAPPMASTER SERVICE\n"+content, thumnail = "test")
print("XXXXXXXXXXXXXXXZ")
print("XXXXXXXXXXXXXXXZ")
print("XXXXXXXXXXXXXXXZ")
print("XXXXXXXXXXXXXXXZ")
print("XXXXXXXXXXXXXXXZ")
# if "users" not in self.db:
# self.db["users"] = {}
#
# if user not in self.db["users"]:
# self.db["users"][user] = user
# self.api.send(origin, "WELCOME "+user)
# self.backup()
#
# res = self.master.driver.send_message_with_thumbnail(path,origin,url=myLink,title="Invite to "+groupName,description="BBBBBBBB",text="This is a link to join: "+groupName+" \n"+str(myLink)+"\nPlease check it out :)")
# self.api.send(origin, sendBack)
# self.db["upcoming"].append([origin, sendBack])
def backup(self):
self.api.backup(self.db)
def updateDB(self, db):
self.db = db
def makeDirs(self, filename):
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
    def download_image(self, service="test", pic_url="https://img-authors.flaticon.com/google.jpg", img_name = 'thumnail.jpg'):
        """Download ``pic_url`` into ``<service>/<img_name>`` and return the
        file's absolute path; returns None when any argument is None.

        NOTE(review): the streamed requests response is never closed, and
        a non-OK response still leaves an (empty) file on disk -- confirm
        this best-effort behavior is intended.
        """
        if service is None or pic_url is None or img_name is None:
            return None
        final_path = service+"/"+img_name
        self.makeDirs(final_path)
        with open(final_path, 'wb') as handle:
            response = requests.get(pic_url, stream=True)
            if not response.ok:
                print(response)
            # Stream the body to disk in 1 KiB chunks.
            for block in response.iter_content(1024):
                if not block:
                    break
                handle.write(block)
        return os.path.abspath(final_path)
# def send(self, api, service, target, content, thumnail = None):
# sendThread = Thread(target = self.sendAsync, args = [[api, service, target, content, thumnail]])
# sendThread.start()
# #UX WELCOME AFTER SUBSCIBING TO
# def sendAsync(self, data):
# api, service, target, content, thumbnail = data
# print("!!!!!!!!!!!!")
# if service in self.master.services:
# if self.master.services[service]["api"] is api:
# if target in self.master.db["groups"] and "service" in self.master.db["groups"][target] and service.lower() == self.master.db["groups"][target]["service"].lower():
# if thumbnail is not None:
# print("T T T T T T")
# print("T T T T T T")
# print("T T T T T T")
# imageurl = "https://media1.tenor.com/images/7528819f1bcc9a212d5c23be19be5bf6/tenor.gif"
# title = "AAAAAAAAAA"
# desc = "BBBBBBB"
# link = imageurl
# if "imageurl" in thumbnail:
# imageurl = thumbnail["imageurl"]
# if "title" in thumbnail:
# title = thumbnail["title"]
# if "desc" in thumbnail:
# desc = thumbnail["desc"]
# if "link" in thumbnail:
# link = thumbnail["link"]
#
# path = self.download_image(service = service, pic_url=imageurl)
#
# # metadata = self.master.driver.get_group_metadata(target)
# # print()
# # print(metadata)
# # print()
#
# res = self.master.driver.send_message_with_thumbnail(path,target,url=link,title=title,description=desc,text=content)
# print(res)
# print("!!!!!!!!!!!!!!")
# return res
# return self.master.driver.sendMessage(target, content)
def loadDB(self, number = None):
if number is None:
number = self.master.db["id"]
return self.master.driver.loadDB(number = number)
#
# def backupService(self, db = None, service = None, api = None):
# data = [db,service]
# # self.backupServiceAsync(data)
# if service in self.master.services:
# if self.master.services[service]["api"] is api:
# bT = Thread(target = self.backupServiceAsync,args = [data])
# bT.start()
#
# def backupServiceAsync(self,data):
# time.sleep(self.master.db["backupDelay"])
# db, service = data
# print("SSSSSSSSS",service,db)
# if time.time() - self.master.db["lastBackupServices"] < self.master.db["backupInterval"]:
# return False
#
# if service is None or len(service) == 0:
# return None
#
# backupChat = None
# if service in self.master.db["servicesDB"]:
# chatID = self.master.db["servicesDB"][service]["dbID"]
# if chatID is not None:
# bchat = None
# try:
# bchat = self.master.driver.getChat(chatID)
# except Exception as e:
# print(" ::: ERROR - COULD NOT GET BACKUPCHAT",e," ::: ","\n")
# traceback.print_exc()
# if bchat is not None:
# print("FFFFFFFFFFFFFFFUCKKK")
# # self.master.driver.sendMessage(chatID,"FFFFFFFFFFFFFFFUCKKK")
#
# backupChat = chatID
# else:
# print(" ::: ERROR - SERVICE HAS NO BACKUPCHAT"+" ::: ","\n")
#
#
# if backupChat is not None:
# if db is not None:
# return self.master.driver.updateDB(db,number=backupChat)
# else:
# return self.loadDB(backupChat)
# else:
# print(" ::: ERROR - BackupChat NOT FOUND for :"+service+": service ::: \n")
# self.master.db["lastBackupServices"] = time.time()
# def backup(self, now = None):
# bT = Thread(target = self.backupAsync,args = [now])
# bT.start()
#
# def backupAsync(self,data):
now = data
if now is None:
time.sleep(self.master.db["backupDelay"])
if time.time() - self.master.db["lastBackup"] < self.master.db["backupInterval"]:
return False
self.master.db["lastBackup"] = time.time()
return self.master.driver.updateDB(self.master.db,number=self.master.db["id"])
def quit(self):
self.master.driver.quit()
    def Nothing(data):
        # Placeholder / no-op handler that just echoes its argument.
        # NOTE(review): defined without ``self`` -- when looked up on an
        # instance, the bound call passes the instance itself as ``data``;
        # confirm callers invoke it unbound.  The "Nothign" typo is part of
        # the runtime string and is left untouched.
        print(":::Nothign::: DATA=",data)
def welcomeUser(self, origin):
    # Register a first-time sender: print a loud banner so registration is
    # easy to spot in the logs, record the origin id in the "users" table,
    # and persist the change via backup().
    print("WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
    print("WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
    print("WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
    print("WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
    if "users" not in self.db:
        self.db["users"] = {}
    if origin not in self.db["users"]:
        # Map the origin id to itself (presence marker).
        self.db["users"][origin] = origin
        # NOTE(review): backup() assumed to run only when a new user was
        # added (original indentation was ambiguous) — confirm.
        self.backup()
# def ProcessIncoming0(self, data):
# print(
# '''
# ===================================
# Processing Incoming Messages
# ===================================
# '''
# )
# lastm = None
# loopc = 0
# delay = 0.5
# while True:
# # try:
# if True:
# if loopc % 20 == 0:
# ''' ::: rechecking status ::: '''
# try:
# self.status = status = self.master.driver.get_status()
# print(" ::: status is",status,"::: ")
# except Exception as e:
# self.status = status = "XXXXXXXX"
# print(" ::: ERROR - Status Fetching ::: ","\n",e,e.args,"\n")
#
#
# ''' all unread messages '''
# for contact in self.master.driver.get_unread():
#
# self.Process(contact)
#
#
# '''
# lastm = message
# print(json.dumps(message.get_js_obj(), indent=4))
# for contact in self.master.driver.get_contacts():
# # print("CCCC",contact.get_safe_name() )
# if sender in contact.get_safe_name():
# chat = contact.get_chat()
# # chat.send_message("Hi "+sender+" !!!*"+message.content+"*")
# print()
# print()
# print(sender)
# print()
# print()
# print("class", message.__class__.__name__)
# print("message", message)
# print("id", message.id)
# print("type", message.type)
# print("timestamp", message.timestamp)
# print("chat_id", message.chat_id)
# print("sender", message.sender)
# print("sender.id", message.sender.id)
# print("sender.safe_name", message.sender.get_safe_name())
# if message.type == "chat":
# print("-- Chat")
# print("safe_content", message.safe_content)
# print("content", message.content)
# # Manager.process(message.sender.id,message.content)
# # contact.chat.send_message(message.safe_content)
# elif message.type == "image" or message.type == "video":
# print("-- Image or Video")
# print("filename", message.filename)
# print("size", message.size)
# print("mime", message.mime)
# print("caption", message.caption)
# print("client_url", message.client_url)
# message.save_media("./")
# else:
# print("-- Other type:",str(message.type))
# print("PROCESSING MESSAGE:",message)
# '''
#
# else:
# pass
# # except Exception as e:
# # print(" ::: ERROR - CHECKING MESSAGES ::: ","\n",e,e.args,"\n")
#
# loopc += 1; loopc = loopc % 120
# time.sleep(delay)
#
# def initServicesDB0(self):
# for service in self.master.services:
# # try:
# if True:
# if "servicesDB" not in self.master.db:
# self.master.db["servicesDB"] = {}
#
# if service not in self.master.db["servicesDB"]:
# self.master.db["servicesDB"][service] = {}
#
# if "dbID" not in self.master.db["servicesDB"][service]:
# self.master.db["servicesDB"][service]["dbID"] = None
#
# dbID = self.master.db["servicesDB"][service]["dbID"]
# ''' create new db group '''
# db = {}
# if dbID is None:
# print("-------------------------------")
# print(" CREATING NEW DB GROUP "+service)
# print("-------------------------------")
# groupName = service
#
# newGroup = self.master.driver.newGroup(newGroupName = service+"_DB", number = "+"+self.master.db["masters"][1], local = self.runLocal)
# newGroupID = newGroup.id
# self.master.db["servicesDB"][service]["dbID"] = newGroupID
# db = {"init":True}
# self.master.driver.sendMessage(newGroupID, json.dumps(db))
# self.backup()
# else:
# db = self.loadDB(dbID)
#
# print("-------------------------------")
# print("service: ",service," dbID: ",dbID)
# print("-------------------------------")
# print(db)
# # while()
# self.master.services[service]["obj"].updateDB(db)
#
# # except Exception as e:
# else:
# print(" ::: ERROR - LOAD SERVICES ::: ","\n",e,e.args,"\n")
#
# def LoadServices0(self):
# # load list of services
# for service in self.master.db["services"]:
#
#
# if "reminders".lower() == service.lower():
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# ReminderService.go(sendDelegate=self.master.driver.sendMessage,backupDelegate=self.backupService)
# self.serviceFuncs["services"][service]=ReminderService.process
# groupName = "🔔 Reminders 🔔"
# self.serviceGroupNames[service] = groupName
# self.master.db["services"][service]["welcome"] = ReminderService.welcome
# self.master.db["services"][service]["groupName"] = groupName
# # self.serviceGroupNames[service] = "Reminders"
#
#
# if "danilator".lower() == service.lower():
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# print("FFFFFFFFFFFFFFFFFFFFFFFFFFF")
# DanilatorService.go(sendDelegate=self.master.driver.sendMessage,backupDelegate=self.backupService)
# self.serviceFuncs["services"][service]=DanilatorService.process
# groupName = "💚 Danilator 💚"
# self.serviceGroupNames[service] = groupName
# self.master.db["services"][service]["welcome"] = DanilatorService.welcome
# self.master.db["services"][service]["groupName"] = groupName
#
#
# # self.serviceGroupNames[service] = "Danilator"
#
# try:
# if "dbID" not in self.master.db["services"][service]:
# self.master.db["services"][service]["dbID"] = None
#
# dbID = self.master.db["services"][service]["dbID"]
# ''' create new db group '''
# if dbID is None:
# print("-------------------------------")
# print(" CREATING NEW DB GROUP "+service)
# print("-------------------------------")
# groupName = service
#
# newGroup = self.master.driver.newGroup(newGroupName = service+"_DB", number = "+"+self.master.db["masters"][1], local = self.runLocal)
# newGroupID = newGroup.id
# self.master.db["services"][service]["dbID"] = newGroupID
# self.master.driver.sendMessage(newGroupID, json.dumps({"init":True}))
# self.backup()
# else:
# print("-------------------------------")
# print("service: ",service," dbID: ",dbID)
# print("-------------------------------")
#
# except Exception as e:
# print(" ::: ERROR - LOAD SERVICES ::: ","\n",e,e.args,"\n")
#
# def initAsync0(self, profileDir = "/app/session/rprofile2"):
#
# ''' init driver variables '''
# if len(Master.shares) > 1:
# profileDir += "-"+str(len(Master.shares))
# chrome_options = webdriver.ChromeOptions()
# chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
# chrome_options.add_argument("--headless")
# chrome_options.add_argument("--disable-dev-shm-usage")
# chrome_options.add_argument("--no-sandbox")
# chrome_options.add_argument("user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36")
# chrome_options.add_argument("user-data-dir="+profileDir);
# chrome_options.add_argument('--profile-directory='+profileDir)
#
# if not self.runLocal:
# self.master.driver = WhatsAPIDriver(profile = profileDir, client='chrome', chrome_options=chrome_options,username="wholesomegarden")
# else:
# self.master.driver = WhatsAPIDriver(username="wholesomegarden",profile=None)
# driver = self.master.driver
#
# print(''' ::: waiting for login ::: ''')
# driver.wait_for_login()
# try:
# self.status = status = driver.get_status()
# except Exception as e:
# print(" ::: ERROR - Status Init ::: ","\n",e,e.args,"\n")
#
# ''' preping for qr '''
# if status is not "LoggedIn":
# img = None
# triesCount = 0
# maxtries = 40
#
# while status is not "LoggedIn" and triesCount < maxtries:
# triesCount+=1
#
# print("-------------------------------")
# print("status:",status,"tries:",triesCount,"/",maxtries)
# print("-------------------------------")
#
# self.lastQR += 1
# try:
# img = driver.get_qr("static/img/QR"+str(self.lastQR)+".png")
# print("QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
# print("QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
# print("QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
# print("QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
# print("QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
# print("QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ")
# print("QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ",str(img)[17:130])
#
# except Exception as e:
# print(" ::: ERROR - QR Fetching ::: ","\n",e,e.args,"\n")
#
# # im_path = os.path.join("static/img/newQR.png")
#
# print(''' ::: rechecking status ::: ''')
# try:
# self.status = status = driver.get_status()
# except Exception as e :
# self.status = status = "XXXXXXXX"
# print(" ::: ERROR - Status Fetching ::: ","\n",e,e.args,"\n")
#
# if status is "LoggedIn":
# print(''' :::::::::::::::::::::::::::::::::::: ''')
# print(''' :::::::::::::::::::::::::::::::::::: ''')
# print(''' :::: ::::: ''')
# print(''' :::: MASTER IS LOGGED IN! ::::: ''')
# print(''' :::: ::::: ''')
# print(''' :::::::::::::::::::::::::::::::::::: ''')
# print(''' :::::::::::::::::::::::::::::::::::: ''')
# # if self.runLocal:
# # self.master.driver.save_firefox_profile(remove_old=False)
#
# ''' load DB '''
# ## overwrite to init db
# initOverwrite = False
# if initOverwrite:
# self.backup(now = True)
# # driver.updateDB(self.master.db,number=self.master.db["id"])
# lastDB = self.loadDB()
# self.master.db = lastDB
# self.master.db["init"] = time.time()
# self.master.db["backupInterval"] = 10*60
# if self.runLocal:
# self.master.db["backupInterval"] = 0
#
# self.master.db["backupDelay"] = 10
# if self.runLocal:
# self.master.db["backupDelay"] = 3
#
# self.master.db["lastBackup"] = 0
# self.master.db["lastBackupServices"] = 0
# self.backup()
# print(''' :::::::::::::::::::::::::::::::::::: ''')
# print(''' :::::::::::::::::::::::::::::::::::: ''')
# print(''' :::: ::::: ''')
# print(''' :::: DATABASE LOADED ::::: ''')
# print(''' :::: ::::: ''')
# print(''' :::::::::::::::::::::::::::::::::::: ''')
# print(''' :::::::::::::::::::::::::::::::::::: ''')
# print(self.master.db)
# print()
# #
# ''' Load Services '''
# # print("SSSSSSSSSSSSSSSSSSSSs")
# self.LoadServices()
# # print("SSSSSSSSSSSSSSSSSSSSs")
#
# ''' process incoming '''
# process = Thread(target = self.ProcessIncoming, args=[None])
# process.start()
# else:
# print(" ::: ERROR - COULD NOT LOG IN ::: ","\n")
#
# def ProcessServiceAsync0(self, obj, info):
# serviceT = Thread(target = self.ProcessService, args = [[obj,info]])
# serviceT.start()
#
# def ProcessService0(self, data):
# try:
# service, chatID, text = data
# obj, info = data
# obj.process(info)
# self.serviceFuncs["services"][service](chatID, text)
# except Exception as e:
# print(" ::: ERROR - Processing Service ::: ",serice,":::",chatID,":::",text,":::","\n",e,e.args,"\n")
# ''' running master '''
# master = None
# timeout = time.time()
# maxtimeout = 30
# while master is None and time.time()-timeout < maxtimeout:
# try:
# # if True:
# # master = Master()
# # print("9999999999999999999999999999")
# # print("9999999999999999999999999999")
# # print("9999999999999999999999999999")
# # print("9999999999999999999999999999")
#
# maxtimeout = 0
#
# # else:
# # pass
# except Exception as e:
# print(" ::: ERROR - init Master ::: ","\n",e,e.args,"\n")
#
# ''' running front server '''
# from flask import Flask, render_template, redirect
#
# app = Flask(__name__,template_folder='templates')
#
# qrfolder = os.path.join('static', 'img')
# app.config['QR_FOLDER'] = qrfolder
#
# ''' setting referals '''
# refs = {"yo":"https://api.WhatsApp.com/send?phone=+972512170493"}
# refs["yoo"] = "https://web.WhatsApp.com/send?phone=+972512170493"
#
# @app.route('/')
# def hello_world():
# master = Master.shares[0]
# full_filename = os.path.join(app.config['QR_FOLDER'], "QR"+str(master.lastQR)+".png")
# if master.status == "LoggedIn":
# return render_template("loggedIn.html", user_image = full_filename, status = master.status)
# else:
# return render_template("index.html", user_image = full_filename, status = master.status)
#
# @app.route('/<path:text>', methods=['GET', 'POST'])
# def all_routes(text):
# if "exit" in text:
# print("EXITTT")
# print("EXITTT")
# print("EXITTT")
# print("EXITTT")
# return redirect("https://chat.whatsapp.com/JmnYDofCd7v0cXzfBgcVDO")
# return render_template("exit.html", user_image = "full_filename", status = "s")
#
#
# if text in refs:
# return redirect(refs[text])
# else:
# return redirect("/")
#
#
# #
# # if __name__ == '__main__':
# # print(
# # '''
# # ===================================
# # Running Front Server
# # ===================================
# # '''
# # )
# # app.run(debug=True, host='0.0.0.0',use_reloader=False)
# # else:
# # print("################################")
# # print("################################")
# # print("################################")
# # print("################################")
# # print("################################")
# # print("################################")
# #
#
# def flaskRun(master):
# print("GONNA RUN ASYNC")
# print("GONNA RUN ASYNC")
# print("GONNA RUN ASYNC")
# print("GONNA RUN ASYNC")
# print("GONNA RUN ASYNC")
# print("GONNA RUN ASYNC")
# print("GONNA RUN ASYNC")
# print("GONNA RUN ASYNC")
# global running
# # if reminder.runners < 1 and running < 1:
# if True:
# # running += 1
# # reminder.runners += 1
# t = Thread(target=flaskRunAsync,args=[master,])
# t.start()
# else:
# print(runners,"!!!!!!!!!!!!!!!!!!!!!!!!!RUNNERS")
# print(runners,"!!!!!!!!!!!!!!!!!!!!!!!!!RUNNERS")
# print(runners,"!!!!!!!!!!!!!!!!!!!!!!!!!RUNNERS")
# print(runners,"!!!!!!!!!!!!!!!!!!!!!!!!!RUNNERS")
# print(runners,"!!!!!!!!!!!!!!!!!!!!!!!!!RUNNERS")
# print(runners,"!!!!!!!!!!!!!!!!!!!!!!!!!RUNNERS")
# print(runners,"!!!!!!!!!!!!!!!!!!!!!!!!!RUNNERS")
# print("AFTER GONNA RUN ASYNC")
# print("AFTER GONNA RUN ASYNC")
# print("AFTER GONNA RUN ASYNC")
# print("AFTER GONNA RUN ASYNC")
#
#
# def flaskRunAsync(data):
# master = data
# # input()
# print("AAAAAAAAAAAA ASYNC")
# print("AAAAAAAAAAAA ASYNC")
# print("AAAAAAAAAAAA ASYNC")
# print("AAAAAAAAAAAA ASYNC")
# print("AAAAAAAAAAAA ASYNC")
# print("AAAAAAAAAAAA ASYNC")
# print("AAAAAAAAAAAA ASYNC")
# master = Master()
# master = Master.shares[0]
# print("9999999999999999999999999999")
# print("9999999999999999999999999999")
# print("9999999999999999999999999999")
# print("9999999999999999999999999999")
#
#
#
# if __name__ == '__main__':
# flaskRun(master)
# print("STARTING APP")
# # print("STARTING APP")
# # print("STARTING APP")
# # print("STARTING APP")
# # print("STARTING APP")
# if self.runLocal :
# pass
# app.run(debug=True, host='0.0.0.0',use_reloader=False)
# # app.run(debug=True, host='0.0.0.0',use_reloader=False)
# else:
# flaskRun(master)
# if self.runLocal :
# pass
# app.run(debug=True, host='0.0.0.0',use_reloader=False)
# # app.run(debug=True, host='0.0.0.0',use_reloader=False)
# print("STARTING APP22222222222")
# # print("STARTING APP22222222222")
# # print("STARTING APP22222222222")
# # print("STARTING APP22222222222")
# # print("STARTING APP22222222222")
# # print("STARTING APP22222222222")
|
data_batcher.py | # -*- coding: utf-8 -*-
import numpy as np
import pickle
import time
from queue import Queue
from threading import Thread
from .data_loader import DataLoader
CHUNK_NUM = 20
class DataBatcher(object):
    """
    Streams training batches from pickled chunk files through a bounded
    queue, so large datasets need not be fully resident in memory.

    A daemon thread fills ``loader_queue`` with DataLoader objects (one
    per chunk file); ``get_batch()`` drains batches from them.  A
    watchdog thread restarts the filler thread if it dies.
    """

    def __init__(self, data_dir, file_list, batch_size, num_epoch, shuffle=False):
        """
        Args:
            data_dir: directory containing the pickled chunk files.
            file_list: chunk file names (shuffled in place each epoch
                when ``shuffle`` is True).
            batch_size: batch size handed to each DataLoader.
            num_epoch: number of passes over ``file_list``.
            shuffle: shuffle file order at the start of every epoch.
        """
        self.data_dir = data_dir
        self.file_list = file_list
        self.batch_size = batch_size
        self.num_epoch = num_epoch
        self.shuffle = shuffle
        self.cur_epoch = 0
        self.loader_queue = Queue(maxsize=CHUNK_NUM)
        # Mirror of the queue length, updated by both filler and consumer.
        self.loader_queue_size = 0
        self.batch_iter = self.batch_generator()
        self.input_gen = self.loader_generator()
        # Start the thread that loads the queue.
        self.loader_q_thread = Thread(target=self.fill_loader_queue)
        self.loader_q_thread.daemon = True  # setDaemon() is deprecated
        self.loader_q_thread.start()
        # Start a thread that watches the filler and restarts it if dead.
        self.watch_thread = Thread(target=self.monitor_threads)
        self.watch_thread.daemon = True
        self.watch_thread.start()

    def get_batch(self):
        """Return (batch_data, local_size); (None, 0) when exhausted."""
        try:
            batch_data, local_size = next(self.batch_iter)
        except StopIteration:
            batch_data = None
            local_size = 0
        return batch_data, local_size

    def get_epoch(self):
        """Return the epoch currently being loaded."""
        return self.cur_epoch

    def full(self):
        """Return True when the loader queue is at capacity."""
        return self.loader_queue_size == CHUNK_NUM

    def batch_generator(self):
        """Yield (batch_data, local_size) pairs from queued DataLoaders.

        NOTE(review): iteration ends as soon as the queue counter hits
        zero, so a slow filler thread can end iteration early — confirm
        the filler always stays ahead of consumption.
        """
        while self.loader_queue_size > 0:
            data_loader = self.loader_queue.get()
            n_batch = data_loader.n_batch
            self.loader_queue_size -= 1
            for batch_idx in range(n_batch):
                batch_data, local_size = data_loader.get_batch(batch_idx=batch_idx)
                yield batch_data, local_size

    def loader_generator(self):
        """Yield one DataLoader per chunk file for ``num_epoch`` epochs."""
        for epoch in range(self.num_epoch):
            self.cur_epoch = epoch
            if self.shuffle:
                np.random.shuffle(self.file_list)
            for f in self.file_list:
                # Close the chunk file promptly (the original leaked the
                # handle by never calling reader.close()).
                with open("%s/%s" % (self.data_dir, f), 'br') as reader:
                    q_dict = pickle.load(reader)
                data_loader = DataLoader(batch_size=self.batch_size)
                data_loader.feed_by_data(q_dict)
                yield data_loader

    def fill_loader_queue(self):
        """Move DataLoaders from the generator into the bounded queue."""
        while True:
            if self.loader_queue_size <= CHUNK_NUM:
                try:
                    data_loader = next(self.input_gen)
                    self.loader_queue.put(data_loader)
                    self.loader_queue_size += 1
                except StopIteration:
                    break

    def monitor_threads(self):
        """Watch loader queue thread and restart if dead."""
        while True:
            time.sleep(60)
            if not self.loader_q_thread.is_alive():  # if the thread is dead
                # NOTE(review): a filler that finished normally (after
                # StopIteration) is also restarted here; the restarted
                # thread exits immediately, so this is benign but noisy.
                print('Found loader queue thread dead. Restarting.')
                new_t = Thread(target=self.fill_loader_queue)
                self.loader_q_thread = new_t
                new_t.daemon = True
                new_t.start()
|
newhacu1.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from os import system, name
import itertools
import threading
import time
import sys
import datetime
from base64 import b64decode,b64encode
from datetime import date
expirydate = datetime.date(2022, 1, 13 )
#expirydate = datetime.date(2021, 12, 30)
today=date.today()
def hero():
    """Interactive prediction loop for one play window.

    Reads the module-global ``period`` as the starting round number,
    repeatedly prompts for the current price, runs a model prediction and
    prints RED/GREEN, until the user enters "0" or 15 rounds elapse.

    Bug fix: ``input()`` returns a string in Python 3, so the original
    ``if(y==0)`` exit check could never match; it now compares to "0".

    NOTE(review): st, pd, getSum, train_test_split, LogisticRegression,
    a and b are not defined/imported anywhere in this file — confirm the
    missing imports; as-is the model section raises NameError at runtime.
    """
    def chalo():
        # Spinner shown while "connecting"; the animate thread stops once
        # the enclosing function flips `done`.
        done = False
        #here is the animation
        def animate():
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if done:
                    break
                sys.stdout.write('\rconnecting to server for next colour--------- ' + c)
                sys.stdout.flush()
                time.sleep(0.1)
            sys.stdout.write('\rDone! ')
        t = threading.Thread(target=animate)
        t.start()
        #long process here
        time.sleep(20)
        done = True

    def chalo1():
        # Second spinner shown while "getting the colour".
        done = False
        #here is the animation
        def animate():
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if done:
                    break
                sys.stdout.write('\rgetting the colour wait --------- ' + c)
                sys.stdout.flush()
                time.sleep(0.1)
            sys.stdout.write('\rDone! ')
        t = threading.Thread(target=animate)
        t.start()
        #long process here
        time.sleep(20)
        done = True

    def clear():
        # Clear the terminal on any platform.
        # for windows
        if name == 'nt':
            _ = system('cls')
        # for mac and linux(here, os.name is 'posix')
        else:
            _ = system('clear')

    clear()
    y = 1
    newperiod = period
    banner = 'figlet Rxce 7.o '
    numbers = []
    i = 1
    while y:
        clear()
        system(banner)
        print("Contact me on telegram @Hackmgk")
        print("Enter", newperiod, "Price :")
        current = input()
        current = int(current)
        chalo()
        print("\n---------Successfully Connected to the server-----------")
        chalo1()
        print("\n---------Successfully got the colour -------------")
        print('\n')
        with st.spinner('In Progress....'):
            d = pd.read_excel("1234.xlsx")
            PRICE = getSum(current)
            X = d[['A'+PRICE+'B']]
            y = d['D']
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.001)
            clf = LogisticRegression(random_state=0).fit(X, y)
            p = clf.predict([[a, b]])
            if p % 2 == 0:
                st.success("Next is GREEN")
            elif p % 2 == 1:
                st.error("Next is RED")
        i = i + 1
        newperiod += 1
        numbers.append(current)
        y = input("Do you want to play : Press 1 and 0 to exit \n")
        # BUG FIX: input() yields a str; the old `y == 0` never matched.
        if y == "0":
            y = False
        if len(numbers) > 15:
            clear()
            system('figlet Thank you!!')
            print("Play on next specified time!!")
            print("-----------Current Time UP----------")
            sys.exit(" \n \n \n Contact on Telegram @Hackmgk")
    print(numbers)
if expirydate > today:
    now = datetime.datetime.now()
    # Each play window opens at H:55 and closes at (H+1):35; the second
    # element of each pair is the round number handed to hero() through
    # the module-global `period`.
    schedule = [(10, 220), (13, 280), (16, 340), (20, 420), (22, 460)]
    for open_hour, start_period in schedule:
        window_open = now.replace(hour=open_hour, minute=55, second=0, microsecond=0)
        window_close = now.replace(hour=open_hour + 1, minute=35, second=0, microsecond=0)
        if window_open < now < window_close:
            period = start_period
            hero()
            break
    else:
        # Outside every window: show the timetable instead of playing.
        banner = 'figlet Rxce 7.o '
        print("Hi!! Thanks for buying Life time the hack")
        print("----------Your play time-----------")
        print(" 11:00 PM- 11:35 PM")
        print(" 02:00 PM- 02:35 PM")
        print(" 05:00 PM- 05:35 PM")
        print(" 09:00 PM- 09:35 PM")
        print(" 11:00 PM- 12:35 PM")
        print("Please play on the given time, and ")
        print("If you think it is an error contact")
        print(" admin on telegram @Hackmgk ")
else:
    # License expired: show renewal instructions.
    banner = 'figlet Thank '
    system(banner)
    print("*---------*----------*-------------*----------*")
    print("Your hack has expired--- Please contact")
    print(" on telegram ----@hackmgk for activating")
    print(" Recharge Amount : Total limit " )
    print(" 2. 3000 INR ------- 30 Days")
    print("*---------*----------*-------------*----------*")
    print("Your custom hack can be made request from us.")
    print( "Msg me on telegram @hackmgk")
|
selector.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import atexit, time, errno, os
from compat import select, SelectError, set, selectable_waiter, format_exc
from threading import Thread, Lock
from logging import getLogger
log = getLogger("qpid.messaging")
class Acceptor:
    """Selectable wrapper around a listening socket: accepts incoming
    connections and hands each new socket to the supplied handler."""

    def __init__(self, sock, handler):
        self.sock = sock
        self.handler = handler

    def fileno(self):
        # Delegate so select() can poll the listening socket directly.
        return self.sock.fileno()

    def reading(self):
        # A listener is always interested in reads (incoming connections).
        return True

    def writing(self):
        # A listener never has pending writes.
        return False

    def readable(self):
        # A readable listener means a connection is waiting: accept it
        # and pass the new socket to the handler.
        conn, _addr = self.sock.accept()
        self.handler(conn)
class Selector:
    """Reactor that multiplexes I/O for registered selectables on a
    single daemon thread.

    Each selectable is expected to provide fileno(), reading(),
    writing(), readable(), writeable() and timing(); timing() returns an
    absolute deadline (or None) that drives timeout() callbacks.

    NOTE: this is Python 2 code (old-style ``except E, e`` syntax and
    ``e[0]`` exception indexing) — do not run under Python 3 unmodified.
    """

    # Process-wide default-instance state.
    lock = Lock()
    DEFAULT = None
    _current_pid = None

    @staticmethod
    def default():
        # Return the process-wide Selector, creating a fresh one the
        # first time and again after a fork (detected via the stored pid).
        Selector.lock.acquire()
        try:
            if Selector.DEFAULT is None or Selector._current_pid != os.getpid():
                sel = Selector()
                atexit.register(sel.stop)
                sel.start()
                Selector.DEFAULT = sel
                Selector._current_pid = os.getpid()
            return Selector.DEFAULT
        finally:
            Selector.lock.release()

    def __init__(self):
        self.selectables = set()          # everything registered
        self.reading = set()              # currently read-interested
        self.writing = set()              # currently write-interested
        # Pipe-like waiter used to interrupt a blocking select().
        self.waiter = selectable_waiter()
        self.reading.add(self.waiter)
        self.stopped = False
        self.thread = None
        self.exception = None             # set if the I/O thread dies

    def wakeup(self):
        # Interrupt a select() in progress so set changes take effect.
        self.waiter.wakeup()

    def register(self, selectable):
        self.selectables.add(selectable)
        self.modify(selectable)

    def _update(self, selectable):
        # Sync the read/write interest sets with the selectable's current
        # reading()/writing() answers; returns its timing() deadline.
        if selectable.reading():
            self.reading.add(selectable)
        else:
            self.reading.discard(selectable)
        if selectable.writing():
            self.writing.add(selectable)
        else:
            self.writing.discard(selectable)
        return selectable.timing()

    def modify(self, selectable):
        self._update(selectable)
        self.wakeup()

    def unregister(self, selectable):
        self.reading.discard(selectable)
        self.writing.discard(selectable)
        self.selectables.discard(selectable)
        self.wakeup()

    def start(self):
        # Launch the reactor loop on a daemon thread.
        self.stopped = False
        self.thread = Thread(target=self.run)
        self.thread.setDaemon(True)
        self.thread.start();

    def run(self):
        # Main reactor loop: refresh interest sets, compute the nearest
        # deadline, select(), then dispatch write/read/timeout events.
        try:
            while not self.stopped:
                wakeup = None
                for sel in self.selectables.copy():
                    t = self._update(sel)
                    if t is not None:
                        if wakeup is None:
                            wakeup = t
                        else:
                            wakeup = min(wakeup, t)
                rd = []
                wr = []
                ex = []
                while True:
                    try:
                        if wakeup is None:
                            timeout = None
                        else:
                            timeout = max(0, wakeup - time.time())
                        rd, wr, ex = select(self.reading, self.writing, (), timeout)
                        break
                    except SelectError, e:
                        # Repeat the select call if we were interrupted.
                        if e[0] == errno.EINTR:
                            continue
                        else:
                            # unrecoverable: promote to outer try block
                            raise
                # Dispatch writes first, re-checking interest since a
                # callback may have changed it.
                for sel in wr:
                    if sel.writing():
                        sel.writeable()
                for sel in rd:
                    if sel.reading():
                        sel.readable()
                # Fire timeout() on any selectable past its deadline.
                now = time.time()
                for sel in self.selectables.copy():
                    w = sel.timing()
                    if w is not None and now > w:
                        sel.timeout()
        except Exception, e:
            # The reactor thread is dying: record the error, notify every
            # selectable that supports abort(), then re-raise.
            self.exception = e
            info = format_exc()
            log.error("qpid.messaging I/O thread has died: %s" % str(e))
            for sel in self.selectables.copy():
                if hasattr(sel, "abort"):
                    sel.abort(e, info)
            raise

    def stop(self, timeout=None):
        # Signal the loop to exit and wait for the thread to finish.
        self.stopped = True
        self.wakeup()
        self.thread.join(timeout)
        self.thread = None
|
calibrate.py | """
This file contains code for calibrating camera
Saving calibration and collecting old calibrations
"""
import pickle as pkl
import os
import threading
import cv2
import time
import mediapipe as mp
class Status():
    # Shared mutable flags used to coordinate the camera-preview thread
    # with the calibration routine.
    start_calculate = False  # set True to tell the preview loop to exit
    stop = False             # reserved stop flag (not read in this file)
    result = (0, 0)          # placeholder for a (high, low) result pair
def calibrate(camera):
    """Calculate appropriate detection confidence for camera used!
    Protocol 1. show hand press enter, 2. show back of hand press enter. """
    calibration = None
    calibration = collect_if_exist()
    if calibration is not None:
        # Reuse a previously saved calibration instead of re-running the
        # interactive routine.
        return calibration
    else:
        status = Status()
        # Live preview so the user can position their palm; the thread
        # exits once status.start_calculate flips to True.
        thread = threading.Thread(target = fast_cam_cap, args = (camera, status, ))
        thread.start()
        # Start calibration routine:
        input("Hold hand in front of camera with palm clearly visible, then press enter with other hand!")
        status.start_calculate = True
        thread.join()
        status.start_calculate = False
        time.sleep(1)
        high = calculate_higher_accuracy(camera)
        time.sleep(1)
        # Second preview pass for the back-of-hand measurement.
        thread2 = threading.Thread(target = fast_cam_cap, args = (camera, status, ))
        thread2.start()
        # Calculate lower accuracy
        input("Now flip the hand so that palm faces away from camera, then press enter again!")
        status.start_calculate = True
        thread2.join()
        time.sleep(1)
        # Calculate lower accuracy
        low = calculate_lower_accuracy(camera, high)
        # Save to file
        print("Save result",(high,low), "to file!")
        save_calibration((high,low))
        return (high,low)
def save_calibration(res):
    """Persist the (high, low) calibration tuple to ./data/calibration.pkl."""
    with open('./data/calibration.pkl', 'wb') as fp:
        pkl.dump(res, fp, protocol=pkl.HIGHEST_PROTOCOL)
def calculate_lower_accuracy(camera, high):
    """Search downward from ``high`` for the low detection confidence.

    Shows the camera feed while repeatedly rebuilding a MediaPipe Hands
    detector at decreasing confidence until ``amount_of_success``
    consecutive frames behave as required; returns the final confidence.

    Bug fix: the original created a new Hands object every frame and only
    closed the last one; each previous detector is now closed before a
    replacement is built.
    """
    confidence_change_rate = 0.01
    current = high - confidence_change_rate
    amount_of_success = 10  # number of detections needed to pass
    amount_of_failure = 3
    _success = 1
    failure = 1
    # initiate
    mp_drawing = mp.solutions.drawing_utils
    mp_hands = mp.solutions.hands
    cap = cv2.VideoCapture(camera)
    hands = None
    while cap.isOpened() and _success < amount_of_success:
        if failure == amount_of_failure:
            failure = 1
            current = current - confidence_change_rate
            print("Calibration calculation of Low at: ", current)
        if hands is not None:
            hands.close()  # release the previous detector (was leaked)
        hands = mp_hands.Hands(min_detection_confidence=current, min_tracking_confidence=0.5)
        success, image = cap.read()
        if not success:
            continue
        image.flags.writeable = False
        results = hands.process(image)
        # Draw the hand annotations on the image.
        image.flags.writeable = True
        # NOTE(review): here a detection counts toward `failure` and a
        # miss toward `_success` — inverted relative to
        # calculate_higher_accuracy; confirm this is intentional for the
        # back-of-hand pass.
        if results.multi_hand_landmarks is not None:
            _success = 1
            failure += 1
        else:
            _success += 1
            failure = 1
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
        cv2.imshow('Default', image)
        cv2.waitKey(1)
    if hands is not None:
        hands.close()
    cap.release()
    cv2.destroyWindow("Default")
    print("Low rate calculated to", current)
    return current
def calculate_higher_accuracy(camera):
    """Search downward from 1.0 for the high detection confidence.

    Shows the camera feed while repeatedly rebuilding a MediaPipe Hands
    detector at decreasing confidence until ``amount_of_success``
    consecutive frames detect a hand; returns the final confidence.

    Bug fix: the original created a new Hands object every frame and only
    closed the last one; each previous detector is now closed before a
    replacement is built.
    """
    confidence_change_rate = 0.01
    current = 1
    amount_of_success = 3  # number of detections in a row! to pass
    amount_of_failure = 3
    _success = 1
    failure = 1
    # initiate
    mp_drawing = mp.solutions.drawing_utils
    mp_hands = mp.solutions.hands
    cap = cv2.VideoCapture(camera)
    hands = None
    while cap.isOpened() and _success < amount_of_success:
        if failure == amount_of_failure:
            failure = 1
            current = current - confidence_change_rate
            print("Calibration calculation of High at: ", current)
        if hands is not None:
            hands.close()  # release the previous detector (was leaked)
        hands = mp_hands.Hands(min_detection_confidence=current, min_tracking_confidence=0.5)
        success, image = cap.read()
        if not success:
            continue
        image.flags.writeable = False
        results = hands.process(image)
        # Draw the hand annotations on the image.
        image.flags.writeable = True
        if results.multi_hand_landmarks is not None:
            # Consecutive detections accumulate toward success.
            _success += 1
            failure = 1
        else:
            _success = 1
            failure += 1
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
        cv2.imshow('Default', image)
        cv2.waitKey(1)
    if hands is not None:
        hands.close()
    cap.release()
    cv2.destroyWindow("Default")
    print("High rate calculated to", current)
    return current
def fast_cam_cap(camera, status):
    """Show a live camera preview until status.start_calculate is set."""
    capture = cv2.VideoCapture(camera)
    while capture.isOpened():
        if status.start_calculate:
            break
        ok, frame = capture.read()
        if not ok:
            # Skip dropped frames without leaving the preview loop.
            continue
        cv2.imshow('Default', frame)
        cv2.waitKey(1)
    capture.release()
    cv2.destroyWindow("Default")
def collect_if_exist(path="./data/calibration.pkl"):
    """Load a previously saved calibration tuple, or return None.

    When the file is missing (or empty), the containing directory is
    created if needed and None is returned so the caller recalibrates.

    Bug fix: the ``path`` parameter was ignored (a hard-coded
    './data/calibration.pkl' was always opened) and its default had a
    truncated extension; the parameter is now honoured, with a default
    matching the path save_calibration() writes.
    """
    try:
        with open(path, 'rb') as fp:
            calibration_values = pkl.load(fp)
    except (FileNotFoundError, EOFError):
        data_dir = os.path.dirname(path) or '.'
        if not os.path.isdir(data_dir):
            os.mkdir(data_dir)
        return None
    print("Collected calibration : ", calibration_values)
    return calibration_values
# Debug testing
#print("calibration", calibrate(0)) |
multi_adb.py | #!/usr/bin/env python3
# coding: utf-8
from __future__ import print_function, unicode_literals
from distutils.spawn import find_executable
import asyncio
import os
import re
import subprocess
import sys
from threading import Thread
import time
# Locate adb once at import time; everything below shells out to it.
adb_path = find_executable("adb")
if not adb_path:
    # Without adb on PATH nothing in this script can work; bail out early.
    print('adb path not found.')
    exit(-1)
def mkdir(dir_name):
    """Create ``dir_name`` (including parents) if it does not exist.

    Uses ``exist_ok=True`` to avoid the check-then-create race of the
    original ``os.path.exists()`` guard.
    """
    os.makedirs(dir_name, exist_ok=True)
def connect(device):
    """Run ``adb connect <device>``; True iff adb reports a connection."""
    proc = subprocess.Popen([adb_path, 'connect', device], stdout=subprocess.PIPE)
    for raw in iter(proc.stdout.readline, b''):
        text = raw.decode('utf-8').strip()
        if not text:
            break
        if text.startswith('connected to'):
            return True
    return False
def disconnect(device):
    """Run ``adb disconnect <device>``; True iff adb confirms it."""
    proc = subprocess.Popen([adb_path, 'disconnect', device], stdout=subprocess.PIPE)
    while True:
        text = proc.stdout.readline().decode('utf-8').strip()
        if not text:
            # EOF or blank output: adb gave no confirmation.
            return False
        if text.startswith('disconnected'):
            return True
class Device:
    """One adb target with a dedicated asyncio event-loop thread.

    Commands queued via execute() run sequentially on the device's own
    loop thread, so multiple Device objects can work in parallel while
    each device's commands keep their order.
    """

    def __init__(self, name, name_short):
        self.__name = name              # full adb serial (may be ip:port)
        self.__name_short = name_short  # short form used for output paths
        self.__loop = None              # asyncio loop owned by this device
        self.__thread = None            # thread running the loop
        self.__running = False          # True while the loop thread runs
        self.__connected = True         # assumed connected until told otherwise
        self.__auto_connect = False
        # When the serial equals its short form, adb connect/disconnect
        # are treated as no-ops (directly attached device).
        if name == name_short:
            self.__auto_connect = True

    def __del__(self):
        # Best-effort teardown of the loop and its worker thread.
        # NOTE(review): stop()/close() are called from the destructor's
        # thread, not the loop thread — confirm this is safe for the
        # asyncio loop implementation in use.
        if self.__loop is not None:
            self.__loop.stop()
            self.__loop.close()
            self.__loop = None
        if self.__thread is not None:
            self.__thread.join()
            self.__thread = None

    def __run_forever(self):
        # Thread body: run the loop until stop() is requested.
        self.__loop.run_forever()
        # set exit point
        self.__running = False

    def start(self):
        # Lazily create the loop and its worker thread; idempotent.
        if self.__loop is not None:
            return
        self.__running = True
        self.__loop = asyncio.new_event_loop()
        self.__thread = Thread(target=self.__run_forever)
        self.__thread.start()

    def stop(self):
        # Ask the loop (from any thread) to stop itself.
        self.__loop.call_soon_threadsafe(self.__stop_in_loop_thread)

    def __get_name(self):
        # Label used to prefix log output for this device.
        return "[" + self.__name + "]"

    def execute(self, command):
        # Queue a command string for execution on this device's loop.
        self.start()
        self.__loop.call_soon_threadsafe(self.__run_in_loop_thread, command)

    def __stop_in_loop_thread(self):
        self.__loop.stop()

    def wait_stop(self):
        # Block until the loop thread has exited.
        # NOTE(review): busy-wait spins a CPU core; a join() or Event
        # would be gentler — left unchanged here.
        while self.__running:
            pass

    def __run_in_loop_thread(self, command):
        # Expand placeholders, then dispatch built-ins (sleep/connect/
        # disconnect/root) or fall through to a raw adb invocation.
        if command.find("{device}") >= 0:
            command = command.format(device=self.__name)
        if command.find("{output_device}") >= 0:
            # use name_short here to avoid invalid ip:port path
            command = command.format(output_device="output/"+self.__name_short)
            mkdir("output/"+self.__name_short)
        print(self.__get_name() + " execute: " + command)
        cmd = command.split(" ")
        if cmd[0] == "sleep":
            time.sleep(float(cmd[1]))
            return
        if cmd[0] == "connect":
            self.__run_connect()
            return
        if cmd[0] == "disconnect":
            self.__run_disconnect()
            return
        if cmd[0] == "root":
            self.__run_root()
            return
        self.__run_normal(command)

    def __run_connect(self):
        # Ensure an adb connection exists; exits the process on failure.
        if self.__connected:
            return
        if self.__auto_connect:
            self.__connected = True
            return
        if connect(self.__name):
            self.__connected = True
            return
        print(self.__get_name() + " connect fail")
        exit(1)

    def __run_disconnect(self):
        # Tear down the adb connection; exits the process on failure.
        if not self.__connected:
            return
        if self.__auto_connect:
            return
        if disconnect(self.__name):
            self.__connected = False
            return
        print(self.__get_name() + " disconnect fail")
        exit(1)

    def __run_root(self):
        # Restart adbd as root. Any output other than the "already
        # running" notice indicates adbd restarted, which drops the
        # connection, so the device is marked disconnected.
        process = subprocess.Popen([adb_path, '-s', self.__name, 'root'], stdout=subprocess.PIPE)
        while True:
            line = process.stdout.readline().decode('utf-8').strip()
            if not line:
                break
            line = line.strip()
            if line != 'adbd is already running as root':
                self.__connected = False

    def __run_normal(self, command):
        # Run an arbitrary adb command for this device.
        # NOTE(review): the command string is executed via a shell;
        # callers must supply trusted input.
        if sys.version_info >= (3, 5):
            subprocess.run("\"" + adb_path + "\"" + " -s " + self.__name + " " + command, shell=True)
        else:
            os.system("\"" + adb_path + "\"" + " -s " + self.__name + " " + command)
class Devices:
    """Ordered collection of device records, each carrying a Device wrapper
    under the 'object' key."""

    def __init__(self):
        self.__devices = []

    def __str__(self):
        return str(self.__devices)

    def append(self, device):
        """Register a device record, instantiating its Device wrapper.

        Records whose full serial is already known are silently ignored.
        """
        if self.has_device(device['serial']):
            return
        device['object'] = Device(device['serial'],
                                  device['serial_short'])
        self.__devices.append(device)

    def has_device(self, device_name):
        """Return True when *device_name* matches any full or short serial."""
        return any(
            device_name in (entry['serial'], entry['serial_short'])
            for entry in self.__devices
        )

    def execute(self, command):
        """Fan *command* out to every registered device."""
        for entry in self.__devices:
            entry['object'].execute(command)

    def stop(self):
        """Request every device's worker loop to stop."""
        for entry in self.__devices:
            entry['object'].stop()

    def wait_stop(self):
        """Block until every device's worker loop has stopped."""
        for entry in self.__devices:
            entry['object'].wait_stop()
class Adb:
    """Discovers attached adb devices and drives command scripts on all of them."""

    def __init__(self):
        # Device records parsed from `adb devices -l` at construction time.
        self.__devices = Devices()
        self.read_devices()

    @staticmethod
    def get_key_value(var, key):
        """Extract the value from a "key:value" token; exits the program when
        the token does not start with the expected key."""
        if not var.startswith(key):
            print("key " + key + " not found in " + var)
            sys.exit(1)
        # Skip the key itself plus the ':' separator.
        return var[len(key) + 1:]

    @staticmethod
    def get_serial_short(serial):
        """Return the serial without a ":port" suffix (TCP-connected devices);
        USB serials come back unchanged."""
        pos = serial.find(':')
        if pos < 0:
            return serial
        short = serial[0: pos]
        short = short.strip()
        return short

    def read_devices(self):
        """Populate self.__devices by parsing `adb devices -l` output.

        The number of whitespace-separated fields per device line varies
        with adb version and transport (5, 6 or 7 fields); each supported
        layout is handled by its own branch below.  Unknown layouts abort
        the program, except "offline" devices which are skipped.
        """
        process = subprocess.Popen([adb_path, 'devices', '-l'], stdout=subprocess.PIPE)
        while True:
            line = process.stdout.readline().decode('utf-8').strip()
            if not line:
                break
            if line.strip() and not line.startswith('List of devices'):
                d = re.split(r'\s+', line.strip())
                if len(d) == 7:
                    # serial "device" usb product model device transport_id
                    self.__devices.append({
                        'serial': d[0],
                        'serial_short': Adb.get_serial_short(d[0]),
                        'usb': Adb.get_key_value(d[2], "usb"),
                        'product': Adb.get_key_value(d[3], "product"),
                        'model': Adb.get_key_value(d[4], "model"),
                        'device': Adb.get_key_value(d[5], "device"),
                        'transport_id': Adb.get_key_value(d[6], "transport_id")
                    })
                elif len(d) == 6:
                    # serial "device" product model device transport_id
                    self.__devices.append({
                        'serial': d[0],
                        'serial_short': Adb.get_serial_short(d[0]),
                        'usb': "",
                        'product': Adb.get_key_value(d[2], "product"),
                        'model': Adb.get_key_value(d[3], "model"),
                        'device': Adb.get_key_value(d[4], "device"),
                        'transport_id': Adb.get_key_value(d[5], "transport_id")
                    })
                elif len(d) == 5:
                    # serial "device" product model device
                    self.__devices.append({
                        'serial': d[0],
                        'serial_short': Adb.get_serial_short(d[0]),
                        'usb': "",
                        'product': Adb.get_key_value(d[2], "product"),
                        'model': Adb.get_key_value(d[3], "model"),
                        'device': Adb.get_key_value(d[4], "device"),
                        'transport_id': ""
                    })
                else:
                    if d[1] == "offline":
                        # only : serial "offline" transport_id
                        continue
                    print(line + " not support")
                    sys.exit(1)

    def connect_devices(self):
        """Connect any TCP devices listed in device.txt, then rescan.

        Lines starting with '#' are comments; blank lines are ignored.
        Missing device.txt means nothing to do.
        """
        if not os.path.exists("device.txt"):
            return
        with open("device.txt", "r") as f:
            for line in f:
                if line.startswith("#"):
                    # support minimum comment
                    continue
                line = line.strip("\n")
                line = line.strip("\r")
                if len(line) == 0:
                    continue
                if not self.__devices.has_device(line):
                    connect(line)
        # reset: rescan so freshly connected devices are picked up
        self.__devices = Devices()
        self.read_devices()
        print(self.__devices)

    def run(self, filename):
        """Execute each non-comment, non-blank line of *filename* on every
        discovered device, then wait for all device loops to finish."""
        with open(filename, "r") as f:
            for line in f:
                if line.startswith("#"):
                    # support minimum comment
                    continue
                line = line.strip("\n")
                line = line.strip("\r")
                if len(line) == 0:
                    continue
                self.__devices.execute(line)
        self.__devices.stop()
        self.__devices.wait_stop()
if __name__ == '__main__':
    adb = Adb()
    adb.connect_devices()
    # Default to "command.txt" unless a script path is given on the CLI.
    script = sys.argv[1] if len(sys.argv) > 1 else "command.txt"
    adb.run(script)
|
processor.py | # MIT License
#
# Copyright (c) 2021 Javier Alonso <jalonso@teldat.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The class :class:`Processor` is responsible for handing queues, objects and petitions.
Alongside with :class:`Manager <orcha.lib.Manager>`, it's the heart of the orchestrator.
"""
import multiprocessing
import random
import signal
import subprocess
from queue import PriorityQueue, Queue
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, List, Optional, Union
import systemd.daemon as systemd
from orcha import properties
from orcha.interfaces.message import Message
from orcha.interfaces.petition import EmptyPetition, Petition, WatchdogPetition
from orcha.utils.cmd import kill_proc_tree
from orcha.utils.logging_utils import get_logger
# Module-wide logger shared by the processor and its worker threads.
log = get_logger()
class Processor:
    """
    :class:`Processor` is a **singleton** whose responsibility is to handle and manage petitions
    and signals collaborating with the corresponding :class:`Manager`. This class has multiple
    queues and threads for handling incoming requests. The internal data flow is:

     - :func:`enqueue` places a :class:`Message` on the proxied message queue; the message
       thread converts it into a :class:`Petition` and moves it onto the internal
       priority queue.
     - :func:`finish` places an ID on the proxied signal queue; the signal thread
       forwards it to the internal signal queue.
     - The internal petition thread pops petitions and spawns one launch thread per
       petition. That thread evaluates ``p.condition(p)`` (re-queueing the petition when
       unsatisfied), calls ``manager.on_start(p)``, runs ``p.action(fn, p)`` and finally
       ``manager.on_finish(p)``.
     - The internal signal thread resolves IDs to process IDs and sends the signal to
       the whole process tree.

    Note:
        Ideally, you don't need to create any instance for this class, as it is completely
        managed by :class:`Manager` (in particular, see
        :attr:`processor <orcha.lib.Manager.processor>`). This class is big and can be
        complex in some situations or without knowledge about multiprocessing. Below a
        detailed explanation on how it works is added to the documentation so anyone can
        understand the followed process.

    1. **Queues**

       The point of having four :py:class:`queues <queue.Queue>` is that messages are
       travelling across threads in a safe way. When a message is received from another
       process, there is some "black magic" going underneath the
       :py:class:`BaseManager <multiprocessing.managers.BaseManager>` class involving
       pipes, queues and other synchronization mechanisms.

       With that in mind, take into account that messages are not received (yet) by our
       process but by the manager server running on another IP and port, despite the fact
       that the manager is ours.

       That's why a
       `proxy <https://docs.python.org/3/library/multiprocessing.html#proxy-objects>`_
       object is involved in the entire equation. For summarizing, a proxy object is an
       object that presumably lives in another process. In general, writing or reading
       data from a proxy object causes every other process to notice our action (in terms
       that a new item is now available for everyone, a deletion happens for all of them,
       etc).

       If we decide to use :py:class:`queues <multiprocessing.Queue>` instead, additions
       or deletions won't be propagated to the rest of the processes as it is a
       local-only object.

       For that reason, there is four queues: two of them have the mission of receiving
       the requests from other processes and once the request is received by us and is
       available on our process, it is then added to an internal priority queue by the
       handler threads (allowing, for example, sorting of the petitions based on their
       priority, which wouldn't be possible on a proxied queue).

    2. **Threads**

       As you may notice, there is almost two threads per queue: one is a **producer**
       and the other one is the **consumer** (following the producer/consumer model). The
       need of so much threads (5 at the time this is being written) is **to not to
       block** any processes and leave the orchestrator free of load.

       As the queues are synchronous, which means that the thread is forced to wait until
       an item is present (see :py:attr:`Queue.get() <queue.Queue.get>`), waiting for
       petitions will pause the entire main thread until all queues are unlocked
       sequentially, one after each other, preventing any other request to arrive and
       being processed.

       That's the reason why there are two threads just listening to proxied queues and
       placing the requests on another queue. In addition, the execution of the action is
       also run asynchronously in order to not to block the main thread during the
       processing (this also applies to the evaluation of the
       :attr:`condition <orcha.interfaces.Petition.condition>` predicate).

       Each time a new thread is spawned for a :class:`Petition`, it is saved on a list
       of currently running threads. There is another thread running from the start of
       the :class:`Process` which is the **garbage collector**, whose responsibility is
       to check which threads on that list have finished and remove them when that
       happens.

    Warning:
        When defining your own :attr:`action <orcha.interfaces.Petition.action>`, take
        special care on what you will be running as any deadlock may block the entire
        pipeline forever (which basically is what deadlocks does). Your thread must be
        error-free or must include a proper error handling on the **server manager
        object**.

        This also applies when calling :func:`shutdown`, as the processor will wait until
        all threads are done. In case there is any deadlock in there, the processor will
        never end and you will have to manually force finish it (which may cause zombie
        processes or memory leaks).

    .. versionadded:: 0.1.7
        Processor now supports an attribute
        :attr:`look_ahead <orcha.lib.Processor.look_ahead>` which allows defining an
        amount of items that will be pop-ed from the queue, modifying the default
        behavior of just obtaining a single item.

    .. versionadded:: 0.1.8
        Manager calls to :func:`on_start <orcha.lib.Manager.on_start>` and
        :func:`on_finish <orcha.lib.Manager.on_finish>` are performed in a mutex
        environment, so there is no need to do any kind of extra processing at the
        :func:`condition <orcha.interfaces.Petition.condition>` function. Nevertheless,
        the actual action run there should be minimal as it will block any other process.

    .. versionadded:: 0.1.9
        Processor supports a new attribute
        :attr:`notify_watchdog <orcha.lib.Processor.notify_watchdog>`
        that defines if the processor shall create a background thread that takes care of
        notifying systemd about our status and, if dead, to restart us.

    Args:
        queue (multiprocessing.Queue, optional): queue in which new :class:`Message` s are
            expected to be. Defaults to :obj:`None`.
        finishq (multiprocessing.Queue, optional): queue in which signals are expected to
            be. Defaults to :obj:`None`.
        manager (:class:`Manager`, optional): manager object used for synchronization and
            action calling. Defaults to :obj:`None`.
        look_ahead (:obj:`int`, optional): amount of items to look ahead when querying
            the queue. Having a value higher than 1 allows the processor to access items
            further in the queue if, for any reason, the next one is not available yet to
            be executed but the second one is (i.e.: if you define priorities based on
            time, allow the second item to be executed before the first one). Take
            special care with this parameter as this may cause starvation in processes.
        notify_watchdog (:obj:`bool`, optional): if the service is running under systemd,
            notify periodically (every 5 seconds) that we are alive and doing things. If
            there is any kind of unexpected error, a watchdog trigger will be set and the
            service will be restarted.

    Raises:
        ValueError: when no arguments are given and the processor has not been
            initialized yet.
    """

    __instance__ = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton: the first instantiation wins; subsequent calls
        # return the same object and skip re-initialization via __must_init__.
        # NOTE(review): this is not thread-safe — concurrent first calls could
        # race; confirm construction always happens from a single thread.
        if Processor.__instance__ is None:
            instance = object.__new__(cls)
            instance.__must_init__ = True
            Processor.__instance__ = instance
        return Processor.__instance__

    def __init__(
        self,
        queue: multiprocessing.Queue = None,
        finishq: multiprocessing.Queue = None,
        manager=None,
        look_ahead: int = 1,
        notify_watchdog=False,
    ):
        if self.__must_init__:
            if not all((queue, finishq, manager)):
                raise ValueError("queue & manager objects cannot be empty during init")
            self.lock = Lock()
            self.queue = queue
            self.finishq = finishq
            self.manager = manager
            self.look_ahead = look_ahead
            self.running = True
            self.notify_watchdog = notify_watchdog
            self._internalq = PriorityQueue()
            self._signals = Queue()
            # Threads currently running petition actions; pruned by _gc.
            self._threads: List[Thread] = []
            # Maps petition ID -> PID of the process executing its action.
            self._petitions: Dict[int, int] = {}
            self._gc_event = Event()
            # Serializes condition evaluation and on_start/on_finish calls.
            self._pred_lock = Lock()
            self._process_t = Thread(target=self._process)
            self._internal_t = Thread(target=self._internal_process)
            self._finished_t = Thread(target=self._signal_handler)
            self._signal_t = Thread(target=self._internal_signal_handler)
            self._gc_t = Thread(target=self._gc)
            self._wd_t = Thread(target=self._notify_watchdog)
            self._process_t.start()
            self._internal_t.start()
            self._finished_t.start()
            self._signal_t.start()
            self._gc_t.start()
            if self.notify_watchdog:
                self._wd_t.start()
            self.__must_init__ = False

    @property
    def running(self) -> bool:
        """Whether if the current processor is running or not"""
        return self._running

    @running.setter
    def running(self, v: bool):
        with self.lock:
            self._running = v

    def exists(self, m: Union[Message, int, str]) -> bool:
        """
        Checks if the given message is running or not.

        .. versionchanged:: 0.1.6
            Attribute :attr:`m` now supports a :obj:`str` as ID.

        Args:
            m (:obj:`Message` | :obj:`int` | :obj:`str`]): the message to check or its
                :attr:`id <orcha.interfaces.Message.id>` (if :obj:`int` or :obj:`str`).

        Returns:
            bool: :obj:`True` if running, :obj:`False` if not.

        Note:
            A message is considered to not exist iff **it's not running**, but can
            be enqueued waiting for its turn.
        """
        return self.manager.is_running(m)

    def enqueue(self, m: Message):
        """Shortcut for::

            processor.queue.put(message)

        Args:
            m (Message): the message to enqueue
        """
        self.queue.put(m)

    def finish(self, m: Union[Message, int, str]):
        """Sets a finish signal for the given message.

        .. versionchanged:: 0.1.6
            Attribute :attr:`m` now supports a :obj:`str` as ID.

        Args:
            m (:obj:`Message` | :obj:`int` | :obj:`str`): the message or its
                :attr:`id <orcha.interfaces.Message.id>` (if :obj:`int` or :obj:`str`).
        """
        if isinstance(m, Message):
            m = m.id
        log.debug("received petition for finish message with ID %s", m)
        self.finishq.put(m)

    def _process(self):
        """Message thread: converts proxied messages into internal petitions."""
        log.debug("fixing internal digest key")
        multiprocessing.current_process().authkey = properties.authkey
        try:
            while self.running:
                log.debug("waiting for message...")
                m = self.queue.get()
                if m is not None:
                    log.debug('converting message "%s" into a petition', m)
                    p: Optional[Petition] = self.manager.convert_to_petition(m)
                    if p is not None:
                        log.debug("> %s", p)
                        if self.exists(p.id):
                            log.warning("received message (%s) already exists", p)
                            p.queue.put(f'message with ID "{p.id}" already exists\n')
                            p.queue.put(1)
                            continue
                    else:
                        log.debug('message "%s" is invalid, skipping...', m)
                        continue
                else:
                    # None is the shutdown sentinel; an EmptyPetition unblocks
                    # the internal consumer so it can notice we are stopping.
                    p = EmptyPetition()
                self._internalq.put(p)
        except Exception as e:
            log.fatal("unhandled exception: %s", e)
            self.running = False
            if self.notify_watchdog:
                systemd.notify(f"STATUS=Failure due to unexpected exception - {e}")
                systemd.notify("WATCHDOG=trigger")

    def _internal_process(self):
        """Petition thread: pops up to look_ahead petitions and launches them."""
        try:
            while self.running:
                log.debug("waiting for internal petition...")
                empty = False
                items_to_enqueue = []
                log.debug("looking ahead %d items", self.look_ahead)
                for i in range(1, self.look_ahead + 1):
                    p: Petition = self._internalq.get()
                    if not isinstance(p, (EmptyPetition, WatchdogPetition)):
                        log.debug('adding petition "%s" to list of possible petitions', p)
                        items_to_enqueue.append(p)
                    elif isinstance(p, EmptyPetition):
                        log.debug("received empty petition")
                        empty = True
                        break
                    elif self.notify_watchdog and isinstance(p, WatchdogPetition):
                        log.debug("received watchdog request [WD is enabled for this instance]")
                        systemd.notify("WATCHDOG=1")
                    # Stop early instead of blocking on get() when the queue
                    # holds fewer items than look_ahead.
                    if i > self._internalq.qsize():
                        break
                for item in items_to_enqueue:
                    log.debug('creating thread for petition "%s"', item)
                    launch_t = Thread(target=self._start, args=(item,))
                    launch_t.start()
                    self._threads.append(launch_t)
                if not empty:
                    # Randomized pause to avoid hammering the queue in a tight
                    # loop while petitions are flowing.
                    sleep(random.uniform(0.5, 5))
            log.debug("internal process handler finished")
        except Exception as e:
            log.fatal("unhandled exception: %s", e)
            self.running = False
            if self.notify_watchdog:
                systemd.notify(f"STATUS=Failure due to unexpected exception - {e}")
                systemd.notify("WATCHDOG=trigger")

    def _start(self, p: Petition):
        """Launch thread body: condition check, action execution, callbacks."""
        log.debug('launching petition "%s"', p)

        def assign_pid(proc: Union[subprocess.Popen, int]):
            # Callback handed to the action so we can signal its process later.
            pid = proc if isinstance(proc, int) else proc.pid
            log.debug('assigning pid to "%s"', pid)
            self._petitions[p.id] = pid

        with self._pred_lock:
            if not p.condition(p):
                log.debug('petition "%s" did not satisfy the condition, re-adding to queue', p)
                self._internalq.put(p)
                self._gc_event.set()
                return
        log.debug('petition "%s" satisfied condition', p)
        self.manager.on_start(p)
        try:
            p.action(assign_pid, p)
        except Exception as e:
            log.warning(
                'unhandled exception while running petition "%s" -> "%s"', p, e, exc_info=True
            )
        finally:
            log.debug('petition "%s" finished, triggering callbacks', p)
            self._petitions.pop(p.id, None)
            self._gc_event.set()
            with self._pred_lock:
                self.manager.on_finish(p)

    def _signal_handler(self):
        """Signal thread: moves proxied finish signals to the internal queue."""
        log.debug("fixing internal digest key")
        multiprocessing.current_process().authkey = properties.authkey
        try:
            while self.running:
                log.debug("waiting for finish message...")
                m = self.finishq.get()
                self._signals.put(m)
        except Exception as e:
            log.fatal("unhandled exception: %s", e)
            self.running = False
            if self.notify_watchdog:
                systemd.notify(f"STATUS=Failure due to unexpected exception - {e}")
                systemd.notify("WATCHDOG=trigger")

    def _internal_signal_handler(self):
        """Internal signal thread: resolves IDs to PIDs and signals the tree."""
        try:
            while self.running:
                log.debug("waiting for internal signal...")
                m = self._signals.get()
                if isinstance(m, Message):
                    m = m.id
                if m is not None:
                    log.debug('received signal petition for message with ID "%s"', m)
                    if m not in self._petitions:
                        log.warning('message with ID "%s" not found or not running!', m)
                        continue
                    pid = self._petitions[m]
                    kill_proc_tree(pid, including_parent=False, sig=signal.SIGINT)
                    log.debug('sent signal to process "%d" and all of its children', pid)
        except Exception as e:
            log.fatal("unhandled exception: %s", e)
            self.running = False
            if self.notify_watchdog:
                systemd.notify(f"STATUS=Failure due to unexpected exception - {e}")
                systemd.notify("WATCHDOG=trigger")

    def _gc(self):
        """Garbage collector thread: prunes finished petition threads."""
        try:
            while self.running:
                self._gc_event.wait()
                self._gc_event.clear()
                # Iterate over a snapshot: removing items from a list while
                # iterating it directly skips the element following each
                # removed one, which leaked finished threads.
                for thread in list(self._threads):
                    if not thread.is_alive():
                        log.debug('pruning thread "%s"', thread)
                        self._threads.remove(thread)
        except Exception as e:
            log.fatal("unhandled exception: %s", e)
            self.running = False
            if self.notify_watchdog:
                systemd.notify(f"STATUS=Failure due to unexpected exception - {e}")
                systemd.notify("WATCHDOG=trigger")

    def _notify_watchdog(self):
        # Periodically enqueue a WatchdogPetition so the petition thread can
        # ping systemd (WATCHDOG=1) every ~5 seconds.
        while self.running and self.notify_watchdog:
            self._internalq.put(WatchdogPetition())
            sleep(5)

    def shutdown(self):
        """
        Finishes all the internal queues and threads, waiting for any pending requests to
        finish (they are not interrupted by default, unless the signal gets propagated).
        This method must be called when finished all the server operations.
        """
        try:
            log.info("finishing processor")
            self.running = False
            # Sentinels to unblock the blocking get() calls in the consumers.
            self.queue.put(None)
            self.finishq.put(None)
            self._gc_event.set()
            log.info("waiting for pending processes...")
            self._process_t.join()
            self._internal_t.join()
            log.info("waiting for pending signals...")
            self._finished_t.join()
            self._signal_t.join()
            log.info("waiting for garbage collector...")
            self._gc_t.join()
            log.info("waiting for pending operations...")
            for thread in self._threads:
                thread.join()
            log.info("finished")
        except Exception as e:
            log.critical("unexpected error during shutdown! -> %s", e, exc_info=True)
            if self.notify_watchdog:
                systemd.notify(f"STATUS=Failure due to unexpected exception - {e}")
                systemd.notify("WATCHDOG=trigger")


__all__ = ["Processor"]
|
test_nntplib.py | import io
import socket
import datetime
import textwrap
import unittest
import functools
import contextlib
import os.path
import threading
from test import support
from nntplib import NNTP, GroupInfo
import nntplib
from unittest.mock import patch
# ssl is optional; SSL-dependent tests are skipped when it is unavailable.
try:
    import ssl
except ImportError:
    ssl = None

# Generous socket timeout (seconds) for the networked tests.
TIMEOUT = 30
# Certificate used by the locally mocked SSL tests.
certfile = os.path.join(os.path.dirname(__file__), 'keycert3.pem')

# TODO:
# - test the `file` arg to more commands
# - test error conditions
# - test auth and `usenetrc`
class NetworkedNNTPTestsMixin:
    """Tests run against a live NNTP server.

    Concrete subclasses must define NNTP_HOST, GROUP_NAME, GROUP_PAT and
    NNTP_CLASS, and establish a class-level ``server`` connection (see the
    concrete classes below).  wrap_methods() later wraps every test in a
    transient_internet() guard.
    """

    def test_welcome(self):
        welcome = self.server.getwelcome()
        self.assertEqual(str, type(welcome))

    def test_help(self):
        resp, lines = self.server.help()
        self.assertTrue(resp.startswith("100 "), resp)
        for line in lines:
            self.assertEqual(str, type(line))

    def test_list(self):
        resp, groups = self.server.list()
        if len(groups) > 0:
            self.assertEqual(GroupInfo, type(groups[0]))
            self.assertEqual(str, type(groups[0].group))

    def test_list_active(self):
        resp, groups = self.server.list(self.GROUP_PAT)
        if len(groups) > 0:
            self.assertEqual(GroupInfo, type(groups[0]))
            self.assertEqual(str, type(groups[0].group))

    def test_unknown_command(self):
        with self.assertRaises(nntplib.NNTPPermanentError) as cm:
            self.server._shortcmd("XYZZY")
        resp = cm.exception.response
        self.assertTrue(resp.startswith("500 "), resp)

    def test_newgroups(self):
        # gmane gets a constant influx of new groups. In order not to stress
        # the server too much, we choose a recent date in the past.
        dt = datetime.date.today() - datetime.timedelta(days=7)
        resp, groups = self.server.newgroups(dt)
        if len(groups) > 0:
            self.assertIsInstance(groups[0], GroupInfo)
            self.assertIsInstance(groups[0].group, str)

    def test_description(self):
        def _check_desc(desc):
            # Sanity checks
            self.assertIsInstance(desc, str)
            self.assertNotIn(self.GROUP_NAME, desc)
        desc = self.server.description(self.GROUP_NAME)
        _check_desc(desc)
        # Another sanity check
        self.assertIn("Python", desc)
        # With a pattern
        desc = self.server.description(self.GROUP_PAT)
        _check_desc(desc)
        # Shouldn't exist
        desc = self.server.description("zk.brrtt.baz")
        self.assertEqual(desc, '')

    def test_descriptions(self):
        resp, descs = self.server.descriptions(self.GROUP_PAT)
        # 215 for LIST NEWSGROUPS, 282 for XGTITLE
        self.assertTrue(
            resp.startswith("215 ") or resp.startswith("282 "), resp)
        self.assertIsInstance(descs, dict)
        desc = descs[self.GROUP_NAME]
        self.assertEqual(desc, self.server.description(self.GROUP_NAME))

    def test_group(self):
        result = self.server.group(self.GROUP_NAME)
        self.assertEqual(5, len(result))
        resp, count, first, last, group = result
        self.assertEqual(group, self.GROUP_NAME)
        self.assertIsInstance(count, int)
        self.assertIsInstance(first, int)
        self.assertIsInstance(last, int)
        self.assertLessEqual(first, last)
        self.assertTrue(resp.startswith("211 "), resp)

    def test_date(self):
        resp, date = self.server.date()
        self.assertIsInstance(date, datetime.datetime)
        # Sanity check
        self.assertGreaterEqual(date.year, 1995)
        self.assertLessEqual(date.year, 2030)

    def _check_art_dict(self, art_dict):
        # Some sanity checks for a field dictionary returned by OVER / XOVER
        self.assertIsInstance(art_dict, dict)
        # NNTP has 7 mandatory fields
        self.assertGreaterEqual(art_dict.keys(),
                                {"subject", "from", "date", "message-id",
                                 "references", ":bytes", ":lines"}
                                )
        for v in art_dict.values():
            self.assertIsInstance(v, (str, type(None)))

    def test_xover(self):
        resp, count, first, last, name = self.server.group(self.GROUP_NAME)
        resp, lines = self.server.xover(last - 5, last)
        if len(lines) == 0:
            self.skipTest("no articles retrieved")
        # The 'last' article is not necessarily part of the output (cancelled?)
        art_num, art_dict = lines[0]
        self.assertGreaterEqual(art_num, last - 5)
        self.assertLessEqual(art_num, last)
        self._check_art_dict(art_dict)

    @unittest.skipIf(True, 'temporarily skipped until a permanent solution'
                           ' is found for issue #28971')
    def test_over(self):
        resp, count, first, last, name = self.server.group(self.GROUP_NAME)
        start = last - 10
        # The "start-" article range form
        resp, lines = self.server.over((start, None))
        art_num, art_dict = lines[0]
        self._check_art_dict(art_dict)
        # The "start-end" article range form
        resp, lines = self.server.over((start, last))
        art_num, art_dict = lines[-1]
        # The 'last' article is not necessarily part of the output (cancelled?)
        self.assertGreaterEqual(art_num, start)
        self.assertLessEqual(art_num, last)
        self._check_art_dict(art_dict)
        # XXX The "message_id" form is unsupported by gmane
        # 503 Overview by message-ID unsupported

    def test_xhdr(self):
        resp, count, first, last, name = self.server.group(self.GROUP_NAME)
        resp, lines = self.server.xhdr('subject', last)
        for line in lines:
            self.assertEqual(str, type(line[1]))

    def check_article_resp(self, resp, article, art_num=None):
        # Shared validation for ARTICLE / HEAD / BODY responses.
        self.assertIsInstance(article, nntplib.ArticleInfo)
        if art_num is not None:
            self.assertEqual(article.number, art_num)
        for line in article.lines:
            self.assertIsInstance(line, bytes)
        # XXX this could exceptionally happen...
        self.assertNotIn(article.lines[-1], (b".", b".\n", b".\r\n"))

    @unittest.skipIf(True, "FIXME: see bpo-32128")
    def test_article_head_body(self):
        resp, count, first, last, name = self.server.group(self.GROUP_NAME)
        # Try to find an available article
        for art_num in (last, first, last - 1):
            try:
                resp, head = self.server.head(art_num)
            except nntplib.NNTPTemporaryError as e:
                if not e.response.startswith("423 "):
                    raise
                # "423 No such article" => choose another one
                continue
            break
        else:
            self.skipTest("could not find a suitable article number")
        self.assertTrue(resp.startswith("221 "), resp)
        self.check_article_resp(resp, head, art_num)
        resp, body = self.server.body(art_num)
        self.assertTrue(resp.startswith("222 "), resp)
        self.check_article_resp(resp, body, art_num)
        resp, article = self.server.article(art_num)
        self.assertTrue(resp.startswith("220 "), resp)
        self.check_article_resp(resp, article, art_num)
        # Tolerate running the tests from behind a NNTP virus checker
        blacklist = lambda line: line.startswith(b'X-Antivirus')
        filtered_head_lines = [line for line in head.lines
                               if not blacklist(line)]
        filtered_lines = [line for line in article.lines
                          if not blacklist(line)]
        self.assertEqual(filtered_lines, filtered_head_lines + [b''] + body.lines)

    def test_capabilities(self):
        # The server under test implements NNTP version 2 and has a
        # couple of well-known capabilities. Just sanity check that we
        # got them.
        def _check_caps(caps):
            caps_list = caps['LIST']
            self.assertIsInstance(caps_list, (list, tuple))
            self.assertIn('OVERVIEW.FMT', caps_list)
        self.assertGreaterEqual(self.server.nntp_version, 2)
        _check_caps(self.server.getcapabilities())
        # This re-emits the command
        resp, caps = self.server.capabilities()
        _check_caps(caps)

    def test_zlogin(self):
        # This test must be the penultimate because further commands will be
        # refused.
        baduser = "notarealuser"
        badpw = "notarealpassword"
        # Check that bogus credentials cause failure
        self.assertRaises(nntplib.NNTPError, self.server.login,
                          user=baduser, password=badpw, usenetrc=False)
        # FIXME: We should check that correct credentials succeed, but that
        # would require valid details for some server somewhere to be in the
        # test suite, I think. Gmane is anonymous, at least as used for the
        # other tests.

    def test_zzquit(self):
        # This test must be called last, hence the name
        cls = type(self)
        try:
            self.server.quit()
        finally:
            # tearDownClass checks for None before quitting again.
            cls.server = None

    @classmethod
    def wrap_methods(cls):
        # Wrap all methods in a transient_internet() exception catcher
        # XXX put a generic version in test.support?
        def wrap_meth(meth):
            @functools.wraps(meth)
            def wrapped(self):
                with support.transient_internet(self.NNTP_HOST):
                    meth(self)
            return wrapped
        for name in dir(cls):
            if not name.startswith('test_'):
                continue
            meth = getattr(cls, name)
            if not callable(meth):
                continue
            # Need to use a closure so that meth remains bound to its current
            # value
            setattr(cls, name, wrap_meth(meth))

    def test_with_statement(self):
        def is_connected():
            if not hasattr(server, 'file'):
                return False
            try:
                server.help()
            except (OSError, EOFError):
                return False
            return True

        with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server:
            self.assertTrue(is_connected())
            self.assertTrue(server.help())
        self.assertFalse(is_connected())

        with self.NNTP_CLASS(self.NNTP_HOST, timeout=TIMEOUT, usenetrc=False) as server:
            server.quit()
        self.assertFalse(is_connected())
# Wrap every test_* method of the mixin so transient network failures are
# skipped rather than reported as errors.
NetworkedNNTPTestsMixin.wrap_methods()

# Connection setup may fail with a plain EOFError or, when SSL is involved,
# an SSLEOFError as well.
EOF_ERRORS = (EOFError,)
if ssl is not None:
    EOF_ERRORS += (ssl.SSLEOFError,)
class NetworkedNNTPTests(NetworkedNNTPTestsMixin, unittest.TestCase):
    """Mixin tests bound to a concrete plain-text NNTP server."""

    # This server supports STARTTLS (gmane doesn't)
    NNTP_HOST = 'news.trigofacile.com'
    GROUP_NAME = 'fr.comp.lang.python'
    GROUP_PAT = 'fr.comp.lang.*'

    NNTP_CLASS = NNTP

    @classmethod
    def setUpClass(cls):
        # A single shared connection for the whole class; a connection-time
        # EOF skips the class instead of erroring every individual test.
        support.requires("network")
        with support.transient_internet(cls.NNTP_HOST):
            try:
                cls.server = cls.NNTP_CLASS(cls.NNTP_HOST, timeout=TIMEOUT,
                                            usenetrc=False)
            except EOF_ERRORS:
                raise unittest.SkipTest(f"{cls} got EOF error on connecting "
                                        f"to {cls.NNTP_HOST!r}")

    @classmethod
    def tearDownClass(cls):
        # test_zzquit may already have quit and cleared the connection.
        if cls.server is not None:
            cls.server.quit()
@unittest.skipUnless(ssl, 'requires SSL support')
class NetworkedNNTP_SSLTests(NetworkedNNTPTests):
    """Same networked tests, run over an implicit-SSL connection."""

    # Technical limits for this public NNTP server (see http://www.aioe.org):
    # "Only two concurrent connections per IP address are allowed and
    # 400 connections per day are accepted from each IP address."

    NNTP_HOST = 'nntp.aioe.org'
    GROUP_NAME = 'comp.lang.python'
    GROUP_PAT = 'comp.lang.*'

    # getattr: nntplib.NNTP_SSL only exists when the ssl module is available.
    NNTP_CLASS = getattr(nntplib, 'NNTP_SSL', None)

    # Disabled as it produces too much data
    test_list = None

    # Disabled as the connection will already be encrypted.
    test_starttls = None
#
# Non-networked tests using a local server (or something mocking it).
#
class _NNTPServerIO(io.RawIOBase):
    """A raw IO object allowing NNTP commands to be received and processed
    by a handler. The handler can push responses which can then be read
    from the IO object."""
    def __init__(self, handler):
        io.RawIOBase.__init__(self)
        # The channel from the client
        self.c2s = io.BytesIO()
        # The channel to the client
        self.s2c = io.BytesIO()
        self.handler = handler
        # The handler reads commands with c2s.readline and answers through
        # push_data (which buffers into s2c).
        self.handler.start(self.c2s.readline, self.push_data)
    def readable(self):
        return True
    def writable(self):
        return True
    def push_data(self, data):
        """Push (buffer) some data to send to the client."""
        # Append at the end of s2c while preserving the client's current
        # read position.
        pos = self.s2c.tell()
        self.s2c.seek(0, 2)
        self.s2c.write(data)
        self.s2c.seek(pos)
    def write(self, b):
        """The client sends us some data"""
        # Write the new request bytes at the current position, then seek
        # back so the handler's readline() picks them up.  NOTE(review):
        # this relies on the handler having consumed all earlier input, so
        # the current position is also the end of the stream — confirm.
        pos = self.c2s.tell()
        self.c2s.write(b)
        self.c2s.seek(pos)
        self.handler.process_pending()
        return len(b)
    def readinto(self, buf):
        """The client wants to read a response"""
        # Give the handler a chance to produce output first, then copy as
        # many buffered bytes as fit into buf.
        self.handler.process_pending()
        b = self.s2c.read(len(buf))
        n = len(b)
        buf[:n] = b
        return n
def make_mock_file(handler):
    """Return a (raw_io, buffered_file) pair driving *handler*.

    BufferedRWPair — unlike BufferedRandom — yields a non-seekable file,
    which is what a real socket-backed NNTP connection looks like.
    """
    raw = _NNTPServerIO(handler)
    buffered = io.BufferedRWPair(raw, raw)
    return (raw, buffered)
class MockedNNTPTestsMixin:
    """Create an _NNTPBase connected to an in-memory mock server."""
    # Override in derived classes
    handler_class = None
    def setUp(self):
        super().setUp()
        self.make_server()
    def tearDown(self):
        super().tearDown()
        # Drop the reference; the in-memory mock needs no explicit shutdown.
        del self.server
    def make_server(self, *args, **kwargs):
        """Build self.server wired to a fresh handler_class instance and
        return it; extra arguments are forwarded to _NNTPBase."""
        self.handler = self.handler_class()
        self.sio, file = make_mock_file(self.handler)
        self.server = nntplib._NNTPBase(file, 'test.server', *args, **kwargs)
        return self.server
class MockedNNTPWithReaderModeMixin(MockedNNTPTestsMixin):
    """Like MockedNNTPTestsMixin, but the server is (re)created with
    readermode=True."""
    def setUp(self):
        # super().setUp() already built a default server; replace it with
        # one created in reader mode.
        super().setUp()
        self.make_server(readermode=True)
class NNTPv1Handler:
    """A handler for RFC 977"""
    welcome = "200 NNTP mock server"
    def start(self, readline, push_data):
        # start() acts as the per-connection initializer: it receives the
        # callables used to read client input and push server output.
        self.in_body = False
        self.allow_posting = True
        self._readline = readline
        self._push_data = push_data
        self._logged_in = False
        self._user_sent = False
        # Our welcome
        self.handle_welcome()
    def _decode(self, data):
        # surrogateescape keeps arbitrary bytes round-trippable.
        return str(data, "utf-8", "surrogateescape")
    def process_pending(self):
        """Consume buffered client input: first finish any article body
        being posted, then dispatch complete command lines to handle_*."""
        if self.in_body:
            # Collect raw body lines until the "." terminator, then invoke
            # the command handler that requested the body.
            while True:
                line = self._readline()
                if not line:
                    return
                self.body.append(line)
                if line == b".\r\n":
                    break
            try:
                meth, tokens = self.body_callback
                meth(*tokens, body=self.body)
            finally:
                self.body_callback = None
                self.body = None
                self.in_body = False
        while True:
            line = self._decode(self._readline())
            if not line:
                return
            if not line.endswith("\r\n"):
                raise ValueError("line doesn't end with \\r\\n: {!r}".format(line))
            line = line[:-2]
            cmd, *tokens = line.split()
            #meth = getattr(self.handler, "handle_" + cmd.upper(), None)
            meth = getattr(self, "handle_" + cmd.upper(), None)
            if meth is None:
                self.handle_unknown()
            else:
                try:
                    meth(*tokens)
                except Exception as e:
                    raise ValueError("command failed: {!r}".format(line)) from e
                else:
                    if self.in_body:
                        # The handler asked for a body: remember it so it
                        # can be re-invoked once the body has arrived.
                        self.body_callback = meth, tokens
                        self.body = []
    def expect_body(self):
        """Flag that the client is expected to post a request body"""
        self.in_body = True
    def push_data(self, data):
        """Push some binary data"""
        self._push_data(data)
    def push_lit(self, lit):
        """Push a string literal"""
        # Dedent so callers can use nicely indented triple-quoted blocks,
        # then normalize line endings to CRLF and encode as UTF-8.
        lit = textwrap.dedent(lit)
        lit = "\r\n".join(lit.splitlines()) + "\r\n"
        lit = lit.encode('utf-8')
        self.push_data(lit)
    def handle_unknown(self):
        self.push_lit("500 What?")
    def handle_welcome(self):
        self.push_lit(self.welcome)
    def handle_QUIT(self):
        self.push_lit("205 Bye!")
    def handle_DATE(self):
        self.push_lit("111 20100914001155")
    def handle_GROUP(self, group):
        # Only one group is known to the mock server.
        if group == "fr.comp.lang.python":
            self.push_lit("211 486 761 1265 fr.comp.lang.python")
        else:
            self.push_lit("411 No such group {}".format(group))
    def handle_HELP(self):
        self.push_lit("""\
            100 Legal commands
              authinfo user Name|pass Password|generic <prog> <args>
              date
              help
            Report problems to <root@example.org>
            .""")
    def handle_STAT(self, message_spec=None):
        if message_spec is None:
            self.push_lit("412 No newsgroup selected")
        elif message_spec == "3000234":
            self.push_lit("223 3000234 <45223423@example.com>")
        elif message_spec == "<45223423@example.com>":
            self.push_lit("223 0 <45223423@example.com>")
        else:
            self.push_lit("430 No Such Article Found")
    def handle_NEXT(self):
        self.push_lit("223 3000237 <668929@example.org> retrieved")
    def handle_LAST(self):
        self.push_lit("223 3000234 <45223423@example.com> retrieved")
    def handle_LIST(self, action=None, param=None):
        # Dispatch over the LIST keyword (plain, ACTIVE, OVERVIEW.FMT,
        # NEWSGROUPS); unknown keywords get a 501.
        if action is None:
            self.push_lit("""\
                215 Newsgroups in form "group high low flags".
                comp.lang.python 0000052340 0000002828 y
                comp.lang.python.announce 0000001153 0000000993 m
                free.it.comp.lang.python 0000000002 0000000002 y
                fr.comp.lang.python 0000001254 0000000760 y
                free.it.comp.lang.python.learner 0000000000 0000000001 y
                tw.bbs.comp.lang.python 0000000304 0000000304 y
                .""")
        elif action == "ACTIVE":
            if param == "*distutils*":
                self.push_lit("""\
                    215 Newsgroups in form "group high low flags"
                    gmane.comp.python.distutils.devel 0000014104 0000000001 m
                    gmane.comp.python.distutils.cvs 0000000000 0000000001 m
                    .""")
            else:
                self.push_lit("""\
                    215 Newsgroups in form "group high low flags"
                    .""")
        elif action == "OVERVIEW.FMT":
            self.push_lit("""\
                215 Order of fields in overview database.
                Subject:
                From:
                Date:
                Message-ID:
                References:
                Bytes:
                Lines:
                Xref:full
                .""")
        elif action == "NEWSGROUPS":
            assert param is not None
            if param == "comp.lang.python":
                self.push_lit("""\
                    215 Descriptions in form "group description".
                    comp.lang.python\tThe Python computer language.
                    .""")
            elif param == "comp.lang.python*":
                self.push_lit("""\
                    215 Descriptions in form "group description".
                    comp.lang.python.announce\tAnnouncements about the Python language. (Moderated)
                    comp.lang.python\tThe Python computer language.
                    .""")
            else:
                self.push_lit("""\
                    215 Descriptions in form "group description".
                    .""")
        else:
            self.push_lit('501 Unknown LIST keyword')
    def handle_NEWNEWS(self, group, date_str, time_str):
        # We hard code different return messages depending on passed
        # argument and date syntax.
        if (group == "comp.lang.python" and date_str == "20100913"
            and time_str == "082004"):
            # Date was passed in RFC 3977 format (NNTP "v2")
            self.push_lit("""\
                230 list of newsarticles (NNTP v2) created after Mon Sep 13 08:20:04 2010 follows
                <a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>
                <f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>
                .""")
        elif (group == "comp.lang.python" and date_str == "100913"
            and time_str == "082004"):
            # Date was passed in RFC 977 format (NNTP "v1")
            self.push_lit("""\
                230 list of newsarticles (NNTP v1) created after Mon Sep 13 08:20:04 2010 follows
                <a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>
                <f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>
                .""")
        elif (group == 'comp.lang.python' and
              date_str in ('20100101', '100101') and
              time_str == '090000'):
            # Used by test_too_long_lines: an over-long response line.
            self.push_lit('too long line' * 3000 +
                          '\n.')
        else:
            self.push_lit("""\
                230 An empty list of newsarticles follows
                .""")
        # (Note for experiments: many servers disable NEWNEWS.
        # As of this writing, sicinfo3.epfl.ch doesn't.)
    def handle_XOVER(self, message_spec):
        if message_spec == "57-59":
            self.push_lit(
                "224 Overview information for 57-58 follows\n"
                "57\tRe: ANN: New Plone book with strong Python (and Zope) themes throughout"
                    "\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>"
                    "\tSat, 19 Jun 2010 18:04:08 -0400"
                    "\t<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>"
                    "\t<hvalf7$ort$1@dough.gmane.org>\t7103\t16"
                    "\tXref: news.gmane.org gmane.comp.python.authors:57"
                    "\n"
                "58\tLooking for a few good bloggers"
                    "\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>"
                    "\tThu, 22 Jul 2010 09:14:14 -0400"
                    "\t<A29863FA-F388-40C3-AA25-0FD06B09B5BF@gmail.com>"
                    "\t\t6683\t16"
                    "\t"
                    "\n"
                # A UTF-8 overview line from fr.comp.lang.python
                "59\tRe: Message d'erreur incompréhensible (par moi)"
                    "\tEric Brunel <eric.brunel@pragmadev.nospam.com>"
                    "\tWed, 15 Sep 2010 18:09:15 +0200"
                    "\t<eric.brunel-2B8B56.18091515092010@news.wanadoo.fr>"
                    "\t<4c90ec87$0$32425$ba4acef3@reader.news.orange.fr>\t1641\t27"
                    "\tXref: saria.nerim.net fr.comp.lang.python:1265"
                    "\n"
                ".\n")
        else:
            self.push_lit("""\
                224 No articles
                .""")
    def handle_POST(self, *, body=None):
        # Called twice: once without a body (answer 340 and request one),
        # then again by process_pending() with the collected body.
        if body is None:
            if self.allow_posting:
                self.push_lit("340 Input article; end with <CR-LF>.<CR-LF>")
                self.expect_body()
            else:
                self.push_lit("440 Posting not permitted")
        else:
            assert self.allow_posting
            self.push_lit("240 Article received OK")
            self.posted_body = body
    def handle_IHAVE(self, message_id, *, body=None):
        # Same two-phase protocol as handle_POST, keyed on the message-id.
        if body is None:
            if (self.allow_posting and
                message_id == "<i.am.an.article.you.will.want@example.com>"):
                self.push_lit("335 Send it; end with <CR-LF>.<CR-LF>")
                self.expect_body()
            else:
                self.push_lit("435 Article not wanted")
        else:
            assert self.allow_posting
            self.push_lit("235 Article transferred OK")
            self.posted_body = body
    # Canned article served by ARTICLE/HEAD/BODY; the indentation is
    # stripped by push_lit()'s dedent.
    sample_head = """\
        From: "Demo User" <nobody@example.net>
        Subject: I am just a test article
        Content-Type: text/plain; charset=UTF-8; format=flowed
        Message-ID: <i.am.an.article.you.will.want@example.com>"""
    sample_body = """\
        This is just a test article.
        ..Here is a dot-starting line.
        -- Signed by Andr\xe9."""
    sample_article = sample_head + "\n\n" + sample_body
    def handle_ARTICLE(self, message_spec=None):
        if message_spec is None:
            self.push_lit("220 3000237 <45223423@example.com>")
        elif message_spec == "<45223423@example.com>":
            self.push_lit("220 0 <45223423@example.com>")
        elif message_spec == "3000234":
            self.push_lit("220 3000234 <45223423@example.com>")
        else:
            self.push_lit("430 No Such Article Found")
            return
        self.push_lit(self.sample_article)
        self.push_lit(".")
    def handle_HEAD(self, message_spec=None):
        if message_spec is None:
            self.push_lit("221 3000237 <45223423@example.com>")
        elif message_spec == "<45223423@example.com>":
            self.push_lit("221 0 <45223423@example.com>")
        elif message_spec == "3000234":
            self.push_lit("221 3000234 <45223423@example.com>")
        else:
            self.push_lit("430 No Such Article Found")
            return
        self.push_lit(self.sample_head)
        self.push_lit(".")
    def handle_BODY(self, message_spec=None):
        if message_spec is None:
            self.push_lit("222 3000237 <45223423@example.com>")
        elif message_spec == "<45223423@example.com>":
            self.push_lit("222 0 <45223423@example.com>")
        elif message_spec == "3000234":
            self.push_lit("222 3000234 <45223423@example.com>")
        else:
            self.push_lit("430 No Such Article Found")
            return
        self.push_lit(self.sample_body)
        self.push_lit(".")
    def handle_AUTHINFO(self, cred_type, data):
        # Minimal two-step USER/PASS login state machine.
        if self._logged_in:
            self.push_lit('502 Already Logged In')
        elif cred_type == 'user':
            if self._user_sent:
                self.push_lit('482 User Credential Already Sent')
            else:
                self.push_lit('381 Password Required')
                self._user_sent = True
        elif cred_type == 'pass':
            self.push_lit('281 Login Successful')
            self._logged_in = True
        else:
            raise Exception('Unknown cred type {}'.format(cred_type))
class NNTPv2Handler(NNTPv1Handler):
    """A handler for RFC 3977 (NNTP "v2")"""
    def handle_CAPABILITIES(self):
        fmt = """\
            101 Capability list:
            VERSION 2 3
            IMPLEMENTATION INN 2.5.1{}
            HDR
            LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT
            OVER
            POST
            READER
            ."""
        # AUTHINFO is advertised only until the client has logged in; the
        # inserted line must carry the same 12-space indent as the rest of
        # the literal so push_lit()'s dedent keeps working.
        if not self._logged_in:
            self.push_lit(fmt.format('\n            AUTHINFO USER'))
        else:
            self.push_lit(fmt.format(''))
    def handle_MODE(self, _):
        # This server advertises READER, so a compliant client must never
        # send MODE READER.
        raise Exception('MODE READER sent despite READER has been advertised')
    def handle_OVER(self, message_spec=None):
        # OVER and XOVER share the same response data in this mock.
        return self.handle_XOVER(message_spec)
class CapsAfterLoginNNTPv2Handler(NNTPv2Handler):
    """A handler that refuses CAPABILITIES until the client has logged in."""
    def handle_CAPABILITIES(self):
        # Logged-in clients get the regular v2 capability listing;
        # anonymous ones are told to authenticate first.
        if self._logged_in:
            super().handle_CAPABILITIES()
        else:
            self.push_lit('480 You must log in.')
class ModeSwitchingNNTPv2Handler(NNTPv2Handler):
    """A server that starts in transit mode: READER is only advertised
    after the client sends MODE READER."""
    def __init__(self):
        # Call super().__init__() so this class stays safe under
        # cooperative multiple inheritance (the bases define no __init__
        # today, so behavior is unchanged).
        super().__init__()
        # True once the client has switched us into reader mode.
        self._switched = False
    def handle_CAPABILITIES(self):
        fmt = """\
            101 Capability list:
            VERSION 2 3
            IMPLEMENTATION INN 2.5.1
            HDR
            LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT
            OVER
            POST
            {}READER
            ."""
        # Before the switch, advertise MODE-READER instead of READER.
        if self._switched:
            self.push_lit(fmt.format(''))
        else:
            self.push_lit(fmt.format('MODE-'))
    def handle_MODE(self, what):
        # Overrides the parent, which rejects MODE outright; here exactly
        # one "MODE reader" is expected.
        assert not self._switched and what == 'reader'
        self._switched = True
        self.push_lit('200 Posting allowed')
class NNTPv1v2TestsMixin:
    """Tests shared between the v1 and v2 mocked-server test classes; the
    expected values mirror the NNTPv1Handler fixtures."""
    def setUp(self):
        super().setUp()
    def test_welcome(self):
        self.assertEqual(self.server.welcome, self.handler.welcome)
    def test_authinfo(self):
        if self.nntp_version == 2:
            self.assertIn('AUTHINFO', self.server._caps)
        self.server.login('testuser', 'testpw')
        # if AUTHINFO is gone from _caps we also know that getcapabilities()
        # has been called after login as it should
        self.assertNotIn('AUTHINFO', self.server._caps)
    def test_date(self):
        resp, date = self.server.date()
        self.assertEqual(resp, "111 20100914001155")
        self.assertEqual(date, datetime.datetime(2010, 9, 14, 0, 11, 55))
    def test_quit(self):
        self.assertFalse(self.sio.closed)
        resp = self.server.quit()
        self.assertEqual(resp, "205 Bye!")
        # quit() must close the underlying file/IO object.
        self.assertTrue(self.sio.closed)
    def test_help(self):
        resp, help = self.server.help()
        self.assertEqual(resp, "100 Legal commands")
        self.assertEqual(help, [
            '  authinfo user Name|pass Password|generic <prog> <args>',
            '  date',
            '  help',
            'Report problems to <root@example.org>',
        ])
    def test_list(self):
        resp, groups = self.server.list()
        self.assertEqual(len(groups), 6)
        g = groups[1]
        self.assertEqual(g,
            GroupInfo("comp.lang.python.announce", "0000001153",
                      "0000000993", "m"))
        resp, groups = self.server.list("*distutils*")
        self.assertEqual(len(groups), 2)
        g = groups[0]
        self.assertEqual(g,
            GroupInfo("gmane.comp.python.distutils.devel", "0000014104",
                      "0000000001", "m"))
    def test_stat(self):
        resp, art_num, message_id = self.server.stat(3000234)
        self.assertEqual(resp, "223 3000234 <45223423@example.com>")
        self.assertEqual(art_num, 3000234)
        self.assertEqual(message_id, "<45223423@example.com>")
        resp, art_num, message_id = self.server.stat("<45223423@example.com>")
        self.assertEqual(resp, "223 0 <45223423@example.com>")
        self.assertEqual(art_num, 0)
        self.assertEqual(message_id, "<45223423@example.com>")
        with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
            self.server.stat("<non.existent.id>")
        self.assertEqual(cm.exception.response, "430 No Such Article Found")
        with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
            self.server.stat()
        self.assertEqual(cm.exception.response, "412 No newsgroup selected")
    def test_next(self):
        resp, art_num, message_id = self.server.next()
        self.assertEqual(resp, "223 3000237 <668929@example.org> retrieved")
        self.assertEqual(art_num, 3000237)
        self.assertEqual(message_id, "<668929@example.org>")
    def test_last(self):
        resp, art_num, message_id = self.server.last()
        self.assertEqual(resp, "223 3000234 <45223423@example.com> retrieved")
        self.assertEqual(art_num, 3000234)
        self.assertEqual(message_id, "<45223423@example.com>")
    def test_description(self):
        desc = self.server.description("comp.lang.python")
        self.assertEqual(desc, "The Python computer language.")
        # Unknown groups give an empty description, not an error.
        desc = self.server.description("comp.lang.pythonx")
        self.assertEqual(desc, "")
    def test_descriptions(self):
        resp, groups = self.server.descriptions("comp.lang.python")
        self.assertEqual(resp, '215 Descriptions in form "group description".')
        self.assertEqual(groups, {
            "comp.lang.python": "The Python computer language.",
        })
        resp, groups = self.server.descriptions("comp.lang.python*")
        self.assertEqual(groups, {
            "comp.lang.python": "The Python computer language.",
            "comp.lang.python.announce": "Announcements about the Python language. (Moderated)",
        })
        resp, groups = self.server.descriptions("comp.lang.pythonx")
        self.assertEqual(groups, {})
    def test_group(self):
        resp, count, first, last, group = self.server.group("fr.comp.lang.python")
        self.assertTrue(resp.startswith("211 "), resp)
        self.assertEqual(first, 761)
        self.assertEqual(last, 1265)
        self.assertEqual(count, 486)
        self.assertEqual(group, "fr.comp.lang.python")
        with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
            self.server.group("comp.lang.python.devel")
        exc = cm.exception
        self.assertTrue(exc.response.startswith("411 No such group"),
                        exc.response)
    def test_newnews(self):
        # NEWNEWS comp.lang.python [20]100913 082004
        dt = datetime.datetime(2010, 9, 13, 8, 20, 4)
        resp, ids = self.server.newnews("comp.lang.python", dt)
        expected = (
            "230 list of newsarticles (NNTP v{0}) "
            "created after Mon Sep 13 08:20:04 2010 follows"
            ).format(self.nntp_version)
        self.assertEqual(resp, expected)
        self.assertEqual(ids, [
            "<a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>",
            "<f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>",
        ])
        # NEWNEWS fr.comp.lang.python [20]100913 082004
        dt = datetime.datetime(2010, 9, 13, 8, 20, 4)
        resp, ids = self.server.newnews("fr.comp.lang.python", dt)
        self.assertEqual(resp, "230 An empty list of newsarticles follows")
        self.assertEqual(ids, [])
    def _check_article_body(self, lines):
        """Verify the 4 decoded body lines (un-dotted, UTF-8 preserved)."""
        self.assertEqual(len(lines), 4)
        self.assertEqual(lines[-1].decode('utf-8'), "-- Signed by André.")
        self.assertEqual(lines[-2], b"")
        self.assertEqual(lines[-3], b".Here is a dot-starting line.")
        self.assertEqual(lines[-4], b"This is just a test article.")
    def _check_article_head(self, lines):
        """Verify the 4 header lines of the sample article."""
        self.assertEqual(len(lines), 4)
        self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>')
        self.assertEqual(lines[3], b"Message-ID: <i.am.an.article.you.will.want@example.com>")
    def _check_article_data(self, lines):
        """Verify a full article: head + separator blank line + body."""
        self.assertEqual(len(lines), 9)
        self._check_article_head(lines[:4])
        self._check_article_body(lines[-4:])
        self.assertEqual(lines[4], b"")
    def test_article(self):
        # ARTICLE
        resp, info = self.server.article()
        self.assertEqual(resp, "220 3000237 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000237)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_data(lines)
        # ARTICLE num
        resp, info = self.server.article(3000234)
        self.assertEqual(resp, "220 3000234 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000234)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_data(lines)
        # ARTICLE id
        resp, info = self.server.article("<45223423@example.com>")
        self.assertEqual(resp, "220 0 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 0)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_data(lines)
        # Non-existent id
        with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
            self.server.article("<non-existent@example.com>")
        self.assertEqual(cm.exception.response, "430 No Such Article Found")
    def test_article_file(self):
        # With a "file" argument
        f = io.BytesIO()
        resp, info = self.server.article(file=f)
        self.assertEqual(resp, "220 3000237 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000237)
        self.assertEqual(message_id, "<45223423@example.com>")
        # When a file is given, no lines are returned...
        self.assertEqual(lines, [])
        # ...the raw article (CRLF endings, UTF-8 bytes) goes to the file.
        data = f.getvalue()
        self.assertTrue(data.startswith(
            b'From: "Demo User" <nobody@example.net>\r\n'
            b'Subject: I am just a test article\r\n'
            ), ascii(data))
        self.assertTrue(data.endswith(
            b'This is just a test article.\r\n'
            b'.Here is a dot-starting line.\r\n'
            b'\r\n'
            b'-- Signed by Andr\xc3\xa9.\r\n'
            ), ascii(data))
    def test_head(self):
        # HEAD
        resp, info = self.server.head()
        self.assertEqual(resp, "221 3000237 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000237)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_head(lines)
        # HEAD num
        resp, info = self.server.head(3000234)
        self.assertEqual(resp, "221 3000234 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000234)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_head(lines)
        # HEAD id
        resp, info = self.server.head("<45223423@example.com>")
        self.assertEqual(resp, "221 0 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 0)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_head(lines)
        # Non-existent id
        with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
            self.server.head("<non-existent@example.com>")
        self.assertEqual(cm.exception.response, "430 No Such Article Found")
    def test_head_file(self):
        f = io.BytesIO()
        resp, info = self.server.head(file=f)
        self.assertEqual(resp, "221 3000237 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000237)
        self.assertEqual(message_id, "<45223423@example.com>")
        self.assertEqual(lines, [])
        data = f.getvalue()
        self.assertTrue(data.startswith(
            b'From: "Demo User" <nobody@example.net>\r\n'
            b'Subject: I am just a test article\r\n'
            ), ascii(data))
        # HEAD must not include the body.
        self.assertFalse(data.endswith(
            b'This is just a test article.\r\n'
            b'.Here is a dot-starting line.\r\n'
            b'\r\n'
            b'-- Signed by Andr\xc3\xa9.\r\n'
            ), ascii(data))
    def test_body(self):
        # BODY
        resp, info = self.server.body()
        self.assertEqual(resp, "222 3000237 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000237)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_body(lines)
        # BODY num
        resp, info = self.server.body(3000234)
        self.assertEqual(resp, "222 3000234 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000234)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_body(lines)
        # BODY id
        resp, info = self.server.body("<45223423@example.com>")
        self.assertEqual(resp, "222 0 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 0)
        self.assertEqual(message_id, "<45223423@example.com>")
        self._check_article_body(lines)
        # Non-existent id
        with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
            self.server.body("<non-existent@example.com>")
        self.assertEqual(cm.exception.response, "430 No Such Article Found")
    def test_body_file(self):
        f = io.BytesIO()
        resp, info = self.server.body(file=f)
        self.assertEqual(resp, "222 3000237 <45223423@example.com>")
        art_num, message_id, lines = info
        self.assertEqual(art_num, 3000237)
        self.assertEqual(message_id, "<45223423@example.com>")
        self.assertEqual(lines, [])
        data = f.getvalue()
        # BODY must not include the headers.
        self.assertFalse(data.startswith(
            b'From: "Demo User" <nobody@example.net>\r\n'
            b'Subject: I am just a test article\r\n'
            ), ascii(data))
        self.assertTrue(data.endswith(
            b'This is just a test article.\r\n'
            b'.Here is a dot-starting line.\r\n'
            b'\r\n'
            b'-- Signed by Andr\xc3\xa9.\r\n'
            ), ascii(data))
    def check_over_xover_resp(self, resp, overviews):
        """Shared assertions for OVER/XOVER: parsed fields of the three
        overview lines served by handle_XOVER."""
        self.assertTrue(resp.startswith("224 "), resp)
        self.assertEqual(len(overviews), 3)
        art_num, over = overviews[0]
        self.assertEqual(art_num, 57)
        self.assertEqual(over, {
            "from": "Doug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>",
            "subject": "Re: ANN: New Plone book with strong Python (and Zope) themes throughout",
            "date": "Sat, 19 Jun 2010 18:04:08 -0400",
            "message-id": "<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>",
            "references": "<hvalf7$ort$1@dough.gmane.org>",
            ":bytes": "7103",
            ":lines": "16",
            "xref": "news.gmane.org gmane.comp.python.authors:57"
            })
        art_num, over = overviews[1]
        self.assertEqual(over["xref"], None)
        art_num, over = overviews[2]
        self.assertEqual(over["subject"],
                         "Re: Message d'erreur incompréhensible (par moi)")
    def test_xover(self):
        resp, overviews = self.server.xover(57, 59)
        self.check_over_xover_resp(resp, overviews)
    def test_over(self):
        # In NNTP "v1", this will fallback on XOVER
        resp, overviews = self.server.over((57, 59))
        self.check_over_xover_resp(resp, overviews)
    # Raw article used by the POST/IHAVE tests, already CRLF-terminated.
    sample_post = (
        b'From: "Demo User" <nobody@example.net>\r\n'
        b'Subject: I am just a test article\r\n'
        b'Content-Type: text/plain; charset=UTF-8; format=flowed\r\n'
        b'Message-ID: <i.am.an.article.you.will.want@example.com>\r\n'
        b'\r\n'
        b'This is just a test article.\r\n'
        b'.Here is a dot-starting line.\r\n'
        b'\r\n'
        b'-- Signed by Andr\xc3\xa9.\r\n'
    )
    def _check_posted_body(self):
        """Check the raw body as received by the server: CRLF endings,
        dot-stuffing applied, "." terminator appended."""
        # Check the raw body as received by the server
        lines = self.handler.posted_body
        # One additional line for the "." terminator
        self.assertEqual(len(lines), 10)
        self.assertEqual(lines[-1], b'.\r\n')
        self.assertEqual(lines[-2], b'-- Signed by Andr\xc3\xa9.\r\n')
        self.assertEqual(lines[-3], b'\r\n')
        self.assertEqual(lines[-4], b'..Here is a dot-starting line.\r\n')
        self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>\r\n')
    def _check_post_ihave_sub(self, func, *args, file_factory):
        """Run one POST/IHAVE round trip with the payload wrapped by
        *file_factory*, for both CRLF and LF source line endings."""
        # First the prepared post with CRLF endings
        post = self.sample_post
        func_args = args + (file_factory(post),)
        self.handler.posted_body = None
        resp = func(*func_args)
        self._check_posted_body()
        # Then the same post with "normal" line endings - they should be
        # converted by NNTP.post and NNTP.ihave.
        post = self.sample_post.replace(b"\r\n", b"\n")
        func_args = args + (file_factory(post),)
        self.handler.posted_body = None
        resp = func(*func_args)
        self._check_posted_body()
        return resp
    def check_post_ihave(self, func, success_resp, *args):
        """Exercise *func* (post or ihave) with every supported payload
        type: bytes, bytearray, file object, and line iterables."""
        # With a bytes object
        resp = self._check_post_ihave_sub(func, *args, file_factory=bytes)
        self.assertEqual(resp, success_resp)
        # With a bytearray object
        resp = self._check_post_ihave_sub(func, *args, file_factory=bytearray)
        self.assertEqual(resp, success_resp)
        # With a file object
        resp = self._check_post_ihave_sub(func, *args, file_factory=io.BytesIO)
        self.assertEqual(resp, success_resp)
        # With an iterable of terminated lines
        def iterlines(b):
            return iter(b.splitlines(keepends=True))
        resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines)
        self.assertEqual(resp, success_resp)
        # With an iterable of non-terminated lines
        def iterlines(b):
            return iter(b.splitlines(keepends=False))
        resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines)
        self.assertEqual(resp, success_resp)
    def test_post(self):
        self.check_post_ihave(self.server.post, "240 Article received OK")
        # When the mock server refuses posting, a 440 must be raised.
        self.handler.allow_posting = False
        with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
            self.server.post(self.sample_post)
        self.assertEqual(cm.exception.response,
                         "440 Posting not permitted")
    def test_ihave(self):
        self.check_post_ihave(self.server.ihave, "235 Article transferred OK",
                              "<i.am.an.article.you.will.want@example.com>")
        # Any other message-id is rejected by the mock server.
        with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
            self.server.ihave("<another.message.id>", self.sample_post)
        self.assertEqual(cm.exception.response,
                         "435 Article not wanted")
    def test_too_long_lines(self):
        # handle_NEWNEWS serves an over-long line for this date; nntplib
        # must reject it with NNTPDataError.
        dt = datetime.datetime(2010, 1, 1, 9, 0, 0)
        self.assertRaises(nntplib.NNTPDataError,
                          self.server.newnews, "comp.lang.python", dt)
class NNTPv1Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase):
    """Tests an NNTP v1 server (no capabilities)."""
    nntp_version = 1
    handler_class = NNTPv1Handler
    def test_caps(self):
        # A v1 server advertises no capabilities at all.
        caps = self.server.getcapabilities()
        self.assertEqual(caps, {})
        self.assertEqual(self.server.nntp_version, 1)
        self.assertEqual(self.server.nntp_implementation, None)
class NNTPv2Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase):
    """Tests an NNTP v2 server (with capabilities)."""
    nntp_version = 2
    handler_class = NNTPv2Handler
    def test_caps(self):
        # The parsed capability dict must match NNTPv2Handler's listing.
        caps = self.server.getcapabilities()
        self.assertEqual(caps, {
            'VERSION': ['2', '3'],
            'IMPLEMENTATION': ['INN', '2.5.1'],
            'AUTHINFO': ['USER'],
            'HDR': [],
            'LIST': ['ACTIVE', 'ACTIVE.TIMES', 'DISTRIB.PATS',
                     'HEADERS', 'NEWSGROUPS', 'OVERVIEW.FMT'],
            'OVER': [],
            'POST': [],
            'READER': [],
            })
        # The highest advertised version (3) is the one retained.
        self.assertEqual(self.server.nntp_version, 3)
        self.assertEqual(self.server.nntp_implementation, 'INN 2.5.1')
class CapsAfterLoginNNTPv2Tests(MockedNNTPTestsMixin, unittest.TestCase):
    """Tests a probably NNTP v2 server with capabilities only after login."""
    nntp_version = 2
    handler_class = CapsAfterLoginNNTPv2Handler
    def test_caps_only_after_login(self):
        # Before login the server answered 480, so no caps were parsed;
        # login() must trigger a fresh CAPABILITIES query.
        self.assertEqual(self.server._caps, {})
        self.server.login('testuser', 'testpw')
        self.assertIn('VERSION', self.server._caps)
class SendReaderNNTPv2Tests(MockedNNTPWithReaderModeMixin,
        unittest.TestCase):
    """Same tests as for v2 but we tell NTTP to send MODE READER to a server
    that isn't in READER mode by default."""
    nntp_version = 2
    handler_class = ModeSwitchingNNTPv2Handler
    def test_we_are_in_reader_mode_after_connect(self):
        # readermode=True must have switched the mock into READER mode
        # during the handshake (see ModeSwitchingNNTPv2Handler).
        self.assertIn('READER', self.server._caps)
class MiscTests(unittest.TestCase):
    def test_decode_header(self):
        """decode_header() handles RFC 2047 encoded words (Q and B
        encodings, possibly concatenated) and passes plain text through."""
        def gives(a, b):
            self.assertEqual(nntplib.decode_header(a), b)
        gives("" , "")
        gives("a plain header", "a plain header")
        gives(" with extra spaces ", " with extra spaces ")
        gives("=?ISO-8859-15?Q?D=E9buter_en_Python?=", "Débuter en Python")
        gives("=?utf-8?q?Re=3A_=5Bsqlite=5D_probl=C3=A8me_avec_ORDER_BY_sur_des_cha?="
              " =?utf-8?q?=C3=AEnes_de_caract=C3=A8res_accentu=C3=A9es?=",
              "Re: [sqlite] problème avec ORDER BY sur des chaînes de caractères accentuées")
        gives("Re: =?UTF-8?B?cHJvYmzDqG1lIGRlIG1hdHJpY2U=?=",
              "Re: problème de matrice")
        # A natively utf-8 header (found in the real world!)
        gives("Re: Message d'erreur incompréhensible (par moi)",
              "Re: Message d'erreur incompréhensible (par moi)")
    def test_parse_overview_fmt(self):
        """_parse_overview_fmt() normalizes header names to lowercase and
        maps Bytes:/Lines: to the ":bytes"/":lines" metadata names."""
        # The minimal (default) response
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", ":bytes", ":lines"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
            ["subject", "from", "date", "message-id", "references",
             ":bytes", ":lines"])
        # The minimal response using alternative names
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", "Bytes:", "Lines:"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
            ["subject", "from", "date", "message-id", "references",
             ":bytes", ":lines"])
        # Variations in casing
        lines = ["subject:", "FROM:", "DaTe:", "message-ID:",
                 "References:", "BYTES:", "Lines:"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
            ["subject", "from", "date", "message-id", "references",
             ":bytes", ":lines"])
        # First example from RFC 3977
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", ":bytes", ":lines", "Xref:full",
                 "Distribution:full"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
            ["subject", "from", "date", "message-id", "references",
             ":bytes", ":lines", "xref", "distribution"])
        # Second example from RFC 3977
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", "Bytes:", "Lines:", "Xref:FULL",
                 "Distribution:FULL"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
            ["subject", "from", "date", "message-id", "references",
             ":bytes", ":lines", "xref", "distribution"])
        # A classic response from INN
        lines = ["Subject:", "From:", "Date:", "Message-ID:",
                 "References:", "Bytes:", "Lines:", "Xref:full"]
        self.assertEqual(nntplib._parse_overview_fmt(lines),
            ["subject", "from", "date", "message-id", "references",
             ":bytes", ":lines", "xref"])
    def test_parse_overview(self):
        """_parse_overview() splits tab-separated overview lines into
        (article number, field dict) pairs, distinguishing absent fields
        (None) from empty ones ('')."""
        fmt = nntplib._DEFAULT_OVERVIEW_FMT + ["xref"]
        # First example from RFC 3977
        lines = [
            '3000234\tI am just a test article\t"Demo User" '
            '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
            '<45223423@example.com>\t<45454@example.net>\t1234\t'
            '17\tXref: news.example.com misc.test:3000363',
        ]
        overview = nntplib._parse_overview(lines, fmt)
        (art_num, fields), = overview
        self.assertEqual(art_num, 3000234)
        self.assertEqual(fields, {
            'subject': 'I am just a test article',
            'from': '"Demo User" <nobody@example.com>',
            'date': '6 Oct 1998 04:38:40 -0500',
            'message-id': '<45223423@example.com>',
            'references': '<45454@example.net>',
            ':bytes': '1234',
            ':lines': '17',
            'xref': 'news.example.com misc.test:3000363',
            })
        # Second example; here the "Xref" field is totally absent (including
        # the header name) and comes out as None
        lines = [
            '3000234\tI am just a test article\t"Demo User" '
            '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
            '<45223423@example.com>\t<45454@example.net>\t1234\t'
            '17\t\t',
        ]
        overview = nntplib._parse_overview(lines, fmt)
        (art_num, fields), = overview
        self.assertEqual(fields['xref'], None)
        # Third example; the "Xref" is an empty string, while "references"
        # is a single space.
        lines = [
            '3000234\tI am just a test article\t"Demo User" '
            '<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
            '<45223423@example.com>\t \t1234\t'
            '17\tXref: \t',
        ]
        overview = nntplib._parse_overview(lines, fmt)
        (art_num, fields), = overview
        self.assertEqual(fields['references'], ' ')
        self.assertEqual(fields['xref'], '')
def test_parse_datetime(self):
    """_parse_datetime() must accept every date/time layout NNTP uses."""
    cases = [
        # (date_str, time_str, expected datetime components)
        ("19990623135624", None, (1999, 6, 23, 13, 56, 24)),  # DATE output
        ("19990623", "135624", (1999, 6, 23, 13, 56, 24)),    # split form
        ("990623", "135624", (1999, 6, 23, 13, 56, 24)),      # 2-digit year, 1900s
        ("090623", "135624", (2009, 6, 23, 13, 56, 24)),      # 2-digit year, 2000s
    ]
    for date_str, time_str, expected in cases:
        self.assertEqual(nntplib._parse_datetime(date_str, time_str),
                         datetime.datetime(*expected))
def test_unparse_datetime(self):
    """_unparse_datetime() in non-legacy mode: 8-digit date, 6-digit time."""
    # 1) datetime inputs keep their time-of-day
    datetime_cases = [
        (datetime.datetime(1999, 6, 23, 13, 56, 24), "19990623", "135624"),
        (datetime.datetime(2000, 6, 23, 13, 56, 24), "20000623", "135624"),
        (datetime.datetime(2010, 6, 5, 1, 2, 3), "20100605", "010203"),
    ]
    # 2) bare dates serialize with a midnight time
    date_cases = [
        (datetime.date(1999, 6, 23), "19990623", "000000"),
        (datetime.date(2000, 6, 23), "20000623", "000000"),
        (datetime.date(2010, 6, 5), "20100605", "000000"),
    ]
    for dt, date_str, time_str in datetime_cases + date_cases:
        # The default and an explicit legacy=False must agree.
        self.assertEqual(nntplib._unparse_datetime(dt),
                         (date_str, time_str))
        self.assertEqual(nntplib._unparse_datetime(dt, False),
                         (date_str, time_str))
def test_unparse_datetime_legacy(self):
    """_unparse_datetime() in legacy (RFC 977) mode uses 2-digit years."""
    cases = [
        # datetime inputs keep their time-of-day
        (datetime.datetime(1999, 6, 23, 13, 56, 24), "990623", "135624"),
        (datetime.datetime(2000, 6, 23, 13, 56, 24), "000623", "135624"),
        (datetime.datetime(2010, 6, 5, 1, 2, 3), "100605", "010203"),
        # bare dates serialize with a midnight time
        (datetime.date(1999, 6, 23), "990623", "000000"),
        (datetime.date(2000, 6, 23), "000623", "000000"),
        (datetime.date(2010, 6, 5), "100605", "000000"),
    ]
    for dt, date_str, time_str in cases:
        self.assertEqual(nntplib._unparse_datetime(dt, True),
                         (date_str, time_str))
@unittest.skipUnless(ssl, 'requires SSL support')
def test_ssl_support(self):
    # NNTP_SSL is only defined by nntplib when the ssl module imports cleanly,
    # so its presence doubles as a check of the conditional export logic.
    self.assertTrue(hasattr(nntplib, 'NNTP_SSL'))
class PublicAPITests(unittest.TestCase):
    """Ensure that exactly the intended names are exported in __all__."""

    def test_module_all_attribute(self):
        self.assertTrue(hasattr(nntplib, '__all__'))
        expected = {
            'NNTP', 'NNTPError', 'NNTPReplyError', 'NNTPTemporaryError',
            'NNTPPermanentError', 'NNTPProtocolError', 'NNTPDataError',
            'decode_header',
        }
        if ssl is not None:
            # The SSL variant is public only when ssl is available.
            expected.add('NNTP_SSL')
        self.assertEqual(set(nntplib.__all__), expected)
class MockSocketTests(unittest.TestCase):
"""Tests involving a mock socket object
Used where the _NNTPServerIO file object is not enough."""
nntp_class = nntplib.NNTP
def check_constructor_error_conditions(
self, handler_class,
expected_error_type, expected_error_msg,
login=None, password=None):
class mock_socket_module:
def create_connection(address, timeout):
return MockSocket()
class MockSocket:
def close(self):
nonlocal socket_closed
socket_closed = True
def makefile(socket, mode):
handler = handler_class()
_, file = make_mock_file(handler)
files.append(file)
return file
socket_closed = False
files = []
with patch('nntplib.socket', mock_socket_module), \
self.assertRaisesRegex(expected_error_type, expected_error_msg):
self.nntp_class('dummy', user=login, password=password)
self.assertTrue(socket_closed)
for f in files:
self.assertTrue(f.closed)
def test_bad_welcome(self):
#Test a bad welcome message
class Handler(NNTPv1Handler):
welcome = 'Bad Welcome'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPProtocolError, Handler.welcome)
def test_service_temporarily_unavailable(self):
#Test service temporarily unavailable
class Handler(NNTPv1Handler):
welcome = '400 Service temporarily unavailable'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPTemporaryError, Handler.welcome)
def test_service_permanently_unavailable(self):
#Test service permanently unavailable
class Handler(NNTPv1Handler):
welcome = '502 Service permanently unavailable'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPPermanentError, Handler.welcome)
def test_bad_capabilities(self):
#Test a bad capabilities response
class Handler(NNTPv1Handler):
def handle_CAPABILITIES(self):
self.push_lit(capabilities_response)
capabilities_response = '201 bad capability'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPReplyError, capabilities_response)
def test_login_aborted(self):
#Test a bad authinfo response
login = 't@e.com'
password = 'python'
class Handler(NNTPv1Handler):
def handle_AUTHINFO(self, *args):
self.push_lit(authinfo_response)
authinfo_response = '503 Mechanism not recognized'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPPermanentError, authinfo_response,
login, password)
class bypass_context:
    """Bypass encryption and actual SSL module.

    Passed as ``ssl_context`` to NNTP_SSL as the *class itself*, not an
    instance: wrap_socket is then looked up on the class, so the socket
    arrives as the first positional argument (there is no ``self``).  The
    socket is returned unwrapped, keeping the mock transport in plain text.
    """
    def wrap_socket(sock, **args):
        # No-op "wrap": hand the plain socket straight back.
        return sock
@unittest.skipUnless(ssl, 'requires SSL support')
class MockSslTests(MockSocketTests):
    """Re-run the mock-socket tests against NNTP_SSL with encryption stubbed out."""
    @staticmethod
    def nntp_class(*pos, **kw):
        # bypass_context makes wrap_socket() a no-op, so the same plain-text
        # mock handlers used by MockSocketTests work for the SSL variant too.
        return nntplib.NNTP_SSL(*pos, ssl_context=bypass_context, **kw)
class LocalServerTests(unittest.TestCase):
def setUp(self):
sock = socket.socket()
port = support.bind_port(sock)
sock.listen()
self.background = threading.Thread(
target=self.run_server, args=(sock,))
self.background.start()
self.addCleanup(self.background.join)
self.nntp = NNTP(support.HOST, port, usenetrc=False).__enter__()
self.addCleanup(self.nntp.__exit__, None, None, None)
def run_server(self, sock):
# Could be generalized to handle more commands in separate methods
with sock:
[client, _] = sock.accept()
with contextlib.ExitStack() as cleanup:
cleanup.enter_context(client)
reader = cleanup.enter_context(client.makefile('rb'))
client.sendall(b'200 Server ready\r\n')
while True:
cmd = reader.readline()
if cmd == b'CAPABILITIES\r\n':
client.sendall(
b'101 Capability list:\r\n'
b'VERSION 2\r\n'
b'STARTTLS\r\n'
b'.\r\n'
)
elif cmd == b'STARTTLS\r\n':
reader.close()
client.sendall(b'382 Begin TLS negotiation now\r\n')
context = ssl.SSLContext()
context.load_cert_chain(certfile)
client = context.wrap_socket(
client, server_side=True)
cleanup.enter_context(client)
reader = cleanup.enter_context(client.makefile('rb'))
elif cmd == b'QUIT\r\n':
client.sendall(b'205 Bye!\r\n')
break
else:
raise ValueError('Unexpected command {!r}'.format(cmd))
@unittest.skipUnless(ssl, 'requires SSL support')
def test_starttls(self):
file = self.nntp.file
sock = self.nntp.sock
self.nntp.starttls()
# Check that the socket and internal pseudo-file really were
# changed.
self.assertNotEqual(file, self.nntp.file)
self.assertNotEqual(sock, self.nntp.sock)
# Check that the new socket really is an SSL one
self.assertIsInstance(self.nntp.sock, ssl.SSLSocket)
# Check that trying starttls when it's already active fails.
self.assertRaises(ValueError, self.nntp.starttls)
if __name__ == "__main__":
unittest.main()
|
template-gui.py | from tkinter import *
from tkinter import ttk, messagebox
from halo import Halo
from threading import Thread
root = Tk()
root.geometry('800x600+500+150')
def start():
    """Run the Tk main loop while a Halo spinner animates on a worker thread.

    The spinner needs its own thread because root.mainloop() blocks the
    calling thread until the window is closed.
    """
    spinner = Halo(text='App is running', placement='right',
                   text_color='green', color='cyan')
    # Daemon thread: the spinner must not keep the process alive after the
    # GUI closes.  (The original `spinner.animation` line was a no-op
    # attribute access and has been dropped.)
    t = Thread(target=spinner.start, daemon=True)
    t.start()
    # Blocks until the user closes the window.
    root.mainloop()
    # mainloop() returning means the window was closed: stop the spinner and
    # exit.  The original busy-waited on `root.quit`, but that is a bound
    # method and therefore always truthy, so the loop was an unconditional
    # stop dressed up as a condition.
    spinner.stop()
    exit(0)
start() |
stats_requester.py | """
Simple timed stat requester. Requests stats that all other methods can listen for
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
import time
import threading
import math
log = core.getLogger()
stat_interval_seconds = 1
def launch():
    """POX entry point: start a background loop that polls flow statistics.

    Every `stat_interval_seconds` an ofp_flow_stats_request is sent to every
    connected switch; other modules can listen for the resulting stats events.
    """
    def request_stats_loop():
        # Align the first request to the middle of a wall-clock second so
        # samples land on a predictable schedule.
        next_time = math.floor(time.time()) + 0.5
        while True:
            for conn in core.openflow.connections:
                conn.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
            # Advance the deadline past "now"; this skips whole intervals if
            # the send loop fell behind instead of bursting to catch up.
            while time.time() >= next_time:
                next_time += stat_interval_seconds
            # Clamp at 0: the clock may pass next_time between the check
            # above and this call, and time.sleep() rejects negative values.
            time.sleep(max(0.0, next_time - time.time()))

    # Daemon thread so a pending sleep cannot keep POX alive on shutdown
    # (the original non-daemon thread blocked interpreter exit forever).
    stat_loop = threading.Thread(target=request_stats_loop, daemon=True)
    stat_loop.start()
|
multidownloadXkcd.py | #! python3
# multidownloadXkcd.py - Downloads XKCD comics using multiple threads.
import requests, os, bs4, threading
os.chdir(r'.\Chapter-15_scheduling_tasks')
os.makedirs('xkcd', exist_ok=True)
def downloadXkcd(startComic, endComic):
    """Download comic pages startComic..endComic-1 and save their images.

    Comic 404 is skipped (xkcd deliberately serves no comic at that number).
    Images are written into the ./xkcd directory created at module load.
    """
    for urlNumber in range(startComic, endComic):
        if urlNumber == 404:
            continue  # xkcd.com/404 intentionally does not exist
        # Download the page.  (Log line now says https, matching the actual
        # request URL -- the original printed http://.)
        print(f'Downloading page https://xkcd.com/{urlNumber}')
        res = requests.get(f'https://xkcd.com/{urlNumber}')
        res.raise_for_status()
        # Name the parser explicitly: bs4 warns (and may pick a different
        # parser per machine, changing results) when it has to guess.
        soup = bs4.BeautifulSoup(res.text, 'html.parser')
        # Find the url of the comic image.
        comicElem = soup.select('#comic img')
        if comicElem == []:
            print('Could not find comic image')
        else:
            comicUrl = comicElem[0].get('src')
            # Download the image (src is protocol-relative, e.g. //imgs...).
            print(f'Downloading image {comicUrl}')
            res = requests.get('https:' + comicUrl)
            res.raise_for_status()
            # Save image to ./xkcd; `with` guarantees the handle is closed
            # even if a chunk write raises.
            imagePath = os.path.join('xkcd', os.path.basename(comicUrl))
            with open(imagePath, 'wb') as imageFile:
                for chunk in res.iter_content(100000):
                    imageFile.write(chunk)
# Create and start the Thread objects: 14 workers, each downloading a
# 100-comic slice (i .. i+99) of comics 1-1399.
downloadThreads = []
for i in range(1, 1400, 100):
    # End bound is i+100 (exclusive) so the slices tile the full range;
    # the original i+98 silently skipped comics i+98 and i+99 of every
    # 100-comic slice.
    downloadThread = threading.Thread(target=downloadXkcd, args=(i, i + 100))
    downloadThreads.append(downloadThread)
    downloadThread.start()
# Wait for all threads to end before reporting completion.
for i in downloadThreads:
    i.join()
print('Done.')
LogCollectDaemon.py | import subprocess
import uuid, threading, datetime
from pymongo import MongoClient
import time
class RunDaemon():
    """Tails every registered log source and stores parsed alert lines in MongoDB.

    One worker thread is spawned per distinct log source; each runs
    ``tail -f`` on its file and inserts one document per parsed line into
    the ``alert_log`` collection.
    """

    def __init__(self):
        super(RunDaemon, self).__init__()
        # Workers keep running while this flag stays True.  It is never
        # cleared here; it exists so a controller could stop the loops.
        self.thread_stop_flag = True
        self.MONGO_URL = "localhost"
        self.MONGO_PORT = 27017
        self.MONGO_DB = "LogNotifier"
        self.MONGO_COLLECTION = "alert_log"
        self.MONGO_COLLECTION_SERVER_DATA = "server_data"
        # Syslog-style severity codes mapped to display names.  Codes 5-7
        # (notice/info/debug) are intentionally absent: lines carrying them
        # are dropped in parseData().
        self.SEVERITY_LEVELS = {
            '-1': 'None',
            '0': 'Emergency',
            '1': 'Alert',
            '2': 'Critical',
            '3': 'Error',
            '4': 'Warning',
        }
        # UI colour per severity code.
        self.SEVERITY_COLOR_CODE = {
            '0': 'red',
            '1': 'red',
            '2': 'red',
            '3': 'orange',
            '4': 'blue'
        }

    def run(self):
        """Spawn one collector thread per unique log source and wait on them."""
        logsources = self.getUniqueServerLocations()
        threads = []
        cnt = 0
        for src in logsources:
            cnt += 1
            # Resume numbering where the last run for this source stopped.
            lastValidSeqIndex = self.getLastLogSeqIndex(src)
            # Each worker gets its own client/collection handle rather than
            # sharing one across threads.
            con = MongoClient(self.MONGO_URL, self.MONGO_PORT)
            db = con[self.MONGO_DB][self.MONGO_COLLECTION]
            threads.append(
                threading.Thread(target=self.job, name='Thread-' + str(cnt),
                                 args=(int(lastValidSeqIndex), db, src,))
            )
        print("Starting threads...")
        for t in threads:
            t.start()
        print('Joining Threads...')
        for t in threads:
            t.join()
        print("Background log collection daemon started...")

    def job(self, lastValidSeqIndex, db, logsource):
        """Worker loop: tail `logsource` and insert each new line into `db`."""
        while self.thread_stop_flag:
            process = subprocess.Popen(["tail", "-f", logsource],
                                       stdout=subprocess.PIPE)
            while True:
                output = process.stdout.readline()
                # stdout is a binary pipe, so EOF is b''.  The original
                # compared against the str '' and could never detect that
                # the tail process had exited.
                if output == b'' and process.poll() is not None:
                    break
                if output:
                    lastValidSeqIndex = lastValidSeqIndex + 1
                    self.parseData(output.strip(), logsource,
                                   lastValidSeqIndex, db)
            rc = process.poll()
            # NOTE(review): returning here makes the outer while vestigial
            # (one pass only) -- kept as-is to preserve behavior.
            return rc

    # Parse Log Line
    def parseData(self, logData, logsource, n, db):
        """Parse one raw log line and insert the resulting alert document.

        Expected line shape:
        ``(<severity>, <facility>) <date> <time> <host> <app> <details...>``.
        Lines whose severity code is not in SEVERITY_LEVELS are dropped.
        """
        logData = logData.decode("utf-8")
        # Template document; unknown/unused fields stay as empty strings.
        log_data = {'alert_id': str(uuid.uuid4()), 'alarm_name': '', 'alarm_type': '', 'alarm_details': '',
                    'log_seq_index': '', 'severity': '', 'color': '', 'facility': '', 'datestamp': '',
                    'timestamp': '', 'datetimestamp': '', 'hostname': '', 'app-name': '', 'comments': '',
                    'logsource': logsource, 'servername': '', 'assignee': '', 'assignto': '', 'ackstatus': '',
                    'collecttimestamp': '', "assignment-name": '', 'app_name': ''
                    }
        # Header looks like "(<severity>, <facility>) ...".
        sev_fac = logData[1:logData.index(')')].split(", ")
        logData = logData[logData.index(") ") + 2:]
        try:
            log_data['alarm_type'] = self.SEVERITY_LEVELS[sev_fac[0]]
        except KeyError:
            # Severity codes outside the alertable range (e.g. info/debug)
            # are intentionally discarded.
            return
        log_data['severity'] = sev_fac[0]
        log_data['color'] = self.SEVERITY_COLOR_CODE[sev_fac[0]]
        log_data['facility'] = sev_fac[1]
        # Consume the space-separated fixed fields one at a time:
        # <date> <time> <host> <app> <details...>
        log_data['datestamp'] = logData[:logData.index(' ')]
        logData = logData[logData.index(" ") + 1:]
        log_data['timestamp'] = logData[:logData.index(' ')]
        logData = logData[logData.index(" ") + 1:]
        log_data['hostname'] = logData[:logData.index(' ')]
        logData = logData[logData.index(" ") + 1:]
        # Stored under both key spellings for backward compatibility.
        log_data['app-name'] = logData[:logData.index(' ')]
        log_data['app_name'] = logData[:logData.index(' ')]
        logData = logData[logData.index(" ") + 1:]
        log_data['alarm_details'] = logData.strip("\n")
        d = datetime.datetime.strptime(
            log_data['datestamp'] + " " + log_data['timestamp'],
            "%Y-%m-%d %H:%M:%S")
        log_data['datetimestamp'] = d
        log_data['log_seq_index'] = n
        log_data['collecttimestamp'] = datetime.datetime.now()
        db.insert_one(log_data)

    # Gets the last index of log insert into the database
    def getLastLogSeqIndex(self, logsource):
        """Return the highest log_seq_index stored for `logsource` (0 if none)."""
        lastIndex = 0
        con = MongoClient(self.MONGO_URL, self.MONGO_PORT)
        db = con[self.MONGO_DB][self.MONGO_COLLECTION]
        cur = db.find({"logsource": logsource}).sort("log_seq_index", -1).limit(1)
        # NOTE(review): Cursor.count() was removed in PyMongo 4; if the
        # driver is upgraded this must move to count_documents().
        if cur.count() != 0:
            lastIndex = cur[0]["log_seq_index"]
        cur.close()
        con.close()
        return lastIndex

    def getUniqueServerLocations(self):
        """Return the distinct log source paths registered in server_data."""
        logsourcelist = []
        con = MongoClient(self.MONGO_URL, self.MONGO_PORT)
        db = con[self.MONGO_DB][self.MONGO_COLLECTION_SERVER_DATA]
        cur = db.distinct("logsource")
        for c in cur:
            logsourcelist.append(str(c))
        con.close()
        return logsourcelist
# def test():
# process = subprocess.run(['ps', '-aux'],
# stdout=subprocess.PIPE,
# universal_newlines=True)
# out = process.stdout.split("\n")
# pids = []
# for o in out:
# if o.__contains__("LogCollectDaemon.py"):
# print("Found: \n", o)
# r = o.split(" ")[5]
# pids.append(r)
#
# return pids
#
#
# def killBackgroundProcess(id):
# process = subprocess.run(['kill', '-9', str(id)],
# stdout=subprocess.PIPE,
# universal_newlines=True)
# print("Killing process...", id, " done")
if __name__ == '__main__':
# s = test()
# print(s)
# for i in s:
# killBackgroundProcess(i)
# time.sleep(3)
d = RunDaemon()
d.run()
|
uf2dialog.py | import logging
import threading
import os.path
import time
import traceback
import tkinter.font as tkfont
import urllib.request
from tkinter import ttk, messagebox
from typing import Optional
from urllib.request import urlopen
from thonny import get_runner
from thonny.languages import tr
from thonny.misc_utils import list_volumes
from thonny.plugins.micropython import (
BareMetalMicroPythonProxy,
list_serial_ports_with_descriptions,
list_serial_ports,
)
from thonny.ui_utils import (
set_text_if_different,
ems_to_pixels,
)
from thonny.workdlg import WorkDialog
logger = logging.getLogger(__name__)
class Uf2FlashingDialog(WorkDialog):
def __init__(self, master):
self._release_info = None
self._possible_targets = []
super().__init__(master)
self._start_downloading_release_info()
def populate_main_frame(self):
pad = self.get_padding()
inpad = self.get_internal_padding()
latest_ver_caption = tr("Version to be installed")
version_caption_label = ttk.Label(self.main_frame, text=latest_ver_caption + ":")
version_caption_label.grid(
row=0, column=0, sticky="w", padx=(pad, inpad), pady=(pad, inpad)
)
self._version_label = ttk.Label(self.main_frame, text=tr("please wait") + " ...")
self._version_label.grid(row=0, column=1, padx=(0, pad), pady=(pad, inpad), sticky="w")
device_location_caption = tr("Target device location")
self.target_caption_label = ttk.Label(self.main_frame, text=device_location_caption + ":")
self.target_caption_label.grid(
row=1, column=0, padx=(pad, inpad), pady=(0, inpad), sticky="w"
)
# add width, so that this label prescribes the width of the dialog and it doesn't grow
# when the progressbar and action text are gridded
self.target_label = ttk.Label(self.main_frame, text="", width=self.get_info_text_width())
self.target_label.grid(row=1, column=1, padx=(0, pad), pady=(0, inpad), sticky="w")
device_model_caption = tr("Target device model")
self.model_caption_label = ttk.Label(self.main_frame, text=device_model_caption + ":")
self.model_caption_label.grid(
row=2, column=0, padx=(pad, inpad), pady=(0, inpad), sticky="w"
)
self.model_label = ttk.Label(self.main_frame, text="", width=self.get_info_text_width())
self.model_label.grid(row=2, column=1, padx=(0, pad), pady=(0, inpad), sticky="w")
# Resize progress bar to align with this grid
default_font = tkfont.nametofont("TkDefaultFont")
max_caption_len = max(
[
default_font.measure(caption + ":")
for caption in [latest_ver_caption, device_location_caption, device_model_caption]
]
)
self._progress_bar["length"] = max_caption_len
def get_info_text_width(self):
return 40
def get_action_text_max_length(self):
return 20
def get_instructions(self) -> Optional[str]:
return (
"This dialog allows you to install or update MicroPython on your device.\n"
"\n"
"1. Put your device into bootloader mode.\n"
"2. Wait until device information appears.\n"
"3. Click 'Install' and wait for some seconds until done.\n"
"4. Close the dialog and start programming!"
)
def get_ok_text(self):
return tr("Install")
def _get_release_info_url(self):
raise NotImplementedError()
def _get_fallback_release_info_url(self):
raise NotImplementedError()
def _start_downloading_release_info(self):
threading.Thread(target=self._download_release_info, daemon=True).start()
def _download_release_info(self):
import json
from urllib.request import urlopen
try:
with urlopen(self._get_release_info_url()) as fp:
self._release_info = json.loads(fp.read().decode("UTF-8"))
if not self._release_info.get("tag_name"):
self._release_info = None
except Exception as e:
logger.warning(
"Could not find release info from %s", self._get_release_info_url(), exc_info=e
)
if not self._release_info:
try:
self.append_text(
"Warning: Could not find release info from %s, trying %s instead\n"
% (self._get_release_info_url(), self._get_fallback_release_info_url())
)
with urlopen(self._get_fallback_release_info_url()) as fp:
self._release_info = json.loads(fp.read().decode("UTF-8"))
except Exception as e:
self.append_text(
"Could not find release info from %s\n" % self._get_fallback_release_info_url()
)
self.set_action_text("Error!")
self.grid_progress_widgets()
def update_ui(self):
if self._state == "idle":
self._possible_targets = self.get_possible_targets()
if not self._possible_targets:
set_text_if_different(self.target_label, "")
set_text_if_different(self.model_label, "")
else:
unpacked = list(zip(*self._possible_targets))
set_text_if_different(self.target_label, "\n".join(unpacked[0]))
set_text_if_different(self.model_label, "\n".join(unpacked[2]))
unknown_version_text = tr("Please wait") + "..."
desc = self.get_firmware_description()
if desc is None:
set_text_if_different(self._version_label, unknown_version_text)
else:
set_text_if_different(self._version_label, desc)
super(Uf2FlashingDialog, self).update_ui()
def get_firmware_description(self):
    """Return '<tag> (<YYYY-MM-DD>)' for the fetched release, or None when
    the release info has not been downloaded yet."""
    info = self._release_info
    if info is None:
        return None
    # published_at is an ISO timestamp; its first 10 chars are the date.
    return "%s (%s)" % (info["tag_name"], info["published_at"][:10])
def get_download_url_and_size(self, board_id):
if self._release_info is None:
return None
candidates = [
asset
for asset in self._release_info["assets"]
if self._is_suitable_asset(asset, board_id)
]
if len(candidates) == 0:
raise RuntimeError(
"Could not find the right file from the release info (%s)"
% self._get_release_info_url()
)
elif len(candidates) > 1:
raise RuntimeError(
"Found several possible files from the release info (%s)"
% self._get_release_info_url()
)
else:
return (candidates[0]["browser_download_url"], candidates[0]["size"])
def _is_suitable_asset(self, asset, model_id):
raise NotImplementedError()
def is_ready_for_work(self):
# Called after update_ui
return self._possible_targets and self._release_info
@classmethod
def get_possible_targets(cls):
all_vol_infos = [
(vol, cls.find_device_board_id_and_model(vol))
for vol in list_volumes(skip_letters=["A"])
]
return [(info[0], info[1][0], info[1][1]) for info in all_vol_infos if info[1] is not None]
def start_work(self):
    """Validate the single target device and launch the download thread.

    Returns True when the background installation was started, False when a
    precondition fails (several devices plugged in, or the download URL
    could not be determined).
    """
    if len(self._possible_targets) > 1:
        # size 0 is checked elsewhere
        messagebox.showerror(
            "Can't proceed",
            # The original never applied the % operator, so the literal
            # "%d" was shown to the user; interpolate the count here.
            "You seem to have plugged in %d compatible devices.\n"
            "Please leave only one and unplug the others!"
            % len(self._possible_targets),
            parent=self,
        )
        return False
    target_dir, board_id, _ = self._possible_targets[0]
    try:
        download_url, size = self.get_download_url_and_size(board_id)
    except Exception as e:
        logger.error("Could not determine download url", exc_info=e)
        messagebox.showerror("Could not determine download url", str(e), parent=self)
        return False
    self.report_progress(0, size)
    # Free the device's serial port before writing to the bootloader volume:
    # disconnect any active MicroPython session.
    proxy = get_runner().get_backend_proxy()
    if isinstance(proxy, BareMetalMicroPythonProxy):
        proxy.disconnect()
    threading.Thread(
        target=self._perform_work, args=[download_url, size, target_dir], daemon=True
    ).start()
    return True
@classmethod
def find_device_board_id_and_model(cls, mount_path):
    """Read INFO_UF2.TXT on `mount_path` and return (board_id, model).

    Returns None when the file is absent, when either field is missing,
    or when the board id is rejected by _is_relevant_board_id().
    """
    info_path = os.path.join(mount_path, "INFO_UF2.TXT")
    if not os.path.isfile(info_path):
        # Not a UF2 bootloader volume.
        return None
    board_id = None
    model = None
    with open(info_path, "r", encoding="UTF-8", errors="replace") as fp:
        for line in fp:
            # Lines look like "Model: xyz" / "Board-ID: abc".
            parts = list(map(str.strip, line.split(":", maxsplit=1)))
            if len(parts) == 2:
                if parts[0] == "Model":
                    model = parts[1]
                elif parts[0] == "Board-ID":
                    board_id = parts[1]
            # NOTE(review): this relevance check runs after every line, so a
            # subclass whose _is_relevant_board_id rejects None would bail
            # out before a later "Board-ID" line is reached -- confirm
            # subclasses tolerate board_id=None here.  The base class
            # always returns True.
            if not cls._is_relevant_board_id(board_id):
                return None
    if board_id and model:
        return board_id, model
    return None
@classmethod
def _is_relevant_board_id(cls, board_id):
return True
def _get_vid_pids_to_wait_for(self):
"""If result is non-empty then the process completes until a device with one of the vid-pid pairs appears"""
return set()
def _perform_work(self, download_url, size, target_dir):
try:
self._download_to_the_device(download_url, size, target_dir)
if self._state == "working" and self._get_vid_pids_to_wait_for():
self._wait_for_vid_pids()
except Exception as e:
self.append_text("\n" + "".join(traceback.format_exc()))
self.set_action_text("Error...")
self.report_done(False)
return
if self._state == "working":
self.append_text("\nDone!\n")
self.set_action_text("Done!")
self.report_done(True)
else:
assert self._state == "cancelling"
self.append_text("\nCancelled\n")
self.set_action_text("Cancelled")
self.report_done(False)
def _wait_for_vid_pids(self):
target_set = set(self._get_vid_pids_to_wait_for())
if not target_set:
return
self.append_text("\nWaiting for the port...\n")
self.set_action_text("Waiting for the port...")
wait_time = 0
step = 0.2
while wait_time < 10:
for p in list_serial_ports():
vidpid = (p.vid, p.pid)
if vidpid in target_set or (p.vid, None) in target_set:
self.append_text("Found %s at %s\n" % ("%04x:%04x" % vidpid, p.device))
self.set_action_text("Found port")
return
if self._state == "cancelling":
return
time.sleep(step)
wait_time += step
else:
self.set_action_text("Warning: Could not find port")
self.append_text("Warning: Could not find port in %s seconds\n" % int(wait_time))
# leave some time to see the warning
time.sleep(2)
def _download_to_the_device(self, download_url, size, target_dir):
"""Running in a bg thread"""
target_path = os.path.join(target_dir, "firmware")
self.set_action_text("Starting...")
self.append_text("Downloading %d bytes from %s\n" % (size, download_url))
req = urllib.request.Request(
download_url,
data=None,
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
},
)
with urlopen(req, timeout=5) as fsrc:
bytes_copied = 0
self.append_text("Writing to %s\n" % target_path)
self.append_text("Starting...")
if fsrc.length:
# override (possibly inaccurate) size
size = fsrc.length
with open(target_path, "wb") as fdst:
while True:
buf = fsrc.read(8 * 1024)
if not buf:
break
if self._state == "cancelling":
break
fdst.write(buf)
fdst.flush()
os.fsync(fdst.fileno())
bytes_copied += len(buf)
percent_str = "%.0f%%" % (bytes_copied / size * 100)
self.set_action_text("Copying... " + percent_str)
self.report_progress(bytes_copied, size)
self.replace_last_line(percent_str)
def get_title(self):
return "Install MicroPython firmware"
|
test_state.py | # -*- coding: utf-8 -*-
'''
Tests for the state runner
'''
# Import Python Libs
from __future__ import absolute_import
import errno
import os
import shutil
import signal
import tempfile
import textwrap
import yaml
import threading
from salt.ext.six.moves import queue
# Import Salt Testing Libs
from tests.support.case import ShellCase
from tests.support.unit import skipIf
from tests.support.paths import TMP
# Import Salt Libs
import salt.utils.platform
import salt.utils.event
import salt.utils.files
class StateRunnerTest(ShellCase):
'''
Test the state runner.
'''
def add_to_queue(self, q, cmd):
'''
helper method to add salt-run
return data to a queue
'''
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
def test_orchestrate_output(self):
'''
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
and not the full stateful return. This tests ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
'''
#ret_output = self.run_run_plus('state.orchestrate', 'orch.simple')['out']
ret_output = self.run_run('state.orchestrate orch.simple')
bad_out = ['outputter:', ' highstate']
good_out = [' Function: salt.state',
' Result: True',
'Succeeded: 1 (changed=1)',
'Failed: 0',
'Total states run: 1']
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
self.assertIsNot(bad_out, ret_output)
# Now test that some expected good sample output is present in the return.
for item in good_out:
self.assertIn(item, ret_output)
def test_orchestrate_nested(self):
'''
test salt-run state.orchestrate and failhard with nested orchestration
'''
if os.path.exists('/tmp/ewu-2016-12-13'):
os.remove('/tmp/ewu-2016-12-13')
_, code = self.run_run(
'state.orchestrate nested-orch.outer',
with_retcode=True)
self.assertFalse(os.path.exists('/tmp/ewu-2016-12-13'))
self.assertNotEqual(code, 0)
def test_orchestrate_target_exists(self):
'''
test orchestration when target exists
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-exists')
first = [' ID: core',
' Function: salt.state',
' Result: True']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
self.assertIn(item, ret)
def test_orchestrate_retcode(self):
'''
Test orchestration with nonzero retcode set in __context__
'''
self.run_run('saltutil.sync_runners')
self.run_run('saltutil.sync_wheel')
ret = '\n'.join(self.run_run('state.orchestrate orch.retcode'))
for result in (' ID: test_runner_success\n'
' Function: salt.runner\n'
' Name: runtests_helpers.success\n'
' Result: True',
' ID: test_runner_failure\n'
' Function: salt.runner\n'
' Name: runtests_helpers.failure\n'
' Result: False',
' ID: test_wheel_success\n'
' Function: salt.wheel\n'
' Name: runtests_helpers.success\n'
' Result: True',
' ID: test_wheel_failure\n'
' Function: salt.wheel\n'
' Name: runtests_helpers.failure\n'
' Result: False'):
self.assertIn(result, ret)
def test_orchestrate_target_doesnt_exists(self):
'''
test orchestration when target doesnt exist
while using multiple states
'''
ret = self.run_run('state.orchestrate orch.target-doesnt-exists')
first = ['No minions matched the target. No command was sent, no jid was assigned.',
' ID: core',
' Function: salt.state',
' Result: False']
second = [' ID: test-state',
' Function: salt.state',
' Result: True']
third = [' ID: cmd.run',
' Function: salt.function',
' Result: True']
ret_out = [first, second, third]
for out in ret_out:
for item in out:
self.assertIn(item, ret)
def test_state_event(self):
'''
test to ensure state.event
runner returns correct data
'''
q = queue.Queue(maxsize=0)
cmd = 'state.event salt/job/*/new count=1'
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
server_thread.setDaemon(True)
server_thread.start()
while q.empty():
self.run_salt('minion test.ping --static')
out = q.get()
self.assertIn(expect, str(out))
server_thread.join()
@skipIf(salt.utils.platform.is_windows(), '*NIX-only test')
class OrchEventTest(ShellCase):
'''
Tests for orchestration events
'''
def setUp(self):
self.timeout = 60
self.master_d_dir = os.path.join(self.get_config_dir(), 'master.d')
try:
os.makedirs(self.master_d_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
self.conf = tempfile.NamedTemporaryFile(
mode='w',
suffix='.conf',
dir=self.master_d_dir,
delete=True,
)
self.base_env = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, self.base_env)
self.addCleanup(self.conf.close)
for attr in ('timeout', 'master_d_dir', 'conf', 'base_env'):
self.addCleanup(delattr, self, attr)
# Force a reload of the configuration now that our temp config file has
# been removed.
self.addCleanup(self.run_run_plus, 'test.arg', __reload_config=True)
def alarm_handler(self, signal, frame):
raise Exception('Timeout of {0} seconds reached'.format(self.timeout))
def write_conf(self, data):
'''
Dump the config dict to the conf file
'''
self.conf.write(yaml.dump(data, default_flow_style=False))
self.conf.flush()
def test_jid_in_ret_event(self):
'''
Test to confirm that the ret event for the orchestration contains the
jid for the jobs spawned.
'''
self.write_conf({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [self.base_env],
},
})
state_sls = os.path.join(self.base_env, 'test_state.sls')
with salt.utils.files.fopen(state_sls, 'w') as fp_:
fp_.write(textwrap.dedent('''
date:
cmd.run
'''))
orch_sls = os.path.join(self.base_env, 'test_orch.sls')
with salt.utils.files.fopen(orch_sls, 'w') as fp_:
fp_.write(textwrap.dedent('''
date_cmd:
salt.state:
- tgt: minion
- sls: test_state
ping_minion:
salt.function:
- name: test.ping
- tgt: minion
fileserver.file_list:
salt.runner
config.values:
salt.wheel
'''))
listener = salt.utils.event.get_event(
'master',
sock_dir=self.master_opts['sock_dir'],
transport=self.master_opts['transport'],
opts=self.master_opts)
jid = self.run_run_plus(
'state.orchestrate',
'test_orch',
__reload_config=True).get('jid')
if jid is None:
raise Exception('jid missing from run_run_plus output')
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event['tag'] == 'salt/run/{0}/ret'.format(jid):
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event['data']['return']['data']['master']
for job in ret:
self.assertTrue('__jid__' in ret[job])
break
finally:
del listener
signal.alarm(0)
|
test_subprocess.py | import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
from test.support import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
    """Shared fixture for the subprocess tests: reaps stray children and
    provides a stderr comparison helper tolerant of interpreter noise."""
    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()
    def tearDown(self):
        """Drain lingering children and assert none were leaked."""
        if not mswindows:
            # subprocess._active is not used on Windows and is set to None.
            # Waiting on each entry and then calling the private _cleanup()
            # makes a leak in one test fail here instead of poisoning later
            # tests.
            for inst in subprocess._active:
                inst.wait()
            subprocess._cleanup()
            self.assertFalse(
                subprocess._active, "subprocess._active not empty"
            )
        self.doCleanups()
        support.reap_children()
    def assertStderrEqual(self, stderr, expected, msg=None):
        """Compare child stderr to *expected* after stripping debug-build
        shutdown chatter and surrounding whitespace."""
        # In a debug build, stuff like "[6580 refs]" is printed to stderr at
        # shutdown time. That frustrates tests trying to check stderr produced
        # from a spawned Python process.
        actual = support.strip_python_stderr(stderr)
        # strip_python_stderr also strips whitespace, so we do too.
        expected = expected.strip()
        self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
    """Sentinel exception raised by PopenExecuteChildRaises in tests."""
class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass for testing cleanup of subprocess.PIPE filehandles when
    _execute_child fails.
    """
    def _execute_child(self, *args, **kwargs):
        # Override the private spawn hook so construction always fails before
        # a child exists; tests use this to verify Popen closes the pipe
        # filehandles it had already created.
        raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_input_none(self):
"""input=None has a legacy meaning of input='' on check_output."""
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None)
self.assertNotIn(b'XX', output)
def test_check_output_input_none_text(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, text=True)
self.assertNotIn('XX', output)
def test_check_output_input_none_universal_newlines(self):
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; print('XX' if sys.stdin.read() else '')"],
input=None, universal_newlines=True)
self.assertNotIn('XX', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
    def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
        """Launch *python_arg* and assert the child ran in *expected_cwd*.

        The child prints os.getcwd() to its stdout and exits with 47; both
        the exit status and the directory are checked.  **kwargs is passed
        through to Popen (typically cwd=...).
        """
        # Invoke Python via Popen, and assert that (1) the call succeeds,
        # and that (2) the current working directory of the child process
        # matches *expected_cwd*.
        p = subprocess.Popen([python_arg, "-c",
                              "import os, sys; "
                              "buf = sys.stdout.buffer; "
                              "buf.write(os.getcwd().encode()); "
                              "buf.flush(); "
                              "sys.exit(47)"],
                             stdout=subprocess.PIPE,
                             **kwargs)
        self.addCleanup(p.stdout.close)
        p.wait()
        self.assertEqual(47, p.returncode)
        # normcase makes the comparison case-insensitive where the
        # filesystem is (e.g. Windows).
        normcase = os.path.normcase
        self.assertEqual(normcase(expected_cwd),
                         normcase(p.stdout.read().decode()))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# Pyston change: we set this variable on purpose
if 'SETUPTOOLS_USE_DISTUTILS' in n:
return True
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
    def test_communicate_pipe_buf(self):
        """Writes larger than the OS pipe buffer must not deadlock."""
        # communicate() with writes larger than pipe_buf
        # This test will probably deadlock rather than fail, if
        # communicate() does not work properly.
        x, y = os.pipe()
        # NOTE(review): this pipe pair is opened and closed immediately —
        # presumably to influence which fd numbers the Popen pipes receive;
        # confirm intent before changing.
        os.close(x)
        os.close(y)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read(47));'
                              'sys.stderr.write("x" * %d);'
                              'sys.stdout.write(sys.stdin.read())' %
                              support.PIPE_MAX_SIZE],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        string_to_write = b"a" * support.PIPE_MAX_SIZE
        (stdout, stderr) = p.communicate(string_to_write)
        self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# Python debug build push something like "[42442 refs]\n"
# to stderr at exit of subprocess.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
    """Popen must not leak fds: saturate the fd limit, then spawn repeatedly."""
    # Make sure we leak no resources
    if not mswindows:
        max_handles = 1026 # too much for most UNIX systems
    else:
        max_handles = 2050 # too much for (at least some) Windows setups
    handles = []
    tmpdir = tempfile.mkdtemp()
    try:
        # Open files until the process hits its descriptor limit (EMFILE).
        for i in range(max_handles):
            try:
                tmpfile = os.path.join(tmpdir, support.TESTFN)
                handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
            except OSError as e:
                if e.errno != errno.EMFILE:
                    raise
                break
        else:
            self.skipTest("failed to reach the file descriptor limit "
                          "(tried %d)" % max_handles)
        # Close a couple of them (should be enough for a subprocess)
        for i in range(10):
            os.close(handles.pop())
        # Loop creating some subprocesses. If one of them leaks some fds,
        # the next loop iteration will fail by reaching the max fd limit.
        for i in range(15):
            p = subprocess.Popen([sys.executable, "-c",
                                  "import sys;"
                                  "sys.stdout.write(sys.stdin.read())"],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            data = p.communicate(b"lime")[0]
            self.assertEqual(data, b"lime")
    finally:
        # Release everything we opened so later tests aren't starved of fds.
        for h in handles:
            os.close(h)
        shutil.rmtree(tmpdir)
def test_list2cmdline(self):
    """list2cmdline() quotes/escapes arguments per the MS C runtime rules.

    NOTE(review): expected strings appear to follow the MSVCRT argument
    parsing convention (backslashes only special before a quote) — confirm
    against the CommandLineToArgvW documentation when changing them.
    """
    self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
                     '"a b c" d e')
    self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
                     'ab\\"c \\ d')
    self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
                     'ab\\"c " \\\\" d')
    self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
                     'a\\\\\\b "de fg" h')
    self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
                     'a\\\\\\"b c d')
    self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
                     '"a\\\\b c" d e')
    self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
                     '"a\\\\b\\ c" d e')
    # An empty argument must still be represented (as an empty quoted pair).
    self.assertEqual(subprocess.list2cmdline(['ab', '']),
                     'ab ""')
def test_poll(self):
    """poll() returns None while running, then the returncode after exit."""
    p = subprocess.Popen([sys.executable, "-c",
                          "import os; os.read(0, 1)"],
                         stdin=subprocess.PIPE)
    self.addCleanup(p.stdin.close)
    # Child blocks in os.read() on stdin, so it is still alive here.
    self.assertIsNone(p.poll())
    # Unblock the child; it exits once it has read one byte.
    os.write(p.stdin.fileno(), b'A')
    p.wait()
    # Subsequent invocations should just return the returncode
    self.assertEqual(p.poll(), 0)
def test_wait(self):
    """wait() reports the exit status and is idempotent when called again."""
    proc = subprocess.Popen(ZERO_RETURN_CMD)
    first_status = proc.wait()
    # A second wait() on a reaped process must simply repeat the returncode.
    second_status = proc.wait()
    self.assertEqual(first_status, 0)
    self.assertEqual(second_status, 0)
def test_wait_timeout(self):
    """wait(timeout=...) raises TimeoutExpired, then succeeds when given time."""
    p = subprocess.Popen([sys.executable,
                          "-c", "import time; time.sleep(0.3)"])
    with self.assertRaises(subprocess.TimeoutExpired) as c:
        p.wait(timeout=0.0001)
    self.assertIn("0.0001", str(c.exception))  # For coverage of __str__.
    # Some heavily loaded buildbots (sparc Debian 3.x) require this much
    # time to start.
    self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
    """A non-integer bufsize must raise TypeError."""
    # an invalid type of the bufsize argument should raise
    # TypeError.
    with self.assertRaises(TypeError):
        subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
    """bufsize=None is accepted, both positionally and as a keyword."""
    # bufsize=None should be the same as bufsize=0.
    p = subprocess.Popen(ZERO_RETURN_CMD, None)
    self.assertEqual(p.wait(), 0)
    # Again with keyword arg
    p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
    self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
    """Helper: echo one line through a child with bufsize=1.

    Writes *line* to the child's stdin, closes the underlying fd WITHOUT
    flushing any buffered data, and asserts the child echoed *expected*.
    In text mode bufsize=1 is line-buffered, so the line is flushed by the
    write itself; in binary mode it is not.
    """
    # subprocess may deadlock with bufsize=1, see issue #21332
    with subprocess.Popen([sys.executable, "-c", "import sys;"
                           "sys.stdout.write(sys.stdin.readline());"
                           "sys.stdout.flush()"],
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.DEVNULL,
                          bufsize=1,
                          universal_newlines=universal_newlines) as p:
        p.stdin.write(line)  # expect that it flushes the line in text mode
        os.close(p.stdin.fileno())  # close it without flushing the buffer
        read_line = p.stdout.readline()
        with support.SuppressCrashReport():
            try:
                # Closing the wrapper now may raise because the fd is gone.
                p.stdin.close()
            except OSError:
                pass
        # Prevent Popen.__exit__ from touching the already-dead stdin.
        p.stdin = None
    self.assertEqual(p.returncode, 0)
    self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
    """bufsize=1 in text mode line-buffers: the full line comes back."""
    # line is flushed in text mode with bufsize=1.
    # we should get the full line in return
    line = "line\n"
    self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
    """bufsize=1 in binary mode does not flush; also warns about it."""
    # line is not flushed in binary mode with bufsize=1.
    # we should get empty response
    line = b'line' + os.linesep.encode() # assume ascii-based locale
    with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
        self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
    """A failed Popen must not leak its pipe fds (bug #5179)."""
    # see bug #5179: Popen leaks file descriptors to PIPEs if
    # the child fails to execute; this will eventually exhaust
    # the maximum number of open fds. 1024 seems a very common
    # value for that limit, but Windows has 2048, so we loop
    # 1024 times (each call leaked two fds).
    for i in range(1024):
        with self.assertRaises(NONEXISTING_ERRORS):
            subprocess.Popen(NONEXISTING_CMD,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
    """Windows: pipe handles are closed correctly when exec fails (bpo-30121)."""
    # bpo-30121: Popen with pipes must close properly pipes on error.
    # Previously, os.close() was called with a Windows handle which is not
    # a valid file descriptor.
    #
    # Run the test in a subprocess to control how the CRT reports errors
    # and to get stderr content.
    try:
        import msvcrt
        msvcrt.CrtSetReportMode
    except (AttributeError, ImportError):
        self.skipTest("need msvcrt.CrtSetReportMode")
    # Route all CRT diagnostics to stderr so a bad-handle complaint from
    # the buggy close path would show up in the captured output below.
    code = textwrap.dedent(f"""
        import msvcrt
        import subprocess
        cmd = {NONEXISTING_CMD!r}
        for report_type in [msvcrt.CRT_WARN,
                            msvcrt.CRT_ERROR,
                            msvcrt.CRT_ASSERT]:
            msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
            msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
        try:
            subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        except OSError:
            pass
        """)
    cmd = [sys.executable, "-c", code]
    proc = subprocess.Popen(cmd,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    with proc:
        stderr = proc.communicate()[1]
    # A clean run produces no CRT diagnostics and exits successfully.
    self.assertEqual(stderr, "")
    self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
    """Failed Popen must not double-close fds owned by other threads (#18851)."""
    # Issue #18851
    fds = []
    def open_fds():
        # Concurrently create pipe fds while Popen is failing; a double
        # close in Popen could accidentally close one of these.
        for i in range(20):
            fds.extend(os.pipe())
            time.sleep(0.001)
    t = threading.Thread(target=open_fds)
    t.start()
    try:
        with self.assertRaises(EnvironmentError):
            subprocess.Popen(NONEXISTING_CMD,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    finally:
        t.join()
    exc = None
    for fd in fds:
        # If a double close occurred, some of those fds will
        # already have been closed by mistake, and os.close()
        # here will raise.
        try:
            os.close(fd)
        except OSError as e:
            exc = e
    if exc is not None:
        raise exc
def test_threadsafe_wait(self):
    """Issue21291: Popen.wait() needs to be threadsafe for returncode."""
    proc = subprocess.Popen([sys.executable, '-c',
                             'import time; time.sleep(12)'])
    self.assertEqual(proc.returncode, None)
    # (label, value) observations recorded by the killer thread, checked
    # at the end against the expected sequence.
    results = []

    def kill_proc_timer_thread():
        results.append(('thread-start-poll-result', proc.poll()))
        # terminate it from the thread and wait for the result.
        proc.kill()
        proc.wait()
        results.append(('thread-after-kill-and-wait', proc.returncode))
        # this wait should be a no-op given the above.
        proc.wait()
        results.append(('thread-after-second-wait', proc.returncode))

    # This is a timing sensitive test, the failure mode is
    # triggered when both the main thread and this thread are in
    # the wait() call at once.  The delay here is to allow the
    # main thread to most likely be blocked in its wait() call.
    t = threading.Timer(0.2, kill_proc_timer_thread)
    t.start()

    if mswindows:
        expected_errorcode = 1
    else:
        # Should be -9 because of the proc.kill() from the thread.
        expected_errorcode = -9

    # Wait for the process to finish; the thread should kill it
    # long before it finishes on its own.  Supplying a timeout
    # triggers a different code path for better coverage.
    proc.wait(timeout=20)
    self.assertEqual(proc.returncode, expected_errorcode,
                     msg="unexpected result in wait from main thread")

    # This should be a no-op with no change in returncode.
    proc.wait()
    self.assertEqual(proc.returncode, expected_errorcode,
                     msg="unexpected result in second main wait.")

    t.join()
    # Ensure that all of the thread results are as expected.
    # When a race condition occurs in wait(), the returncode could
    # be set by the wrong thread that doesn't actually have it
    # leading to an incorrect value.
    self.assertEqual([('thread-start-poll-result', None),
                      ('thread-after-kill-and-wait', expected_errorcode),
                      ('thread-after-second-wait', expected_errorcode)],
                     results)
def test_issue8780(self):
    """A nested subprocess.call() inherits the parent's stdout pipe."""
    # Ensure that stdout is inherited from the parent
    # if stdout=PIPE is not used
    code = ';'.join((
        'import subprocess, sys',
        'retcode = subprocess.call('
            "[sys.executable, '-c', 'print(\"Hello World!\")'])",
        'assert retcode == 0'))
    output = subprocess.check_output([sys.executable, '-c', code])
    self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
    """Redirected handles are released when process creation fails."""
    # If CreateProcess exits with an error, ensure the
    # duplicate output handles are released
    ifhandle, ifname = tempfile.mkstemp()
    ofhandle, ofname = tempfile.mkstemp()
    efhandle, efname = tempfile.mkstemp()
    try:
        # "*" is not an executable, so Popen must raise OSError.
        subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
                          stderr=efhandle)
    except OSError:
        os.close(ifhandle)
        os.remove(ifname)
        os.close(ofhandle)
        os.remove(ofname)
        os.close(efhandle)
        os.remove(efname)
    # If Popen still held duplicates, the removals above would have failed
    # (on Windows) and the files would still exist.
    self.assertFalse(os.path.exists(ifname))
    self.assertFalse(os.path.exists(ofname))
    self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
    """communicate() swallows EPIPE when the child exits early (issue 10963)."""
    # Issue 10963: communicate() should hide EPIPE
    p = subprocess.Popen(ZERO_RETURN_CMD,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    self.addCleanup(p.stdin.close)
    # Child exits immediately; writing 1 MiB must not raise BrokenPipeError.
    p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
    """communicate() hides EPIPE even with only a stdin pipe (issue 10963)."""
    # Issue 10963: communicate() should hide EPIPE
    p = subprocess.Popen(ZERO_RETURN_CMD,
                         stdin=subprocess.PIPE)
    self.addCleanup(p.stdin.close)
    # Ensure the child is already dead before we try to write to it.
    p.wait()
    p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                     "Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
                     "Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
                     "Requires os.getppid")
def test_communicate_eintr(self):
    """communicate() must survive being interrupted by a signal (issue #12493)."""
    # Issue #12493: communicate() should handle EINTR
    def handler(signum, frame):
        # No-op handler: its only purpose is to interrupt blocking syscalls.
        pass
    old_handler = signal.signal(signal.SIGUSR1, handler)
    self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)

    args = [sys.executable, "-c",
            'import os, signal;'
            'os.kill(os.getppid(), signal.SIGUSR1)']
    for stream in ('stdout', 'stderr'):
        kw = {stream: subprocess.PIPE}
        with subprocess.Popen(args, **kw) as process:
            # communicate() will be interrupted by SIGUSR1
            process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage.  It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
                     "Linux specific")
def test_failed_child_execute_fd_leak(self):
    """Test for the fork() failure fd leak reported in issue16327."""
    fd_directory = '/proc/%d/fd' % os.getpid()
    # Compare the process's open-fd listing before and after the failure.
    fds_before_popen = os.listdir(fd_directory)
    with self.assertRaises(PopenTestException):
        PopenExecuteChildRaises(
                ZERO_RETURN_CMD, stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # NOTE: This test doesn't verify that the real _execute_child
    # does not close the file descriptors itself on the way out
    # during an exception.  Code inspection has confirmed that.

    fds_after_exception = os.listdir(fd_directory)
    self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
    """FileNotFoundError from a missing executable carries its path."""
    with self.assertRaises(FileNotFoundError) as c:
        subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
    self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
    """FileNotFoundError from a missing cwd carries the cwd path."""
    with self.assertRaises(FileNotFoundError) as c:
        subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
    self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
class RunFuncTestCase(BaseTestCase):
    """Tests for the high-level subprocess.run() convenience function."""

    def run_python(self, code, **kwargs):
        """Run Python code in a subprocess using subprocess.run"""
        argv = [sys.executable, "-c", code]
        return subprocess.run(argv, **kwargs)

    def test_returncode(self):
        # call() function with sequence argument
        cp = self.run_python("import sys; sys.exit(47)")
        self.assertEqual(cp.returncode, 47)
        with self.assertRaises(subprocess.CalledProcessError):
            cp.check_returncode()

    def test_check(self):
        # check=True turns a non-zero exit into CalledProcessError.
        with self.assertRaises(subprocess.CalledProcessError) as c:
            self.run_python("import sys; sys.exit(47)", check=True)
        self.assertEqual(c.exception.returncode, 47)

    def test_check_zero(self):
        # check_returncode shouldn't raise when returncode is zero
        cp = subprocess.run(ZERO_RETURN_CMD, check=True)
        self.assertEqual(cp.returncode, 0)

    def test_timeout(self):
        # run() function with timeout argument; we want to test that the child
        # process gets killed when the timeout expires.  If the child isn't
        # killed, this call will deadlock since subprocess.run waits for the
        # child.
        with self.assertRaises(subprocess.TimeoutExpired):
            self.run_python("while True: pass", timeout=0.0001)

    def test_capture_stdout(self):
        # capture stdout with zero return code
        cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stdout)

    def test_capture_stderr(self):
        # stderr=PIPE captures the child's stderr on the result object.
        cp = self.run_python("import sys; sys.stderr.write('BDFL')",
                             stderr=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stderr)

    def test_check_output_stdin_arg(self):
        # run() can be called with stdin set to a file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        cp = self.run_python(
                 "import sys; sys.stdout.write(sys.stdin.read().upper())",
                 stdin=tf, stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)

    def test_check_output_input_arg(self):
        # check_output() can be called with input set to a string
        cp = self.run_python(
                "import sys; sys.stdout.write(sys.stdin.read().upper())",
                input=b'pear', stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)

    def test_check_output_stdin_with_input_arg(self):
        # run() refuses to accept 'stdin' with 'input'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        with self.assertRaises(ValueError,
              msg="Expected ValueError when stdin and input args supplied.") as c:
            output = self.run_python("print('will not be run')",
                                     stdin=tf, input=b'hare')
        # The error message must name both conflicting arguments.
        self.assertIn('stdin', c.exception.args[0])
        self.assertIn('input', c.exception.args[0])

    def test_check_output_timeout(self):
        # On timeout, output collected so far is attached to the exception.
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            cp = self.run_python((
                     "import sys, time\n"
                     "sys.stdout.write('BDFL')\n"
                     "sys.stdout.flush()\n"
                     "time.sleep(3600)"),
                    # Some heavily loaded buildbots (sparc Debian 3.x) require
                    # this much time to start and print.
                    timeout=3, stdout=subprocess.PIPE)
        self.assertEqual(c.exception.output, b'BDFL')
        # output is aliased to stdout
        self.assertEqual(c.exception.stdout, b'BDFL')

    def test_run_kwargs(self):
        # Extra keyword arguments (here: env) are forwarded to Popen.
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        cp = self.run_python(('import sys, os;'
                              'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
                             env=newenv)
        self.assertEqual(cp.returncode, 33)

    def test_run_with_pathlike_path(self):
        # bpo-31961: test run(pathlike_object)
        # the name of a command that can be run without
        # any argumenets that exit fast
        prog = 'tree.com' if mswindows else 'ls'
        path = shutil.which(prog)
        if path is None:
            self.skipTest(f'{prog} required for this test')
        path = FakePath(path)
        res = subprocess.run(path, stdout=subprocess.DEVNULL)
        self.assertEqual(res.returncode, 0)
        # A path-like command combined with shell=True is rejected.
        with self.assertRaises(TypeError):
            subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)

    def test_run_with_bytes_path_and_arguments(self):
        # bpo-31961: test run([bytes_object, b'additional arguments'])
        path = os.fsencode(sys.executable)
        args = [path, '-c', b'import sys; sys.exit(57)']
        res = subprocess.run(args)
        self.assertEqual(res.returncode, 57)

    def test_run_with_pathlike_path_and_arguments(self):
        # bpo-31961: test run([pathlike_object, 'additional arguments'])
        path = FakePath(sys.executable)
        args = [path, '-c', 'import sys; sys.exit(57)']
        res = subprocess.run(args)
        self.assertEqual(res.returncode, 57)

    def test_capture_output(self):
        # capture_output=True captures both streams at once.
        cp = self.run_python(("import sys;"
                              "sys.stdout.write('BDFL'); "
                              "sys.stderr.write('FLUFL')"),
                             capture_output=True)
        self.assertIn(b'BDFL', cp.stdout)
        self.assertIn(b'FLUFL', cp.stderr)

    def test_stdout_with_capture_output_arg(self):
        # run() refuses to accept 'stdout' with 'capture_output'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        with self.assertRaises(ValueError,
            msg=("Expected ValueError when stdout and capture_output "
                 "args supplied.")) as c:
            output = self.run_python("print('will not be run')",
                                     capture_output=True, stdout=tf)
        self.assertIn('stdout', c.exception.args[0])
        self.assertIn('capture_output', c.exception.args[0])

    def test_stderr_with_capture_output_arg(self):
        # run() refuses to accept 'stderr' with 'capture_output'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        with self.assertRaises(ValueError,
            msg=("Expected ValueError when stderr and capture_output "
                 "args supplied.")) as c:
            output = self.run_python("print('will not be run')",
                                     capture_output=True, stderr=tf)
        self.assertIn('stderr', c.exception.args[0])
        self.assertIn('capture_output', c.exception.args[0])

    # This test _might_ wind up a bit fragile on loaded build+test machines
    # as it depends on the timing with wide enough margins for normal situations
    # but does assert that it happened "soon enough" to believe the right thing
    # happened.
    @unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
    def test_run_with_shell_timeout_and_capture_output(self):
        """Output capturing after a timeout mustn't hang forever on open filehandles."""
        before_secs = time.monotonic()
        try:
            subprocess.run('sleep 3', shell=True, timeout=0.1,
                           capture_output=True)  # New session unspecified.
        except subprocess.TimeoutExpired as exc:
            after_secs = time.monotonic()
            stacks = traceback.format_exc()  # assertRaises doesn't give this.
        else:
            self.fail("TimeoutExpired not raised.")
        self.assertLess(after_secs - before_secs, 1.5,
                        msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
                        f"{stacks}```")
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
    """Per-test setup: remember a directory path guaranteed not to exist."""
    super().setUp()
    self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
    """Return the OSError produced by chdir() into the nonexistent dir."""
    try:
        os.chdir(self._nonexistent_dir)
    except OSError as e:
        # This avoids hard coding the errno value or the OS perror()
        # string and instead capture the exception that we want to see
        # below for comparison.
        desired_exception = e
    else:
        self.fail("chdir to nonexistent directory %s succeeded." %
                  self._nonexistent_dir)
    return desired_exception
def test_exception_cwd(self):
    """Test error in the child raised in the parent for a bad cwd."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             cwd=self._nonexistent_dir)
    except OSError as e:
        # Test that the child process chdir failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
        self.assertEqual(desired_exception.filename, e.filename)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
    """Test error in the child raised in the parent for a bad executable."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             executable=self._nonexistent_dir)
    except OSError as e:
        # Test that the child process exec failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
        self.assertEqual(desired_exception.filename, e.filename)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
    """Test error in the child raised in the parent for a bad args[0]."""
    desired_exception = self._get_chdir_exception()
    try:
        p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
    except OSError as e:
        # Test that the child process exec failure actually makes
        # it up to the parent process as the correct exception.
        self.assertEqual(desired_exception.errno, e.errno)
        self.assertEqual(desired_exception.strerror, e.strerror)
        self.assertEqual(desired_exception.filename, e.filename)
    else:
        self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
# to do that
class PopenNoDestructor(subprocess.Popen):
    """Popen subclass whose destructor is a no-op (no pid cleanup/warning)."""
    def __del__(self):
        pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
    """Test error passing done through errpipe_write in the good case"""

    def proper_error(*args):
        # args[13] is the errpipe_write fd in fork_exec's argument list;
        # this mimics a child reporting exec failure through the pipe.
        errpipe_write = args[13]
        # Write the hex for the error code EISDIR: 'is a directory'
        err_code = '{:x}'.format(errno.EISDIR).encode()
        os.write(errpipe_write, b"OSError:" + err_code + b":")
        return 0

    fork_exec.side_effect = proper_error

    with mock.patch("subprocess.os.waitpid",
                    side_effect=ChildProcessError):
        # The well-formed errpipe payload must surface as IsADirectoryError.
        with self.assertRaises(IsADirectoryError):
            self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
    """Test error passing done through errpipe_write where its not
    in the expected format"""
    error_data = b"\xFF\x00\xDE\xAD"

    def bad_error(*args):
        # args[13] is the errpipe_write fd (see fork_exec's signature).
        errpipe_write = args[13]
        # Anything can be in the pipe, no assumptions should
        # be made about its encoding, so we'll write some
        # arbitrary hex bytes to test it out
        os.write(errpipe_write, error_data)
        return 0

    fork_exec.side_effect = bad_error

    with mock.patch("subprocess.os.waitpid",
                    side_effect=ChildProcessError):
        with self.assertRaises(subprocess.SubprocessError) as e:
            self.PopenNoDestructor(["non_existent_command"])

    # The unparseable payload must be reported verbatim in the message.
    self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
                 "need /proc/self/status")
def test_restore_signals(self):
    """restore_signals=True must change the child's ignored-signal mask.

    Runs ``cat /proc/self/status`` once with restore_signals=False and once
    with True, then compares the ``SigIgn`` lines: with restoration enabled
    SIGPIPE and friends should have been unblocked in the child.
    """
    # Blindly assume that cat exists on systems with /proc/self/status...
    default_proc_status = subprocess.check_output(
            ['cat', '/proc/self/status'],
            restore_signals=False)
    for line in default_proc_status.splitlines():
        if line.startswith(b'SigIgn'):
            default_sig_ign_mask = line
            break
    else:
        self.skipTest("SigIgn not found in /proc/self/status.")
    restored_proc_status = subprocess.check_output(
            ['cat', '/proc/self/status'],
            restore_signals=True)
    for line in restored_proc_status.splitlines():
        if line.startswith(b'SigIgn'):
            restored_sig_ign_mask = line
            break
    else:
        # Mirror the first loop's guard: without it, a missing SigIgn line
        # here would surface as a confusing NameError below, not a skip.
        self.skipTest("SigIgn not found in /proc/self/status.")
    self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
                        msg="restore_signals=True should've unblocked "
                        "SIGPIPE and friends.")
def test_start_new_session(self):
    """start_new_session=True gives the child its own session id."""
    # For code coverage of calling setsid().  We don't care if we get an
    # EPERM error from it depending on the test execution environment, that
    # still indicates that it was called.
    try:
        output = subprocess.check_output(
                [sys.executable, "-c", "import os; print(os.getsid(0))"],
                start_new_session=True)
    except OSError as e:
        if e.errno != errno.EPERM:
            raise
    else:
        # The child's session id must differ from the parent's.
        parent_sid = os.getsid(0)
        child_sid = int(output)
        self.assertNotEqual(parent_sid, child_sid)
def test_run_abort(self):
    """A signal-terminated child yields a negative returncode (-signum)."""
    # returncode handles signal termination
    with support.SuppressCrashReport():
        p = subprocess.Popen([sys.executable, "-c",
                              'import os; os.abort()'])
        p.wait()
    self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
    """str() of a signal-death CalledProcessError names the signal."""
    err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
    error_string = str(err)
    # We're relying on the repr() of the signal.Signals intenum to provide
    # the word signal, the signal name and the numeric value.
    self.assertIn("signal", error_string.lower())
    # We're not being specific about the signal name as some signals have
    # multiple names and which name is revealed can vary.
    self.assertIn("SIG", error_string)
    self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
    """str() falls back to the raw number for an unrecognized signal."""
    exc = subprocess.CalledProcessError(-9876543, "fake cmd")
    self.assertIn("unknown signal 9876543.", str(exc))
def test_CalledProcessError_str_non_zero(self):
    """str() of a plain non-zero exit mentions the exit status."""
    exc = subprocess.CalledProcessError(2, "fake cmd")
    self.assertIn("non-zero exit status 2.", str(exc))
def test_preexec(self):
    """preexec_fn runs in the child before exec (observed via its env)."""
    # DISCLAIMER: Setting environment variables is *not* a good use
    # of a preexec_fn.  This is merely a test.
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys,os;'
                          'sys.stdout.write(os.getenv("FRUIT"))'],
                         stdout=subprocess.PIPE,
                         preexec_fn=lambda: os.putenv("FRUIT", "apple"))
    with p:
        self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
    """An exception raised by preexec_fn must propagate to the parent."""
    def raise_it():
        raise ValueError("What if two swallows carried a coconut?")
    try:
        p = subprocess.Popen([sys.executable, "-c", ""],
                             preexec_fn=raise_it)
    except subprocess.SubprocessError as e:
        # With the C _posixsubprocess module the child's exception is
        # reported as a SubprocessError rather than the original type.
        self.assertTrue(
                subprocess._posixsubprocess,
                "Expected a ValueError from the preexec_fn")
    except ValueError as e:
        self.assertIn("coconut", e.args[0])
    else:
        self.fail("Exception raised by preexec_fn did not make it "
                  "to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
    """Used to test behavior at the end of _execute_child."""
    # Wraps _execute_child to detect early closing of the pipe fds:
    # if any were closed prematurely, a freshly opened fd would reuse
    # one of their numbers.
    def __init__(self, testcase, *args, **kwargs):
        self._testcase = testcase
        subprocess.Popen.__init__(self, *args, **kwargs)

    def _execute_child(self, *args, **kwargs):
        try:
            subprocess.Popen._execute_child(self, *args, **kwargs)
        finally:
            # Open a bunch of file descriptors and verify that
            # none of them are the same as the ones the Popen
            # instance is using for stdin/stdout/stderr.
            devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                           for _ in range(8)]
            try:
                for fd in devzero_fds:
                    self._testcase.assertNotIn(
                            fd, (self.stdin.fileno(), self.stdout.fileno(),
                                 self.stderr.fileno()),
                            msg="At least one fd was closed early.")
            finally:
                for fd in devzero_fds:
                    os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
    """Issue16140: Don't double close pipes on preexec error."""
    def raise_it():
        # Forces _execute_child() down its errpipe_data error branch.
        raise subprocess.SubprocessError(
                "force the _execute_child() errpipe_data path.")

    with self.assertRaises(subprocess.SubprocessError):
        self._TestExecuteChildPopen(
                self, ZERO_RETURN_CMD,
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
    """gc is disabled around preexec_fn and its prior state is restored."""
    # This tests the code that disables garbage collection if the child
    # process will execute any Python.
    def raise_runtime_error():
        raise RuntimeError("this shouldn't escape")
    enabled = gc.isenabled()
    orig_gc_disable = gc.disable
    orig_gc_isenabled = gc.isenabled
    try:
        gc.disable()
        self.assertFalse(gc.isenabled())
        subprocess.call([sys.executable, '-c', ''],
                        preexec_fn=lambda: None)
        # gc was already off, so Popen must not have turned it back on.
        self.assertFalse(gc.isenabled(),
                         "Popen enabled gc when it shouldn't.")

        gc.enable()
        self.assertTrue(gc.isenabled())
        subprocess.call([sys.executable, '-c', ''],
                        preexec_fn=lambda: None)
        self.assertTrue(gc.isenabled(), "Popen left gc disabled.")

        # Failures inside the gc calls themselves must propagate cleanly.
        gc.disable = raise_runtime_error
        self.assertRaises(RuntimeError, subprocess.Popen,
                          [sys.executable, '-c', ''],
                          preexec_fn=lambda: None)

        del gc.isenabled  # force an AttributeError
        self.assertRaises(AttributeError, subprocess.Popen,
                          [sys.executable, '-c', ''],
                          preexec_fn=lambda: None)
    finally:
        # Undo the monkey-patching and restore the original gc state.
        gc.disable = orig_gc_disable
        gc.isenabled = orig_gc_isenabled
        if not enabled:
            gc.disable()
@unittest.skipIf(
    sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
    """fork() failure with a preexec_fn raises cleanly (BlockingIOError)."""
    # The internal code did not preserve the previous exception when
    # re-enabling garbage collection
    try:
        from resource import getrlimit, setrlimit, RLIMIT_NPROC
    except ImportError as err:
        self.skipTest(err)  # RLIMIT_NPROC is specific to Linux and BSD
    limits = getrlimit(RLIMIT_NPROC)
    [_, hard] = limits
    # Set the soft process limit to 0 so the fork in Popen fails (EAGAIN).
    setrlimit(RLIMIT_NPROC, (0, hard))
    self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
    try:
        subprocess.call([sys.executable, '-c', ''],
                        preexec_fn=lambda: None)
    except BlockingIOError:
        # Forking should raise EAGAIN, translated to BlockingIOError
        pass
    else:
        self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
    """Popen accepts a string args: runs it as a single executable path."""
    # args is a string
    fd, fname = tempfile.mkstemp()
    # reopen in text mode
    with open(fd, "w", errors="surrogateescape") as fobj:
        fobj.write("#!%s\n" % support.unix_shell)
        fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                   sys.executable)
    os.chmod(fname, 0o700)
    p = subprocess.Popen(fname)
    p.wait()
    os.remove(fname)
    self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
    """Windows-only Popen options raise ValueError on POSIX."""
    # invalid arguments should raise ValueError
    self.assertRaises(ValueError, subprocess.call,
                      [sys.executable, "-c",
                       "import sys; sys.exit(47)"],
                      startupinfo=47)
    self.assertRaises(ValueError, subprocess.call,
                      [sys.executable, "-c",
                       "import sys; sys.exit(47)"],
                      creationflags=47)
def test_shell_sequence(self):
    """shell=True with a sequence: args[0] is passed to the shell."""
    # Run command through the shell (sequence)
    newenv = os.environ.copy()
    newenv["FRUIT"] = "apple"
    # shell=True (was a truthy int 1; same behavior, idiomatic bool).
    p = subprocess.Popen(["echo $FRUIT"], shell=True,
                         stdout=subprocess.PIPE,
                         env=newenv)
    with p:
        self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
    """shell=True with a string: the whole string is run by the shell."""
    # Run command through the shell (string)
    newenv = os.environ.copy()
    newenv["FRUIT"] = "apple"
    # shell=True (was a truthy int 1; same behavior, idiomatic bool).
    p = subprocess.Popen("echo $FRUIT", shell=True,
                         stdout=subprocess.PIPE,
                         env=newenv)
    with p:
        self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
    # call() function with string argument on UNIX
    fd, fname = tempfile.mkstemp()
    # reopen in text mode
    with open(fd, "w", errors="surrogateescape") as fobj:
        fobj.write("#!%s\n" % support.unix_shell)
        fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                   sys.executable)
    os.chmod(fname, 0o700)
    # The script exits 47; call() must relay that exit status.
    rc = subprocess.call(fname)
    os.remove(fname)
    self.assertEqual(rc, 47)
def test_specific_shell(self):
    """executable= with shell=True runs that shell with the right argv[0]."""
    # Issue #9265: Incorrect name passed as arg[0].
    shells = []
    for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
        for name in ['bash', 'ksh']:
            sh = os.path.join(prefix, name)
            if os.path.isfile(sh):
                shells.append(sh)
    if not shells: # Will probably work for any shell but csh.
        self.skipTest("bash or ksh required for this test")
    sh = '/bin/sh'
    if os.path.isfile(sh) and not os.path.islink(sh):
        # Test will fail if /bin/sh is a symlink to csh.
        shells.append(sh)
    for sh in shells:
        # `echo $0` prints the shell's argv[0], which must be the shell path.
        p = subprocess.Popen("echo $0", executable=sh, shell=True,
                             stdout=subprocess.PIPE)
        with p:
            self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
    """Helper: spawn a long-sleeping child, call Popen.<method>(*args) on it.

    Returns the Popen object so callers can inspect its exit status.
    """
    # Do not inherit file handles from the parent.
    # It should fix failures on some platforms.
    # Also set the SIGINT handler to the default to make sure it's not
    # being ignored (some tests rely on that.)
    old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
    try:
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             time.sleep(30)
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    finally:
        signal.signal(signal.SIGINT, old_handler)
    # Wait for the interpreter to be completely initialized before
    # sending any signal.
    p.stdout.read(1)
    getattr(p, method)(*args)
    return p
# NOTE: a skip decorator on this helper makes every test that calls it
# raise SkipTest on the affected platforms.
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                 "Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
    """Helper: call Popen.<method>(*args) on a child that already exited."""
    # Do not inherit file handles from the parent.
    # It should fix failures on some platforms.
    p = subprocess.Popen([sys.executable, "-c", """if 1:
                         import sys, time
                         sys.stdout.write('x\\n')
                         sys.stdout.flush()
                         """],
                         close_fds=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # Wait for the interpreter to be completely initialized before
    # sending any signal.
    p.stdout.read(1)
    # The process should end after this
    time.sleep(1)
    # This shouldn't raise even though the child is now dead
    getattr(p, method)(*args)
    p.communicate()
def test_send_signal(self):
    """send_signal(SIGINT) must interrupt the child (KeyboardInterrupt)."""
    p = self._kill_process('send_signal', signal.SIGINT)
    _, stderr = p.communicate()
    self.assertIn(b'KeyboardInterrupt', stderr)
    self.assertNotEqual(p.wait(), 0)
def test_kill(self):
    """kill() must end the child with -SIGKILL and no stderr output."""
    p = self._kill_process('kill')
    _, stderr = p.communicate()
    self.assertStderrEqual(stderr, b'')
    self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
    """terminate() must end the child with -SIGTERM and no stderr output."""
    p = self._kill_process('terminate')
    _, stderr = p.communicate()
    self.assertStderrEqual(stderr, b'')
    self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
    """Sending a signal to a dead process must not raise."""
    # Sending a signal to a dead process
    self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
    """Killing a dead process must not raise."""
    # Killing a dead process
    self._kill_dead_process('kill')
def test_terminate_dead(self):
    """Terminating a dead process must not raise."""
    # Terminating a dead process
    self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
    """Close the standard fds listed in *fds*, then verify Popen's pipes
    still round-trip stdout/stderr correctly (issue #9905)."""
    # Issue #9905: test that subprocess pipes still work properly with
    # some standard fds closed
    stdin = 0
    saved_fds = self._save_fds(fds)
    # If fd 0 is among the closed fds, feed the child from the saved
    # duplicate instead of the (soon to be closed) original.
    for fd, saved, inheritable in saved_fds:
        if fd == 0:
            stdin = saved
            break
    try:
        for fd in fds:
            os.close(fd)
        out, err = subprocess.Popen([sys.executable, "-c",
                        'import sys;'
                        'sys.stdout.write("apple");'
                        'sys.stdout.flush();'
                        'sys.stderr.write("orange")'],
                       stdin=stdin,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()
        err = support.strip_python_stderr(err)
        self.assertEqual((out, err), (b'apple', b'orange'))
    finally:
        # Always put the original std fds back, even on failure.
        self._restore_fds(saved_fds)
def test_close_fd_0(self):
    """Pipes must work with stdin (fd 0) closed."""
    self.check_close_std_fds([0])
def test_close_fd_1(self):
    """Pipes must work with stdout (fd 1) closed."""
    self.check_close_std_fds([1])
def test_close_fd_2(self):
    """Pipes must work with stderr (fd 2) closed."""
    self.check_close_std_fds([2])
def test_close_fds_0_1(self):
    """Pipes must work with fds 0 and 1 closed."""
    self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
    """Pipes must work with fds 0 and 2 closed."""
    self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
    """Pipes must work with fds 1 and 2 closed."""
    self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
    """Pipes must work with all three standard fds closed (issue #10806)."""
    # Issue #10806: test that subprocess pipes still work properly with
    # all standard fds closed.
    self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
    """Issue #15798: Popen should work when stdio fds are available.

    Closing fds 0 and 1 makes them the lowest free fds, so the internal
    error pipe lands on a low number; the child must still start cleanly.
    """
    new_stdin = os.dup(0)
    new_stdout = os.dup(1)
    try:
        os.close(0)
        os.close(1)
        # Side test: if errpipe_write fails to have its CLOEXEC
        # flag set this should cause the parent to think the exec
        # failed.  Extremely unlikely: everyone supports CLOEXEC.
        subprocess.Popen([
            sys.executable, "-c",
            "print('AssertionError:0:CLOEXEC failure.')"]).wait()
    finally:
        # Restore original stdin and stdout
        os.dup2(new_stdin, 0)
        os.dup2(new_stdout, 1)
        os.close(new_stdin)
        os.close(new_stdout)
def test_remapping_std_fds(self):
    """The child must see stdin/stdout/stderr correctly remapped even when
    the requested fds overlap the standard fd range in a "wrong" order."""
    # open up some temporary files
    temps = [tempfile.mkstemp() for i in range(3)]
    try:
        temp_fds = [fd for fd, fname in temps]

        # unlink the files -- we won't need to reopen them
        for fd, fname in temps:
            os.unlink(fname)

        # write some data to what will become stdin, and rewind
        os.write(temp_fds[1], b"STDIN")
        os.lseek(temp_fds[1], 0, 0)

        # move the standard file descriptors out of the way
        saved_fds = self._save_fds(range(3))
        try:
            # duplicate the file objects over the standard fd's
            for fd, temp_fd in enumerate(temp_fds):
                os.dup2(temp_fd, fd)

            # now use those files in the "wrong" order, so that subprocess
            # has to rearrange them in the child
            p = subprocess.Popen([sys.executable, "-c",
                'import sys; got = sys.stdin.read();'
                'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                stdin=temp_fds[1],
                stdout=temp_fds[2],
                stderr=temp_fds[0])
            p.wait()
        finally:
            self._restore_fds(saved_fds)

        for fd in temp_fds:
            os.lseek(fd, 0, 0)

        out = os.read(temp_fds[2], 1024)
        err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
        self.assertEqual(out, b"got STDIN")
        self.assertEqual(err, b"err")

    finally:
        for fd in temp_fds:
            os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
    """Map the three temp fds onto fds 0-2, then launch a child with the
    given (possibly swapped) std fd assignments and verify the data flows
    to the right places (#12607)."""
    # open up some temporary files
    temps = [tempfile.mkstemp() for i in range(3)]
    temp_fds = [fd for fd, fname in temps]
    try:
        # unlink the files -- we won't need to reopen them
        for fd, fname in temps:
            os.unlink(fname)

        # save a copy of the standard file descriptors
        saved_fds = self._save_fds(range(3))
        try:
            # duplicate the temp files over the standard fd's 0, 1, 2
            for fd, temp_fd in enumerate(temp_fds):
                os.dup2(temp_fd, fd)

            # write some data to what will become stdin, and rewind
            os.write(stdin_no, b"STDIN")
            os.lseek(stdin_no, 0, 0)

            # now use those files in the given order, so that subprocess
            # has to rearrange them in the child
            p = subprocess.Popen([sys.executable, "-c",
                'import sys; got = sys.stdin.read();'
                'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                stdin=stdin_no,
                stdout=stdout_no,
                stderr=stderr_no)
            p.wait()

            for fd in temp_fds:
                os.lseek(fd, 0, 0)

            out = os.read(stdout_no, 1024)
            err = support.strip_python_stderr(os.read(stderr_no, 1024))
        finally:
            self._restore_fds(saved_fds)

        # Assertions happen after the std fds are restored so failure
        # output is readable.
        self.assertEqual(out, b"got STDIN")
        self.assertEqual(err, b"err")

    finally:
        for fd in temp_fds:
            os.close(fd)
def test_swap_fds(self):
    """Exercise every ordering of stdin/stdout/stderr over fds 0-2.

    When duping fds, if one of the target fds is 0, 1 or 2, it can be
    overwritten before being used (#12607); every permutation of the
    three assignments is checked.
    """
    # itertools.permutations(range(3)) yields the six orderings in
    # lexicographic order, matching the original hard-coded sequence.
    for stdin_no, stdout_no, stderr_no in itertools.permutations(range(3)):
        self.check_swap_fds(stdin_no, stdout_no, stderr_no)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
    """Redirect the two parent fds in *from_fds* onto the child std fds in
    *to_fds* while the remaining std fd is closed, then verify each write
    landed on the intended descriptor (#32844)."""
    saved_fds = self._save_fds(range(3))
    try:
        for from_fd in from_fds:
            with tempfile.TemporaryFile() as f:
                os.dup2(f.fileno(), from_fd)

        # Close the one std fd not covered by from_fds.
        fd_to_close = (set(range(3)) - set(from_fds)).pop()
        os.close(fd_to_close)

        arg_names = ['stdin', 'stdout', 'stderr']
        kwargs = {}
        for from_fd, to_fd in zip(from_fds, to_fds):
            kwargs[arg_names[to_fd]] = from_fd

        # The child writes its own fd number to every std fd except the
        # one we deliberately left unmapped.
        code = textwrap.dedent(r'''
            import os, sys
            skipped_fd = int(sys.argv[1])
            for fd in range(3):
                if fd != skipped_fd:
                    os.write(fd, str(fd).encode('ascii'))
        ''')

        skipped_fd = (set(range(3)) - set(to_fds)).pop()

        rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
                             **kwargs)
        self.assertEqual(rc, 0)

        for from_fd, to_fd in zip(from_fds, to_fds):
            os.lseek(from_fd, 0, os.SEEK_SET)
            read_bytes = os.read(from_fd, 1024)
            read_fds = list(map(int, read_bytes.decode('ascii')))
            msg = textwrap.dedent(f"""
                When testing {from_fds} to {to_fds} redirection,
                parent descriptor {from_fd} got redirected
                to descriptor(s) {read_fds} instead of descriptor {to_fd}.
                """)
            self.assertEqual([to_fd], read_fds, msg)
    finally:
        self._restore_fds(saved_fds)
def test_swap_std_fds_with_one_closed(self):
    """Check that subprocess can remap std fds correctly even if one of
    them is closed (#32844), for every pair-to-pair mapping."""
    # product() of the combinations and permutations iterates in exactly
    # the same order as the original nested for-loops.
    fd_pairs = itertools.product(itertools.combinations(range(3), 2),
                                 itertools.permutations(range(3), 2))
    for from_fds, to_fds in fd_pairs:
        self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
def test_surrogates_error_message(self):
    """A preexec_fn raising a surrogate-laden ValueError must surface as
    either that ValueError (pure-Python path) or SubprocessError (C path)."""
    def prepare():
        raise ValueError("surrogate:\uDCff")

    try:
        subprocess.call(
            ZERO_RETURN_CMD,
            preexec_fn=prepare)
    except ValueError as err:
        # Pure Python implementations keeps the message
        self.assertIsNone(subprocess._posixsubprocess)
        self.assertEqual(str(err), "surrogate:\uDCff")
    except subprocess.SubprocessError as err:
        # _posixsubprocess uses a default message
        self.assertIsNotNone(subprocess._posixsubprocess)
        self.assertEqual(str(err), "Exception occurred in preexec_fn.")
    else:
        self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
    """Environment names/values containing surrogates must round-trip to
    the child, both as str (surrogateescape) and as raw bytes."""
    for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
        encoded_value = value.encode("ascii", "surrogateescape")

        # test str with surrogates
        script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = value
        # Use C locale to get ASCII for the locale encoding to force
        # surrogate-escaping of \xFF in the child process
        env['LC_ALL'] = 'C'
        decoded_value = value
        stdout = subprocess.check_output(
            [sys.executable, "-c", script],
            env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))

        # test bytes
        key = key.encode("ascii", "surrogateescape")
        script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
        env = os.environ.copy()
        env[key] = encoded_value
        stdout = subprocess.check_output(
            [sys.executable, "-c", script],
            env=env)
        stdout = stdout.rstrip(b'\n\r')
        self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
    """A program given as bytes must be runnable: absolute path, shell
    string, and PATH lookup with both str and bytes environments."""
    abs_program = os.fsencode(ZERO_RETURN_CMD[0])
    args = list(ZERO_RETURN_CMD[1:])
    path, program = os.path.split(ZERO_RETURN_CMD[0])
    program = os.fsencode(program)

    # absolute bytes path
    exitcode = subprocess.call([abs_program]+args)
    self.assertEqual(exitcode, 0)

    # absolute bytes path as a string
    cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
    exitcode = subprocess.call(cmd, shell=True)
    self.assertEqual(exitcode, 0)

    # bytes program, unicode PATH
    env = os.environ.copy()
    env["PATH"] = path
    exitcode = subprocess.call([program]+args, env=env)
    self.assertEqual(exitcode, 0)

    # bytes program, bytes PATH
    envb = os.environb.copy()
    envb[b"PATH"] = os.fsencode(path)
    exitcode = subprocess.call([program]+args, env=envb)
    self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
    """Pipe fds created by Popen must be close-on-exec: a second child
    launched with close_fds=False must not see the first child's pipes."""
    sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    p1 = subprocess.Popen([sys.executable, sleeper],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, close_fds=False)

    self.addCleanup(p1.communicate, b'')

    p2 = subprocess.Popen([sys.executable, fd_status],
                          stdout=subprocess.PIPE, close_fds=False)

    output, error = p2.communicate()
    result_fds = set(map(int, output.split(b',')))
    unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                        p1.stderr.fileno()])

    self.assertFalse(result_fds & unwanted_fds,
                     "Expected no fds from %r to be open in child, "
                     "found %r" %
                          (unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
    """A cat|grep style pipeline must not deadlock: the second child must
    not inherit the write end of the first child's stdin pipe, so EOF
    propagates when p1.stdin is closed."""
    qcat = support.findfile("qcat.py", subdir="subprocessdata")
    qgrep = support.findfile("qgrep.py", subdir="subprocessdata")

    subdata = b'zxcvbn'
    data = subdata * 4 + b'\n'

    p1 = subprocess.Popen([sys.executable, qcat],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                          close_fds=False)

    p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                          stdin=p1.stdout, stdout=subprocess.PIPE,
                          close_fds=False)

    self.addCleanup(p1.wait)
    self.addCleanup(p2.wait)
    def kill_p1():
        try:
            p1.terminate()
        except ProcessLookupError:
            pass
    def kill_p2():
        try:
            p2.terminate()
        except ProcessLookupError:
            pass
    self.addCleanup(kill_p1)
    self.addCleanup(kill_p2)

    p1.stdin.write(data)
    p1.stdin.close()

    readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)

    self.assertTrue(readfiles, "The child hung")
    self.assertEqual(p2.stdout.read(), data)

    p1.stdout.close()
    p2.stdout.close()
def test_close_fds(self):
    """close_fds=False keeps inheritable fds open in the child;
    close_fds=True closes them; pass_fds keeps only the listed ones."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    fds = os.pipe()
    self.addCleanup(os.close, fds[0])
    self.addCleanup(os.close, fds[1])
    open_fds = set(fds)
    # add a bunch more fds
    for _ in range(9):
        fd = os.open(os.devnull, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        open_fds.add(fd)

    for fd in open_fds:
        os.set_inheritable(fd, True)

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=False)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertEqual(remaining_fds & open_fds, open_fds,
                     "Some fds were closed")

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertFalse(remaining_fds & open_fds,
                     "Some fds were left open")
    self.assertIn(1, remaining_fds, "Subprocess failed")

    # Keep some of the fd's we opened open in the subprocess.
    # This tests _posixsubprocess.c's proper handling of fds_to_keep.
    fds_to_keep = set(open_fds.pop() for _ in range(8))
    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         pass_fds=fds_to_keep)
    output, ignored = p.communicate()
    remaining_fds = set(map(int, output.split(b',')))

    self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
                     "Some fds not in pass_fds were left open")
    self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
        os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
        "Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
    """Confirm that issue21618 is fixed (may fail under valgrind)."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # This launches the meat of the test in a child process to
    # avoid messing with the larger unittest processes maximum
    # number of file descriptors.
    # This process launches:
    # +--> Process that lowers its RLIMIT_NOFILE after setting up
    #      a bunch of high open fds above the new lower rlimit.
    #      Those are reported via stdout before launching a new
    #      process with close_fds=False to run the actual test:
    #      +--> The TEST: This one launches a fd_status.py
    #           subprocess with close_fds=True so we can find out if
    #           any of the fds above the lowered rlimit are still open.
    p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
    '''
    import os, resource, subprocess, sys, textwrap
    open_fds = set()
    # Add a bunch more fds to pass down.
    for _ in range(40):
        fd = os.open(os.devnull, os.O_RDONLY)
        open_fds.add(fd)

    # Leave a two pairs of low ones available for use by the
    # internal child error pipe and the stdout pipe.
    # We also leave 10 more open as some Python buildbots run into
    # "too many open files" errors during the test if we do not.
    for fd in sorted(open_fds)[:14]:
        os.close(fd)
        open_fds.remove(fd)

    for fd in open_fds:
        #self.addCleanup(os.close, fd)
        os.set_inheritable(fd, True)

    max_fd_open = max(open_fds)

    # Communicate the open_fds to the parent unittest.TestCase process.
    print(','.join(map(str, sorted(open_fds))))
    sys.stdout.flush()

    rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
    try:
        # 29 is lower than the highest fds we are leaving open.
        resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))

        # Launch a new Python interpreter with our low fd rlim_cur that
        # inherits open fds above that limit.  It then uses subprocess
        # with close_fds=True to get a report of open fds in the child.
        # An explicit list of fds to check is passed to fd_status.py as
        # letting fd_status rely on its default logic would miss the
        # fds above rlim_cur as it normally only checks up to that limit.
        subprocess.Popen(
            [sys.executable, '-c',
             textwrap.dedent("""
                 import subprocess, sys
                 subprocess.Popen([sys.executable, %r] +
                                  [str(x) for x in range({max_fd})],
                                  close_fds=True).wait()
                 """.format(max_fd=max_fd_open+1))],
            close_fds=False).wait()
    finally:
        resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
    ''' % fd_status)], stdout=subprocess.PIPE)

    output, unused_stderr = p.communicate()
    output_lines = output.splitlines()
    self.assertEqual(len(output_lines), 2,
                     msg="expected exactly two lines of output:\n%r" % output)
    opened_fds = set(map(int, output_lines[0].strip().split(b',')))
    remaining_fds = set(map(int, output_lines[1].strip().split(b',')))

    self.assertFalse(remaining_fds & opened_fds,
                     msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
    """With close_fds=True, exactly the fds named in pass_fds survive into
    the child; pass_fds also overrides close_fds=False with a warning."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    open_fds = set()

    for x in range(5):
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        os.set_inheritable(fds[0], True)
        os.set_inheritable(fds[1], True)
        open_fds.update(fds)

    for fd in open_fds:
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=(fd, ))
        output, ignored = p.communicate()

        remaining_fds = set(map(int, output.split(b',')))
        to_be_closed = open_fds - {fd}

        self.assertIn(fd, remaining_fds, "fd to be passed not passed")
        self.assertFalse(remaining_fds & to_be_closed,
                         "fd to be closed passed")

        # pass_fds overrides close_fds with a warning.
        with self.assertWarns(RuntimeWarning) as context:
            self.assertFalse(subprocess.call(
                    ZERO_RETURN_CMD,
                    close_fds=False, pass_fds=(fd, )))
        self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
    """pass_fds must deliver even non-inheritable fds to the child, while
    leaving the parent's inheritable flags untouched."""
    script = support.findfile("fd_status.py", subdir="subprocessdata")

    inheritable, non_inheritable = os.pipe()
    self.addCleanup(os.close, inheritable)
    self.addCleanup(os.close, non_inheritable)
    os.set_inheritable(inheritable, True)
    os.set_inheritable(non_inheritable, False)
    pass_fds = (inheritable, non_inheritable)
    args = [sys.executable, script]
    args += list(map(str, pass_fds))

    p = subprocess.Popen(args,
                         stdout=subprocess.PIPE, close_fds=True,
                         pass_fds=pass_fds)
    output, ignored = p.communicate()
    fds = set(map(int, output.split(b',')))

    # the inheritable file descriptor must be inherited, so its inheritable
    # flag must be set in the child process after fork() and before exec()
    self.assertEqual(fds, set(pass_fds), "output=%a" % output)

    # inheritable flag must not be changed in the parent process
    self.assertEqual(os.get_inheritable(inheritable), True)
    self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
def test_pass_fds_redirected(self):
    """Regression test for https://bugs.python.org/issue32270."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
    pass_fds = []
    for _ in range(2):
        fd = os.open(os.devnull, os.O_RDWR)
        self.addCleanup(os.close, fd)
        pass_fds.append(fd)

    stdout_r, stdout_w = os.pipe()
    self.addCleanup(os.close, stdout_r)
    self.addCleanup(os.close, stdout_w)
    # stdout_w is both the child's stdout and a member of pass_fds.
    pass_fds.insert(1, stdout_w)

    with subprocess.Popen([sys.executable, fd_status],
                          stdin=pass_fds[0],
                          stdout=pass_fds[1],
                          stderr=pass_fds[2],
                          close_fds=True,
                          pass_fds=pass_fds):
        output = os.read(stdout_r, 1024)
    fds = {int(num) for num in output.split(b',')}

    # The child must see the std fds plus every fd in pass_fds.
    self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
    """A single read/write fd may serve as both stdout and stdin."""
    with io.open(os.devnull, "r+") as inout:
        child = subprocess.Popen(ZERO_RETURN_CMD, stdin=inout, stdout=inout)
        child.wait()
def test_stdout_stderr_are_single_inout_fd(self):
    """A single read/write fd may serve as both stdout and stderr."""
    with io.open(os.devnull, "r+") as inout:
        child = subprocess.Popen(ZERO_RETURN_CMD, stderr=inout, stdout=inout)
        child.wait()
def test_stderr_stdin_are_single_inout_fd(self):
    """A single read/write fd may serve as both stderr and stdin."""
    with io.open(os.devnull, "r+") as inout:
        child = subprocess.Popen(ZERO_RETURN_CMD, stdin=inout, stderr=inout)
        child.wait()
def test_wait_when_sigchild_ignored(self):
    """Popen must be waitable even when the child sets SIGCHLD to SIG_IGN."""
    # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
    sigchild_ignore = support.findfile("sigchild_ignore.py",
                                       subdir="subprocessdata")
    p = subprocess.Popen([sys.executable, sigchild_ignore],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
                     " non-zero with this error:\n%s" %
                     stderr.decode('utf-8'))
def test_select_unbuffered(self):
    """With bufsize=0 the stdout pipe must be truly unbuffered so that
    select() reports readiness for the remaining byte (issue #11459)."""
    # Issue #11459: bufsize=0 should really set the pipes as
    # unbuffered (and therefore let select() work properly).
    # Local name deliberately shadows any module-level select import.
    select = support.import_module("select")
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys;'
                          'sys.stdout.write("apple")'],
                         stdout=subprocess.PIPE,
                         bufsize=0)
    f = p.stdout
    self.addCleanup(f.close)
    try:
        self.assertEqual(f.read(4), b"appl")
        self.assertIn(f, select.select([f], [], [], 0.0)[0])
    finally:
        p.wait()
def test_zombie_fast_process_del(self):
    """A Popen deleted before its child exits must be parked on
    subprocess._active (on POSIX) rather than leaking a zombie."""
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, it wouldn't be added to subprocess._active, and would
    # remain a zombie.
    # spawn a Popen, and delete its reference before it exits
    p = subprocess.Popen([sys.executable, "-c",
                          'import sys, time;'
                          'time.sleep(0.2)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    # Keep the id/pid so the object can be identified after the name is
    # dropped and __del__ runs.
    ident = id(p)
    pid = p.pid
    with support.check_warnings(('', ResourceWarning)):
        p = None

    if mswindows:
        # subprocess._active is not used on Windows and is set to None.
        self.assertIsNone(subprocess._active)
    else:
        # check that p is in the active processes list
        self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
    """A Popen deleted and then killed must eventually be reaped from
    subprocess._active when a later Popen triggers the cleanup pass."""
    # Issue #12650: on Unix, if Popen.__del__() was called before the
    # process exited, and the process got killed by a signal, it would never
    # be removed from subprocess._active, which triggered a FD and memory
    # leak.
    # spawn a Popen, delete its reference and kill it
    p = subprocess.Popen([sys.executable, "-c",
                          'import time;'
                          'time.sleep(3)'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    self.addCleanup(p.stdout.close)
    self.addCleanup(p.stderr.close)
    ident = id(p)
    pid = p.pid
    with support.check_warnings(('', ResourceWarning)):
        p = None

    os.kill(pid, signal.SIGKILL)
    if mswindows:
        # subprocess._active is not used on Windows and is set to None.
        self.assertIsNone(subprocess._active)
    else:
        # check that p is in the active processes list
        self.assertIn(ident, [id(o) for o in subprocess._active])

    # let some time for the process to exit, and create a new Popen: this
    # should trigger the wait() of p
    time.sleep(0.2)
    with self.assertRaises(OSError):
        with subprocess.Popen(NONEXISTING_CMD,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            pass
    # p should have been wait()ed on, and removed from the _active list
    self.assertRaises(OSError, os.waitpid, pid, 0)
    if mswindows:
        # subprocess._active is not used on Windows and is set to None.
        self.assertIsNone(subprocess._active)
    else:
        self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
    """close_fds must apply after preexec_fn runs, closing fds the
    preexec_fn dup2()ed into place."""
    fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

    # this FD is used as dup2() target by preexec_fn, and should be closed
    # in the child process
    fd = os.dup(1)
    self.addCleanup(os.close, fd)

    p = subprocess.Popen([sys.executable, fd_status],
                         stdout=subprocess.PIPE, close_fds=True,
                         preexec_fn=lambda: os.dup2(1, fd))
    output, ignored = p.communicate()

    remaining_fds = set(map(int, output.split(b',')))

    self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
    """fork_exec() must raise TypeError (not crash) when given arguments
    of the wrong type (issue #22290)."""
    # Issue #22290: fork_exec() must not crash on memory allocation failure
    # or other errors
    import _posixsubprocess
    gc_enabled = gc.isenabled()
    try:
        # Use a preexec function and enable the garbage collector
        # to force fork_exec() to re-enable the garbage collector
        # on error.
        func = lambda: None
        gc.enable()

        # Each tuple has exactly one argument of the wrong type.
        for args, exe_list, cwd, env_list in (
            (123,      [b"exe"], None, [b"env"]),
            ([b"arg"], 123,      None, [b"env"]),
            ([b"arg"], [b"exe"], 123,  [b"env"]),
            ([b"arg"], [b"exe"], None, 123),
        ):
            with self.assertRaises(TypeError):
                _posixsubprocess.fork_exec(
                    args, exe_list,
                    True, (), cwd, env_list,
                    -1, -1, -1, -1,
                    1, 2, 3, 4,
                    True, True, func)
    finally:
        # Restore the caller's GC state.
        if not gc_enabled:
            gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
    """fork_exec() must reject malformed fds_to_keep tuples with
    ValueError mentioning 'fds_to_keep' (issue #23564)."""
    # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
    import _posixsubprocess
    class BadInt:
        # Conversion succeeds once, then raises, to exercise the error
        # path mid-validation.
        first = True
        def __init__(self, value):
            self.value = value
        def __int__(self):
            if self.first:
                self.first = False
                return self.value
            raise ValueError

    gc_enabled = gc.isenabled()
    try:
        gc.enable()

        for fds_to_keep in (
            (-1, 2, 3, 4, 5),  # Negative number.
            ('str', 4),  # Not an int.
            (18, 23, 42, 2**63),  # Out of range.
            (5, 4),  # Not sorted.
            (6, 7, 7, 8),  # Duplicate.
            (BadInt(1), BadInt(2)),
        ):
            with self.assertRaises(
                    ValueError,
                    msg='fds_to_keep={}'.format(fds_to_keep)) as c:
                _posixsubprocess.fork_exec(
                    [b"false"], [b"false"],
                    True, fds_to_keep, None, [b"env"],
                    -1, -1, -1, -1,
                    1, 2, 3, 4,
                    True, True, None)
            self.assertIn('fds_to_keep', str(c.exception))
    finally:
        if not gc_enabled:
            gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
    """communicate() must swallow a BrokenPipeError raised while closing
    the child's stdin."""
    # By not setting stdout or stderr or a timeout we force the fast path
    # that just calls _stdin_write() internally due to our mock.
    proc = subprocess.Popen(ZERO_RETURN_CMD)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.close.side_effect = BrokenPipeError
        proc.communicate()  # Should swallow BrokenPipeError from close.
        mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
    """communicate() must swallow a BrokenPipeError raised while writing
    to the child's stdin, and still close stdin."""
    # By not setting stdout or stderr or a timeout we force the fast path
    # that just calls _stdin_write() internally due to our mock.
    proc = subprocess.Popen(ZERO_RETURN_CMD)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.write.side_effect = BrokenPipeError
        proc.communicate(b'stuff')  # Should swallow the BrokenPipeError.
        mock_proc_stdin.write.assert_called_once_with(b'stuff')
        mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
    """_communicate() must swallow a BrokenPipeError raised while flushing
    the child's stdin."""
    # Setting stdin and stdout forces the ._communicate() code path.
    # python -h exits faster than python -c pass (but spams stdout).
    proc = subprocess.Popen([sys.executable, '-h'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
            open(os.devnull, 'wb') as dev_null:
        mock_proc_stdin.flush.side_effect = BrokenPipeError
        # because _communicate registers a selector using proc.stdin...
        mock_proc_stdin.fileno.return_value = dev_null.fileno()
        # _communicate() should swallow BrokenPipeError from flush.
        proc.communicate(b'stuff')
        mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
    """_communicate() (timeout path) must swallow a BrokenPipeError raised
    while closing the child's stdin."""
    # Setting stdin and stdout forces the ._communicate() code path.
    # python -h exits faster than python -c pass (but spams stdout).
    proc = subprocess.Popen([sys.executable, '-h'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.close.side_effect = BrokenPipeError
        # _communicate() should swallow BrokenPipeError from close.
        proc.communicate(timeout=999)
        mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
                     and hasattr(_testcapi, 'W_STOPCODE'),
                     'need _testcapi.W_STOPCODE')
def test_stopped(self):
    """Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
    args = ZERO_RETURN_CMD
    proc = subprocess.Popen(args)

    # Wait until the real process completes to avoid zombie process
    pid = proc.pid
    pid, status = os.waitpid(pid, 0)
    self.assertEqual(status, 0)

    # Fake a stopped (not exited) status via a mocked waitpid.
    status = _testcapi.W_STOPCODE(3)
    with mock.patch('subprocess.os.waitpid', return_value=(pid, status)):
        returncode = proc.wait()

    self.assertEqual(returncode, -3)
def test_communicate_repeated_call_after_stdout_close(self):
    """Calling communicate() again after a TimeoutExpired must keep
    working even once the child has closed its stdout."""
    proc = subprocess.Popen([sys.executable, '-c',
                             'import os, time; os.close(1), time.sleep(2)'],
                            stdout=subprocess.PIPE)
    while True:
        try:
            proc.communicate(timeout=0.1)
            return
        except subprocess.TimeoutExpired:
            pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
    """A STARTUPINFO with attributes set after construction must be
    accepted (and wShowWindow silently ignored for a console process)."""
    # startupinfo argument
    # We uses hardcoded constants, because we do not want to
    # depend on win32all.
    STARTF_USESHOWWINDOW = 1
    SW_MAXIMIZE = 3
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags = STARTF_USESHOWWINDOW
    startupinfo.wShowWindow = SW_MAXIMIZE
    # Since Python is a console process, it won't be affected
    # by wShowWindow, but the argument should be silently
    # ignored
    subprocess.call(ZERO_RETURN_CMD,
                    startupinfo=startupinfo)
def test_startupinfo_keywords(self):
    """A STARTUPINFO built via constructor keywords must be accepted
    (and wShowWindow silently ignored for a console process)."""
    # startupinfo argument
    # We use hardcoded constants, because we do not want to
    # depend on win32all.
    # NOTE: the local constant was previously misspelled
    # STARTF_USERSHOWWINDOW; the Win32 flag is STARTF_USESHOWWINDOW.
    STARTF_USESHOWWINDOW = 1
    SW_MAXIMIZE = 3
    startupinfo = subprocess.STARTUPINFO(
        dwFlags=STARTF_USESHOWWINDOW,
        wShowWindow=SW_MAXIMIZE
    )
    # Since Python is a console process, it won't be affected
    # by wShowWindow, but the argument should be silently
    # ignored
    subprocess.call(ZERO_RETURN_CMD,
                    startupinfo=startupinfo)
def test_startupinfo_copy(self):
    """bpo-34044: Popen must not modify the caller's STARTUPINFO; two runs
    with the same object must leave every field as the caller set it."""
    # bpo-34044: Popen must not modify input STARTUPINFO structure
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
    startupinfo.wShowWindow = subprocess.SW_HIDE

    # Call Popen() twice with the same startupinfo object to make sure
    # that it's not modified
    for _ in range(2):
        cmd = ZERO_RETURN_CMD
        with open(os.devnull, 'w') as null:
            proc = subprocess.Popen(cmd,
                                    stdout=null,
                                    stderr=subprocess.STDOUT,
                                    startupinfo=startupinfo)
            with proc:
                proc.communicate()
            self.assertEqual(proc.returncode, 0)

        self.assertEqual(startupinfo.dwFlags,
                         subprocess.STARTF_USESHOWWINDOW)
        self.assertIsNone(startupinfo.hStdInput)
        self.assertIsNone(startupinfo.hStdOutput)
        self.assertIsNone(startupinfo.hStdError)
        self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
        self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
    """The creationflags argument must be honored (spawns a new console)."""
    # creationflags argument
    CREATE_NEW_CONSOLE = 16
    sys.stderr.write("    a DOS box should flash briefly ...\n")
    subprocess.call(sys.executable +
                    ' -c "import time; time.sleep(0.25)"',
                    creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
    """preexec_fn is POSIX-only and must raise ValueError on Windows."""
    # invalid arguments should raise ValueError
    self.assertRaises(ValueError, subprocess.call,
                      [sys.executable, "-c",
                       "import sys; sys.exit(47)"],
                      preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
    """An env mapping with a broken keys() method must raise TypeError,
    not trip an interpreter assertion."""
    # There shouldn't be an assertion failure in Popen() in case the env
    # argument has a bad keys() method.
    class BadEnv(dict):
        keys = None
    with self.assertRaises(TypeError):
        subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
    """close_fds=True must not prevent the child from running and
    returning its exit code."""
    # close file descriptors
    rc = subprocess.call([sys.executable, "-c",
                          "import sys; sys.exit(47)"],
                         close_fds=True)
    self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
    """On Windows, close_fds controls handle inheritance: inherited with
    close_fds=False, blocked with close_fds=True or an empty handle_list,
    and handle_list with close_fds=False warns but works."""
    import msvcrt

    fds = os.pipe()
    self.addCleanup(os.close, fds[0])
    self.addCleanup(os.close, fds[1])

    handles = []
    for fd in fds:
        os.set_inheritable(fd, True)
        handles.append(msvcrt.get_osfhandle(fd))

    p = subprocess.Popen([sys.executable, "-c",
                          "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                         stdout=subprocess.PIPE, close_fds=False)
    stdout, stderr = p.communicate()
    self.assertEqual(p.returncode, 0)
    int(stdout.strip())  # Check that stdout is an integer

    p = subprocess.Popen([sys.executable, "-c",
                          "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
    stdout, stderr = p.communicate()
    self.assertEqual(p.returncode, 1)
    self.assertIn(b"OSError", stderr)

    # The same as the previous call, but with an empty handle_list
    handle_list = []
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.lpAttributeList = {"handle_list": handle_list}
    p = subprocess.Popen([sys.executable, "-c",
                          "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         startupinfo=startupinfo, close_fds=True)
    stdout, stderr = p.communicate()
    self.assertEqual(p.returncode, 1)
    self.assertIn(b"OSError", stderr)

    # Check for a warning due to using handle_list and close_fds=False
    with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.lpAttributeList = {"handle_list": handles[:]}
        p = subprocess.Popen([sys.executable, "-c",
                              "import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             startupinfo=startupinfo, close_fds=False)
        stdout, stderr = p.communicate()
        self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
    """An empty lpAttributeList dict must be accepted."""
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.lpAttributeList = {}
    subprocess.call(ZERO_RETURN_CMD,
                    startupinfo=startupinfo)
def test_empty_handle_list(self):
    """An lpAttributeList with an empty handle_list must be accepted."""
    startupinfo = subprocess.STARTUPINFO()
    startupinfo.lpAttributeList = {"handle_list": []}
    subprocess.call(ZERO_RETURN_CMD,
                    startupinfo=startupinfo)
def test_shell_sequence(self):
    # Run command through the shell (sequence)
    """shell=True with a sequence argument inherits the modified env."""
    newenv = os.environ.copy()
    newenv["FRUIT"] = "physalis"
    p = subprocess.Popen(["set"], shell=1,
                         stdout=subprocess.PIPE,
                         env=newenv)
    with p:
        # "set" prints the environment, so the marker var must appear
        self.assertIn(b"physalis", p.stdout.read())

def test_shell_string(self):
    # Run command through the shell (string)
    """shell=True with a string argument inherits the modified env."""
    newenv = os.environ.copy()
    newenv["FRUIT"] = "physalis"
    p = subprocess.Popen("set", shell=1,
                         stdout=subprocess.PIPE,
                         env=newenv)
    with p:
        self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
    # Run command through the shell (string)
    """Windows 'ansi' and 'oem' encoding aliases decode shell output."""
    for enc in ['ansi', 'oem']:
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv,
                             encoding=enc)
        with p:
            # text mode: read() returns str decoded with `enc`
            self.assertIn("physalis", p.stdout.read(), enc)

def test_call_string(self):
    # call() function with string argument on Windows
    """call() with a whole command-line string returns the child's exit code."""
    rc = subprocess.call(sys.executable +
                         ' -c "import sys; sys.exit(47)"')
    self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
    """Start a long-sleeping child, invoke Popen.<method>(*args) on it,
    and check it terminated with a nonzero returncode and empty stderr."""
    # Some win32 buildbot raises EOFError if stdin is inherited
    p = subprocess.Popen([sys.executable, "-c", """if 1:
                         import sys, time
                         sys.stdout.write('x\\n')
                         sys.stdout.flush()
                         time.sleep(30)
                         """],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    with p:
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)

        getattr(p, method)(*args)
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        returncode = p.wait()
    # Killed processes never exit cleanly.
    self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
    """Invoke Popen.<method>(*args) on a child that already exited and
    check the call does not raise; the original exit code (42) survives."""
    p = subprocess.Popen([sys.executable, "-c", """if 1:
                         import sys, time
                         sys.stdout.write('x\\n')
                         sys.stdout.flush()
                         sys.exit(42)
                         """],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    with p:
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        rc = p.wait()
    # The child exited on its own before the kill, so its code is kept.
    self.assertEqual(rc, 42)
def test_send_signal(self):
    """send_signal(SIGTERM) terminates a live child."""
    self._kill_process('send_signal', signal.SIGTERM)

def test_kill(self):
    """kill() terminates a live child."""
    self._kill_process('kill')

def test_terminate(self):
    """terminate() terminates a live child."""
    self._kill_process('terminate')

def test_send_signal_dead(self):
    """send_signal() on an already-exited child must not raise."""
    self._kill_dead_process('send_signal', signal.SIGTERM)

def test_kill_dead(self):
    """kill() on an already-exited child must not raise."""
    self._kill_dead_process('kill')

def test_terminate_dead(self):
    """terminate() on an already-exited child must not raise."""
    self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
    """Assorted tests: KeyboardInterrupt handling, getoutput, __all__."""

    class RecordingPopen(subprocess.Popen):
        """A Popen that saves a reference to each instance for testing."""
        # Shared across instances on purpose: the test drains it in cleanup.
        instances_created = []

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.instances_created.append(self)

    @mock.patch.object(subprocess.Popen, "_communicate")
    def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
                                        **kwargs):
        """Fake a SIGINT happening during Popen._communicate() and ._wait().

        This avoids the need to actually try and get test environments to send
        and receive signals reliably across platforms.  The net effect of a ^C
        happening during a blocking subprocess execution which we want to clean
        up from is a KeyboardInterrupt coming out of communicate() or wait().
        """
        mock__communicate.side_effect = KeyboardInterrupt
        try:
            with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
                # We patch out _wait() as no signal was involved so the
                # child process isn't actually going to exit rapidly.
                mock__wait.side_effect = KeyboardInterrupt
                with mock.patch.object(subprocess, "Popen",
                                       self.RecordingPopen):
                    with self.assertRaises(KeyboardInterrupt):
                        popener([sys.executable, "-c",
                                 "import time\ntime.sleep(9)\nimport sys\n"
                                 "sys.stderr.write('\\n!runaway child!\\n')"],
                                stdout=subprocess.DEVNULL, **kwargs)
                # After the first wait, only bounded-timeout waits may follow.
                for call in mock__wait.call_args_list[1:]:
                    self.assertNotEqual(
                        call, mock.call(timeout=None),
                        "no open-ended wait() after the first allowed: "
                        f"{mock__wait.call_args_list}")
                sigint_calls = []
                for call in mock__wait.call_args_list:
                    # the short grace wait performed on KeyboardInterrupt
                    # (NOTE(review): comment said "from Popen.__init__";
                    # presumably it is the SIGINT grace wait — confirm)
                    if call == mock.call(timeout=0.25):
                        sigint_calls.append(call)
                self.assertLessEqual(mock__wait.call_count, 2,
                                     msg=mock__wait.call_args_list)
                self.assertEqual(len(sigint_calls), 1,
                                 msg=mock__wait.call_args_list)
        finally:
            # cleanup the forgotten (due to our mocks) child process
            process = self.RecordingPopen.instances_created.pop()
            process.kill()
            process.wait()
            self.assertEqual([], self.RecordingPopen.instances_created)

    def test_call_keyboardinterrupt_no_kill(self):
        """^C during subprocess.call() must not kill the child outright."""
        self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)

    def test_run_keyboardinterrupt_no_kill(self):
        """^C during subprocess.run() must not kill the child outright."""
        self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)

    def test_context_manager_keyboardinterrupt_no_kill(self):
        """^C inside a `with Popen(...)` body: __exit__ must behave the same."""
        def popen_via_context_manager(*args, **kwargs):
            with subprocess.Popen(*args, **kwargs) as unused_process:
                raise KeyboardInterrupt  # Test how __exit__ handles ^C.
        self._test_keyboardinterrupt_no_kill(popen_via_context_manager)

    def test_getoutput(self):
        """getoutput/getstatusoutput return output and (status, output)."""
        self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
                         (0, 'xyzzy'))

        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist.  This is guaranteed to fail.
        dir = None
        try:
            dir = tempfile.mkdtemp()
            name = os.path.join(dir, "foo")
            status, output = subprocess.getstatusoutput(
                ("type " if mswindows else "cat ") + name)
            self.assertNotEqual(status, 0)
        finally:
            if dir is not None:
                os.rmdir(dir)

    def test__all__(self):
        """Ensure that __all__ is populated properly."""
        intentionally_excluded = {"list2cmdline", "Handle"}
        exported = set(subprocess.__all__)
        possible_exports = set()
        import types
        for name, value in subprocess.__dict__.items():
            if name.startswith('_'):
                continue
            if isinstance(value, (types.ModuleType,)):
                continue
            possible_exports.add(name)
        self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
                     "Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
    """Re-run the whole ProcessTestCase suite with poll() disabled,
    forcing subprocess to use select()-based I/O multiplexing."""

    def setUp(self):
        # Swap in SelectSelector for the duration of each test.
        self.orig_selector = subprocess._PopenSelector
        subprocess._PopenSelector = selectors.SelectSelector
        ProcessTestCase.setUp(self)

    def tearDown(self):
        # Restore the original selector so other test classes are unaffected.
        subprocess._PopenSelector = self.orig_selector
        ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
    """Check quoting of executable/argument paths containing spaces."""

    def setUp(self):
        super().setUp()
        # Create a helper script whose *filename* contains a space.
        f, fname = tempfile.mkstemp(".py", "te st")
        self.fname = fname.lower ()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
                 )
        os.close(f)

    def tearDown(self):
        os.remove(self.fname)
        super().tearDown()

    def with_spaces(self, *args, **kwargs):
        """Run the helper and assert argv was split into exactly 2 args."""
        kwargs['stdout'] = subprocess.PIPE
        p = subprocess.Popen(*args, **kwargs)
        with p:
            self.assertEqual(
              p.stdout.read ().decode("mbcs"),
              "2 [%r, 'ab cd']" % self.fname
            )

    def test_shell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"), shell=1)

    def test_shell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)

    def test_noshell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"))

    def test_noshell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
    """Tests of Popen used as a context manager (__enter__/__exit__)."""

    def test_pipe(self):
        """Pipes are readable inside the with-block and closed after it."""
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.stdout.write('stdout');"
                               "sys.stderr.write('stderr');"],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            self.assertEqual(proc.stdout.read(), b"stdout")
            self.assertStderrEqual(proc.stderr.read(), b"stderr")

        self.assertTrue(proc.stdout.closed)
        self.assertTrue(proc.stderr.closed)

    def test_returncode(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys; sys.exit(100)"]) as proc:
            pass
        # __exit__ calls wait(), so the returncode should be set
        self.assertEqual(proc.returncode, 100)

    def test_communicate_stdin(self):
        """communicate() inside the with-block feeds stdin and reaps."""
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.exit(sys.stdin.read() == 'context')"],
                              stdin=subprocess.PIPE) as proc:
            proc.communicate(b"context")
            # sys.exit(True) -> exit code 1
            self.assertEqual(proc.returncode, 1)

    def test_invalid_args(self):
        """A nonexistent command raises before the with-body runs."""
        with self.assertRaises(NONEXISTING_ERRORS):
            with subprocess.Popen(NONEXISTING_CMD,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass

    def test_broken_pipe_cleanup(self):
        """Broken pipe error should not prevent wait() (Issue 21619)"""
        proc = subprocess.Popen(ZERO_RETURN_CMD,
                                stdin=subprocess.PIPE,
                                bufsize=support.PIPE_MAX_SIZE*2)
        proc = proc.__enter__()
        # Prepare to send enough data to overflow any OS pipe buffering and
        # guarantee a broken pipe error. Data is held in BufferedWriter
        # buffer until closed.
        proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
        self.assertIsNone(proc.returncode)
        # EPIPE expected under POSIX; EINVAL under Windows
        self.assertRaises(OSError, proc.__exit__, None, None, None)
        self.assertEqual(proc.returncode, 0)
        self.assertTrue(proc.stdin.closed)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
kivy_ui.py | import json
import re
import time
from copy import copy
from datetime import datetime
from functools import partial
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from collections import namedtuple
from kivy.logger import Logger
import io
import os
import atexit
import yaml
from PIL import Image as PilImage
import pandas as pd
import numpy as np
import plotly.express as px
from kivy.clock import Clock
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import Image
from kivy.core.image import Image as CoreImage
from kivy.properties import NumericProperty, ObjectProperty, StringProperty, \
ListProperty, BooleanProperty
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.lang.builder import Builder
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.scrollview import ScrollView
from kivy.uix.spinner import SpinnerOption, Spinner
from donkeycar import load_config
from donkeycar.parts.keras import KerasMemory
from donkeycar.parts.tub_v2 import Tub
from donkeycar.pipeline.augmentations import ImageAugmentation
from donkeycar.pipeline.database import PilotDatabase
from donkeycar.pipeline.types import TubRecord
from donkeycar.utils import get_model_by_type
from donkeycar.pipeline.training import train
# Keep kivy's logger from also emitting through the root logger.
Logger.propagate = False
# Load the UI layout file that lives next to this module.
Builder.load_file(os.path.join(os.path.dirname(__file__), 'ui.kv'))
Window.clearcolor = (0.2, 0.2, 0.2, 1)  # dark grey app background
# Placeholder entry shown in the field drop-down spinner.
LABEL_SPINNER_TEXT = 'Add/remove'

# Data struct to show tub field in the progress bar, containing the name,
# the name of the maximum value in the config file and if it is centered.
FieldProperty = namedtuple('FieldProperty',
                           ['field', 'max_value_id', 'centered'])
def get_norm_value(value, cfg, field_property, normalised=True):
    """Scale a field value by the maximum taken from the car config.

    :param value:           raw (or normalised) field value
    :param cfg:             car config object holding the max-value attribute
    :param field_property:  FieldProperty naming the config attribute to use
    :param normalised:      True divides by the max, False multiplies
    :return:                the (de-)normalised value
    """
    # Missing config attribute falls back to 1.0, i.e. a no-op scaling.
    limit = getattr(cfg, field_property.max_value_id, 1.0)
    if normalised:
        return value / limit
    return value * limit
def tub_screen():
    """Return the running app's tub screen, or None if no app is running."""
    # Call get_running_app() once instead of twice: the original called it
    # for the check and again for the attribute access.
    app = App.get_running_app()
    return app.tub_screen if app else None


def pilot_screen():
    """Return the running app's pilot screen, or None if no app is running."""
    app = App.get_running_app()
    return app.pilot_screen if app else None


def train_screen():
    """Return the running app's train screen, or None if no app is running."""
    app = App.get_running_app()
    return app.train_screen if app else None


def car_screen():
    """Return the running app's car screen, or None if no app is running."""
    app = App.get_running_app()
    return app.car_screen if app else None
def recursive_update(target, source):
    """ Recursively update dictionary """
    # Only dict-into-dict merges are handled; anything else signals the
    # caller (via False) to overwrite the value wholesale.
    if not (isinstance(target, dict) and isinstance(source, dict)):
        return False
    for key, new_value in source.items():
        if not recursive_update(target.get(key), new_value):
            target[key] = new_value
    return True
def decompose(field):
    """ Function to decompose a string vector field like 'gyroscope_1' into a
        tuple ('gyroscope', 1) """
    # rpartition splits on the *last* underscore only.
    prefix, separator, suffix = field.rpartition('_')
    if separator and suffix.isdigit():
        return prefix, int(suffix)
    return field, None
class RcFileHandler:
    """ This handles the config file which stores the data, like the field
        mapping for displaying of bars and last opened car, tub directory. """

    # These entries are expected in every tub, so they don't need to be in
    # the file
    known_entries = [
        FieldProperty('user/angle', '', centered=True),
        FieldProperty('user/throttle', '', centered=False),
        FieldProperty('pilot/angle', '', centered=True),
        FieldProperty('pilot/throttle', '', centered=False),
    ]

    def __init__(self, file_path='~/.donkeyrc'):
        """Load (or default-initialise) the rc data and register the save hook.

        :param file_path: rc file location; '~' is expanded to the home dir
        """
        self.file_path = os.path.expanduser(file_path)
        self.data = self.create_data()
        # File contents override / extend the defaults.
        recursive_update(self.data, self.read_file())
        self.field_properties = self.create_field_properties()

        def exit_hook():
            self.write_file()
        # Automatically save config when program ends
        atexit.register(exit_hook)

    def create_field_properties(self):
        """ Merges known field properties with the ones from the file """
        field_properties = {entry.field: entry for entry in self.known_entries}
        field_list = self.data.get('field_mapping')
        if field_list is None:
            field_list = {}
        for entry in field_list:
            assert isinstance(entry, dict), \
                'Dictionary required in each entry in the field_mapping list'
            field_property = FieldProperty(**entry)
            field_properties[field_property.field] = field_property
        return field_properties

    def create_data(self):
        """Return the default rc data used when no file entry exists."""
        data = dict()
        data['user_pilot_map'] = {'user/throttle': 'pilot/throttle',
                                  'user/angle': 'pilot/angle'}
        return data

    def read_file(self):
        """Parse the rc file into a dict; return {} if the file is missing."""
        if os.path.exists(self.file_path):
            with open(self.file_path) as f:
                # NOTE(review): FullLoader can construct arbitrary Python
                # objects; acceptable for a user-owned rc file, never for
                # untrusted input.
                data = yaml.load(f, Loader=yaml.FullLoader)
                Logger.info(f'Donkeyrc: Donkey file {self.file_path} loaded.')
                return data
        else:
            Logger.warn(f'Donkeyrc: Donkey file {self.file_path} does not '
                        f'exist.')
            return {}

    def write_file(self):
        """Dump self.data (stamped with the current time) to the rc file."""
        if os.path.exists(self.file_path):
            Logger.info(f'Donkeyrc: Donkey file {self.file_path} updated.')
        with open(self.file_path, mode='w') as f:
            self.data['time_stamp'] = datetime.now()
            data = yaml.dump(self.data, f)
            return data
rc_handler = RcFileHandler()
class MySpinnerOption(SpinnerOption):
    """ Customization for Spinner """
    # Styling is done in ui.kv; no Python-side behaviour needed.
    pass


class MySpinner(Spinner):
    """ Customization of Spinner drop down menu """
    def __init__(self, **kwargs):
        # Use the custom option class for every drop-down entry.
        super().__init__(option_cls=MySpinnerOption, **kwargs)
class FileChooserPopup(Popup):
    """ File Chooser popup window"""
    # Callback invoked with the selection when the user confirms.
    load = ObjectProperty()
    # Directory the chooser starts in.
    root_path = StringProperty()
    # Glob patterns restricting the selectable files.
    filters = ListProperty()
class FileChooserBase:
    """ Base class for file chooser widgets"""
    file_path = StringProperty("No file chosen")
    popup = ObjectProperty(None)
    root_path = os.path.expanduser('~')
    title = StringProperty(None)
    filters = ListProperty()

    def open_popup(self):
        """Create and show the file chooser popup."""
        self.popup = FileChooserPopup(load=self.load, root_path=self.root_path,
                                      title=self.title, filters=self.filters)
        self.popup.open()

    def load(self, selection):
        """ Method to load the chosen file into the path and call an action"""
        self.file_path = str(selection[0])
        self.popup.dismiss()
        self.load_action()

    def load_action(self):
        """ Virtual method to run when file_path has been updated """
        pass
class ConfigManager(BoxLayout, FileChooserBase):
    """ Class to manage loading of the config file from the car directory"""
    config = ObjectProperty(None)
    file_path = StringProperty(rc_handler.data.get('car_dir', ''))

    def load_action(self):
        """ Load the config from the file path"""
        if self.file_path:
            try:
                path = os.path.join(self.file_path, 'config.py')
                self.config = load_config(path)
                # If load successful, store into app config
                rc_handler.data['car_dir'] = self.file_path
            except FileNotFoundError:
                Logger.error(f'Config: Directory {self.file_path} has no '
                             f'config.py')
            except Exception as e:
                Logger.error(f'Config: {e}')
class TubLoader(BoxLayout, FileChooserBase):
    """ Class to manage loading or reloading of the Tub from the tub directory.
        Loading triggers many actions on other widgets of the app. """
    file_path = StringProperty(rc_handler.data.get('last_tub', ''))
    tub = ObjectProperty(None)
    # NOTE: shadows builtin len(); kept because ui.kv binds to this property.
    len = NumericProperty(1)
    records = None

    def load_action(self):
        """ Update tub from the file path"""
        if self.update_tub():
            # If update successful, store into app config
            rc_handler.data['last_tub'] = self.file_path

    def update_tub(self, event=None):
        """(Re-)load the tub, apply the current filter and refresh the UI.

        :param event: unused; present so Clock/bindings can call this
        :return:      True on success, False otherwise
        """
        if not self.file_path:
            return False
        # If config not yet loaded return
        cfg = tub_screen().ids.config_manager.config
        if not cfg:
            return False
        # At least check if there is a manifest file in the tub path
        if not os.path.exists(os.path.join(self.file_path, 'manifest.json')):
            tub_screen().status(f'Path {self.file_path} is not a valid tub.')
            return False
        try:
            self.tub = Tub(self.file_path)
        except Exception as e:
            tub_screen().status(f'Failed loading tub: {str(e)}')
            return False
        # Check if filter is set in tub screen
        expression = tub_screen().ids.tub_filter.filter_expression

        # Use filter, this defines the function
        def select(underlying):
            if not expression:
                return True
            else:
                try:
                    # `record` is referenced by the eval'd expression.
                    # NOTE(review): eval of a user-typed filter — fine for a
                    # local desktop tool, never expose to untrusted input.
                    record = TubRecord(cfg, self.tub.base_path, underlying)
                    res = eval(expression)
                    return res
                except KeyError as err:
                    Logger.error(f'Filter: {err}')
                    return True

        self.records = [TubRecord(cfg, self.tub.base_path, record)
                        for record in self.tub if select(record)]
        self.len = len(self.records)
        if self.len > 0:
            tub_screen().index = 0
            tub_screen().ids.data_plot.update_dataframe_from_tub()
            msg = f'Loaded tub {self.file_path} with {self.len} records'
        else:
            msg = f'No records in tub {self.file_path}'
        if expression:
            msg += f' using filter {tub_screen().ids.tub_filter.record_filter}'
        tub_screen().status(msg)
        return True
class LabelBar(BoxLayout):
    """ Widget that combines a label with a progress bar. This is used to
        display the record fields in the data panel."""
    field = StringProperty()
    field_property = ObjectProperty()
    config = ObjectProperty()
    msg = ''

    def update(self, record):
        """ This function is called everytime the current record is updated

        :param record: the record whose `underlying` dict is displayed
        """
        if not record:
            return
        field, index = decompose(self.field)
        if field in record.underlying:
            val = record.underlying[field]
            if index is not None:
                val = val[index]
            # Update bar if a field property for this field is known
            if self.field_property:
                norm_value = get_norm_value(val, self.config,
                                            self.field_property)
                new_bar_val = (norm_value + 1) * 50 if \
                    self.field_property.centered else norm_value * 100
                self.ids.bar.value = new_bar_val
            self.ids.field_label.text = self.field
            # np.floating covers all numpy float widths (16/32/64); the
            # original special-cased only np.float32 and would have fallen
            # through to str() for np.float16.
            if isinstance(val, (float, np.floating)):
                text = f'{val:+07.3f}'
            elif isinstance(val, int):
                text = f'{val:10}'
            else:
                text = str(val)
            self.ids.value_label.text = text
        else:
            Logger.error(f'Record: Bad record {record.underlying["_index"]} - '
                         f'missing field {self.field}')
class DataPanel(BoxLayout):
    """ Data panel widget that contains the label/bar widgets and the drop
        down menu to select/deselect fields."""
    record = ObjectProperty()
    # dual mode is used in the pilot arena where we only show angle and
    # throttle or speed
    dual_mode = BooleanProperty(False)
    auto_text = StringProperty(LABEL_SPINNER_TEXT)
    throttle_field = StringProperty('user/throttle')
    link = False

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.labels = {}
        # NOTE(review): assigns an ObjectProperty *instance* as a plain
        # attribute; the real screen object is set by the owner afterwards.
        self.screen = ObjectProperty()

    def add_remove(self):
        """ Method to add or remove a LabelBar. Depending on the value of the
            drop down menu the LabelBar is added if it is not present otherwise
            removed."""
        field = self.ids.data_spinner.text
        # Fix: compare by equality, not identity. The spinner text is not
        # guaranteed to be the very same string object as the module
        # constant, so `is` could silently fail the placeholder check.
        if field == LABEL_SPINNER_TEXT:
            return
        if field in self.labels and not self.dual_mode:
            self.remove_widget(self.labels[field])
            del self.labels[field]
            self.screen.status(f'Removing {field}')
        else:
            # in dual mode replace the second entry with the new one
            if self.dual_mode and len(self.labels) == 2:
                k, v = list(self.labels.items())[-1]
                self.remove_widget(v)
                del self.labels[k]
            field_property = rc_handler.field_properties.get(decompose(field)[0])
            cfg = tub_screen().ids.config_manager.config
            lb = LabelBar(field=field, field_property=field_property, config=cfg)
            self.labels[field] = lb
            self.add_widget(lb)
            lb.update(self.record)
            if len(self.labels) == 2:
                self.throttle_field = field
            self.screen.status(f'Adding {field}')
        if self.screen.name == 'tub':
            self.screen.ids.data_plot.plot_from_current_bars()
        # Reset the spinner back to its placeholder entry.
        self.ids.data_spinner.text = LABEL_SPINNER_TEXT
        self.auto_text = field

    def on_record(self, obj, record):
        """ Kivy function that is called every time self.record changes"""
        for v in self.labels.values():
            v.update(record)

    def clear(self):
        """Remove all label bars from the panel."""
        for v in self.labels.values():
            self.remove_widget(v)
        self.labels.clear()
class FullImage(Image):
    """ Widget to display an image that fills the space. """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.core_image = None

    def update(self, record):
        """ This method is called every time a record gets updated. """
        try:
            img_arr = self.get_image(record)
            pil_image = PilImage.fromarray(img_arr)
            bytes_io = io.BytesIO()
            pil_image.save(bytes_io, format='png')
            bytes_io.seek(0)
            self.core_image = CoreImage(bytes_io, ext='png')
            self.texture = self.core_image.texture
        except KeyError as e:
            # Fix: kivy's Logger is a logging.Logger, so the original
            # Logger.error('msg:', e) treated `e` as a %-format argument
            # with no placeholder and garbled/dropped the message.
            Logger.error(f'Record: Missing key: {e}')
        except Exception as e:
            Logger.error(f'Record: Bad record: {e}')

    def get_image(self, record):
        """Return the raw image array of the record (no caching)."""
        return record.image(cached=False)
class ControlPanel(BoxLayout):
    """ Class for control panel navigation. """
    screen = ObjectProperty()
    speed = NumericProperty(1.0)
    record_display = StringProperty()
    # Clock event driving the stepping loop; None while stopped.
    clock = None
    # Direction of the last continuous run, used by restart().
    fwd = None

    def start(self, fwd=True, continuous=False):
        """
        Method to cycle through records if either single <,> or continuous
        <<, >> buttons are pressed
        :param fwd:         If we go forward or backward
        :param continuous:  If we do <<, >> or <, >
        :return:            None
        """
        # NOTE(review): short pause before scheduling — presumably to
        # debounce the button press; confirm before removing.
        time.sleep(0.1)
        call = partial(self.step, fwd, continuous)
        if continuous:
            self.fwd = fwd
            # Replay speed is a multiple of the car's drive loop frequency.
            s = float(self.speed) * tub_screen().ids.config_manager.config.DRIVE_LOOP_HZ
            cycle_time = 1.0 / s
        else:
            cycle_time = 0.08
        self.clock = Clock.schedule_interval(call, cycle_time)

    def step(self, fwd=True, continuous=False, *largs):
        """
        Updating a single step and cap/floor the index so we stay w/in the tub.
        :param fwd:         If we go forward or backward
        :param continuous:  If we are in continuous mode <<, >>
        :param largs:       dummy
        :return:            None
        """
        # Wrap around at both ends of the tub.
        new_index = self.screen.index + (1 if fwd else -1)
        if new_index >= tub_screen().ids.tub_loader.len:
            new_index = 0
        elif new_index < 0:
            new_index = tub_screen().ids.tub_loader.len - 1
        self.screen.index = new_index
        msg = f'Donkey {"run" if continuous else "step"} ' \
              f'{"forward" if fwd else "backward"}'
        if not continuous:
            msg += f' - you can also use {"<right>" if fwd else "<left>"} key'
        else:
            msg += ' - you can toggle run/stop with <space>'
        self.screen.status(msg)

    def stop(self):
        """Cancel the scheduled stepping loop, if any."""
        if self.clock:
            self.clock.cancel()
            self.clock = None

    def restart(self):
        """Restart the continuous run in the last used direction."""
        if self.clock:
            self.stop()
            self.start(self.fwd, True)

    def update_speed(self, up=True):
        """ Method to update the speed on the controller"""
        values = self.ids.control_spinner.values
        idx = values.index(self.ids.control_spinner.text)
        if up and idx < len(values) - 1:
            self.ids.control_spinner.text = values[idx + 1]
        elif not up and idx > 0:
            self.ids.control_spinner.text = values[idx - 1]

    def set_button_status(self, disabled=True):
        """ Method to disable(enable) all buttons. """
        self.ids.run_bwd.disabled = self.ids.run_fwd.disabled = \
            self.ids.step_fwd.disabled = self.ids.step_bwd.disabled = disabled

    def on_keyboard(self, key, scancode):
        """ Method to check which keystroke has been sent. """
        if key == ' ':
            # Space toggles continuous run / stop.
            if self.clock and self.clock.is_triggered:
                self.stop()
                self.set_button_status(disabled=False)
                self.screen.status('Donkey stopped')
            else:
                self.start(continuous=True)
                self.set_button_status(disabled=True)
        elif scancode == 79:
            # presumably the <right> arrow key — single step forward
            self.step(fwd=True)
        elif scancode == 80:
            # presumably the <left> arrow key — single step backward
            self.step(fwd=False)
        elif scancode == 45:
            self.update_speed(up=False)
        elif scancode == 46:
            self.update_speed(up=True)
class PaddedBoxLayout(BoxLayout):
    """BoxLayout variant; padding/spacing are defined in ui.kv."""
    pass
class TubEditor(PaddedBoxLayout):
    """ Tub editor widget. Contains left/right index interval and the
        manipulator buttons for deleting / restoring and reloading """
    lr = ListProperty([0, 0])

    def set_lr(self, is_l=True):
        """ Sets left or right range to the current tub record index """
        if not tub_screen().current_record:
            return
        self.lr[0 if is_l else 1] = tub_screen().current_record.underlying['_index']

    def del_lr(self, is_del):
        """ Deletes or restores records in chosen range """
        tub = tub_screen().ids.tub_loader.tub
        if self.lr[1] >= self.lr[0]:
            # NOTE(review): range(*self.lr) excludes self.lr[1] itself —
            # confirm the right marker is intentionally exclusive.
            selected = list(range(*self.lr))
        else:
            # Right marker before left: wrap around the end of the tub.
            last_id = tub.manifest.current_index
            selected = list(range(self.lr[0], last_id))
            selected += list(range(self.lr[1]))
        tub.delete_records(selected) if is_del else tub.restore_records(selected)
class TubFilter(PaddedBoxLayout):
    """ Tub filter widget. """
    filter_expression = StringProperty(None)
    record_filter = StringProperty(rc_handler.data.get('record_filter', ''))

    def update_filter(self):
        """Validate the typed filter against the current record and store it."""
        filter_text = self.ids.record_filter.text
        # empty string resets the filter
        if filter_text == '':
            self.record_filter = ''
            self.filter_expression = ''
            rc_handler.data['record_filter'] = self.record_filter
            tub_screen().status(f'Filter cleared')
            return
        filter_expression = self.create_filter_string(filter_text)
        try:
            # `record` is referenced inside the eval'd expression.
            # NOTE(review): eval of user-typed text — acceptable for a local
            # desktop tool, never for untrusted input.
            record = tub_screen().current_record
            res = eval(filter_expression)
            status = f'Filter result on current record: {res}'
            if isinstance(res, bool):
                self.record_filter = filter_text
                self.filter_expression = filter_expression
                rc_handler.data['record_filter'] = self.record_filter
            else:
                status += ' - non bool expression can\'t be applied'
            status += ' - press <Reload tub> to see effect'
            tub_screen().status(status)
        except Exception as e:
            tub_screen().status(f'Filter error on current record: {e}')

    @staticmethod
    def create_filter_string(filter_text, record_name='record'):
        """ Converts text like 'user/angle' into 'record.underlying['user/angle']
            so that it can be used in a filter. Will replace only expressions that
            are found in the tub inputs list.

            :param filter_text: input text like 'user/throttle > 0.1'
            :param record_name: name of the record in the expression
            :return:            updated string that has all input fields wrapped
        """
        for field in tub_screen().current_record.underlying.keys():
            field_list = filter_text.split(field)
            if len(field_list) > 1:
                filter_text = f'{record_name}.underlying["{field}"]'\
                    .join(field_list)
        return filter_text
class DataPlot(PaddedBoxLayout):
    """ Data plot panel which embeds matplotlib interactive graph"""
    df = ObjectProperty(force_dispatch=True, allownone=True)

    def plot_from_current_bars(self, in_app=True):
        """ Plotting from current selected bars. The DataFrame for plotting
            should contain all bars except for strings fields and all data is
            selected if bars are empty.

            :param in_app: plot in the embedded graph, otherwise open plotly
        """
        # Fix: guard before any access. The original subscripted self.df
        # first and only then checked `if df is None`, so a None DataFrame
        # crashed before ever reaching the (dead) check.
        if self.df is None:
            return
        tub = tub_screen().ids.tub_loader.tub
        field_map = dict(zip(tub.manifest.inputs, tub.manifest.types))
        # Use selected fields or all fields if nothing is selected;
        # drop image and string columns which cannot be plotted.
        all_cols = tub_screen().ids.data_panel.labels.keys() or self.df.columns
        cols = [c for c in all_cols if decompose(c)[0] in field_map
                and field_map[decompose(c)[0]] not in ('image_array', 'str')]
        df = self.df[cols]
        # Don't plot the milliseconds time stamp as this is a too big number
        df = df.drop(labels=['_timestamp_ms'], axis=1, errors='ignore')
        if in_app:
            tub_screen().ids.graph.df = df
        else:
            fig = px.line(df, x=df.index, y=df.columns, title=tub.base_path)
            fig.update_xaxes(rangeslider=dict(visible=True))
            fig.show()

    def unravel_vectors(self):
        """ Unravels vector and list entries in tub which are created
            when the DataFrame is created from a list of records"""
        manifest = tub_screen().ids.tub_loader.tub.manifest
        for k, v in zip(manifest.inputs, manifest.types):
            if v == 'vector' or v == 'list':
                # Expand column k into k_0 ... k_{dim-1} scalar columns.
                dim = len(tub_screen().current_record.underlying[k])
                df_keys = [k + f'_{i}' for i in range(dim)]
                self.df[df_keys] = pd.DataFrame(self.df[k].tolist(),
                                                index=self.df.index)
                self.df.drop(k, axis=1, inplace=True)

    def update_dataframe_from_tub(self):
        """ Called from TubManager when a tub is reloaded/recreated. Fills
            the DataFrame from records, and updates the dropdown menu in the
            data panel."""
        generator = (t.underlying for t in tub_screen().ids.tub_loader.records)
        self.df = pd.DataFrame(generator).dropna()
        to_drop = {'cam/image_array'}
        self.df.drop(labels=to_drop, axis=1, errors='ignore', inplace=True)
        self.df.set_index('_index', inplace=True)
        self.unravel_vectors()
        tub_screen().ids.data_panel.ids.data_spinner.values = self.df.columns
        self.plot_from_current_bars()
class TabBar(BoxLayout):
    """Row of tab buttons; the active tab's button is kept disabled."""
    manager = ObjectProperty(None)

    def disable_only(self, bar_name):
        """Disable the button belonging to `bar_name`, enable all others."""
        target = bar_name + '_btn'
        for widget_id, widget in self.ids.items():
            widget.disabled = (widget_id == target)
class TubScreen(Screen):
    """ First screen of the app managing the tub data. """
    index = NumericProperty(None, force_dispatch=True)
    current_record = ObjectProperty(None)
    keys_enabled = BooleanProperty(True)

    def initialise(self, e):
        """Load config and tub once the screen is ready; `e` is unused."""
        self.ids.config_manager.load_action()
        self.ids.tub_loader.update_tub()

    def on_index(self, obj, index):
        """ Kivy method that is called if self.index changes"""
        self.current_record = self.ids.tub_loader.records[index]
        self.ids.slider.value = index

    def on_current_record(self, obj, record):
        """ Kivy method that is called if self.current_record changes."""
        self.ids.img.update(record)
        i = record.underlying['_index']
        self.ids.control_panel.record_display = f"Record {i:06}"

    def status(self, msg):
        """Show `msg` in the screen's status bar."""
        self.ids.status.text = msg

    def on_keyboard(self, instance, keycode, scancode, key, modifiers):
        """Forward keystrokes to the control panel unless keys are disabled."""
        if self.keys_enabled:
            self.ids.control_panel.on_keyboard(key, scancode)
class PilotLoader(BoxLayout, FileChooserBase):
    """ Class to manage loading of pilot models from disk. """
    # Slot id ('1' or '2') distinguishing the two pilots on the pilot screen.
    num = StringProperty()
    model_type = StringProperty()
    pilot = ObjectProperty(None)
    filters = ['*.h5', '*.tflite', '*.savedmodel', '*.trt']

    def load_action(self):
        """Load the selected model file into the pilot and remember it."""
        if self.file_path and self.pilot:
            try:
                self.pilot.load(os.path.join(self.file_path))
                rc_handler.data['pilot_' + self.num] = self.file_path
                rc_handler.data['model_type_' + self.num] = self.model_type
                self.ids.pilot_spinner.text = self.model_type
                Logger.info(f'Pilot: Successfully loaded {self.file_path}')
            except FileNotFoundError:
                Logger.error(f'Pilot: Model {self.file_path} not found')
            except Exception as e:
                Logger.error(f'Failed loading {self.file_path}: {e}')

    def on_model_type(self, obj, model_type):
        """ Kivy method that is called if self.model_type changes. """
        if self.model_type and self.model_type != 'Model type':
            cfg = tub_screen().ids.config_manager.config
            if cfg:
                self.pilot = get_model_by_type(self.model_type, cfg)
                self.ids.pilot_button.disabled = False
                # Restrict the file chooser to extensions matching the type.
                if 'tflite' in self.model_type:
                    self.filters = ['*.tflite']
                elif 'tensorrt' in self.model_type:
                    self.filters = ['*.trt']
                else:
                    self.filters = ['*.h5', '*.savedmodel']

    def on_num(self, e, num):
        """ Kivy method that is called if self.num changes. """
        # Restore the last used model path/type for this pilot slot.
        self.file_path = rc_handler.data.get('pilot_' + self.num, '')
        self.model_type = rc_handler.data.get('model_type_' + self.num, '')
class OverlayImage(FullImage):
    """ Widget to display the image and the user/pilot data for the tub. """
    pilot = ObjectProperty()
    pilot_record = ObjectProperty()
    throttle_field = StringProperty('user/throttle')

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def augment(self, img_arr):
        """Apply the pilot screen's active transformations/augmentations."""
        if pilot_screen().trans_list:
            img_arr = pilot_screen().transformation.run(img_arr)
        if pilot_screen().aug_list:
            img_arr = pilot_screen().augmentation.run(img_arr)
        return img_arr

    def get_image(self, record):
        """Return the record image with user (green) and pilot (blue)
        steering/throttle lines drawn in; also fills self.pilot_record."""
        from donkeycar.management.makemovie import MakeMovie
        orig_img_arr = super().get_image(record)
        aug_img_arr = self.augment(orig_img_arr)
        img_arr = copy(aug_img_arr)
        angle = record.underlying['user/angle']
        throttle = get_norm_value(
            record.underlying[self.throttle_field],
            tub_screen().ids.config_manager.config,
            rc_handler.field_properties[self.throttle_field])
        rgb = (0, 255, 0)
        MakeMovie.draw_line_into_image(angle, throttle, False, img_arr, rgb)
        if not self.pilot:
            return img_arr

        # Fall back to a zero prediction if inference fails.
        output = (0, 0)
        try:
            # Not each model is supported in each interpreter
            output = self.pilot.run(aug_img_arr)
        except Exception as e:
            Logger.error(e)

        rgb = (0, 0, 255)
        MakeMovie.draw_line_into_image(output[0], output[1], True, img_arr, rgb)
        out_record = copy(record)
        out_record.underlying['pilot/angle'] = output[0]
        # rename and denormalise the throttle output
        pilot_throttle_field \
            = rc_handler.data['user_pilot_map'][self.throttle_field]
        out_record.underlying[pilot_throttle_field] \
            = get_norm_value(output[1], tub_screen().ids.config_manager.config,
                             rc_handler.field_properties[self.throttle_field],
                             normalised=False)
        self.pilot_record = out_record
        return img_arr
class PilotScreen(Screen):
    """ Screen to do the pilot vs pilot comparison. """
    # force_dispatch makes on_index fire even when the same value is re-set
    index = NumericProperty(None, force_dispatch=True)
    current_record = ObjectProperty(None)
    keys_enabled = BooleanProperty(False)
    # currently active augmentation names, e.g. ['MULTIPLY', 'BLUR']
    aug_list = ListProperty(force_dispatch=True)
    augmentation = ObjectProperty()
    # currently active transformation names, e.g. ['TRAPEZE', 'CROP']
    trans_list = ListProperty(force_dispatch=True)
    transformation = ObjectProperty()
    config = ObjectProperty()

    def on_index(self, obj, index):
        """ Kivy method that is called if self.index changes. Here we update
            self.current_record and the slider value. """
        if tub_screen().ids.tub_loader.records:
            self.current_record = tub_screen().ids.tub_loader.records[index]
            self.ids.slider.value = index

    def on_current_record(self, obj, record):
        """ Kivy method that is called when self.current_record changes. Here
            we update the images and the control panel entry. """
        i = record.underlying['_index']
        self.ids.pilot_control.record_display = f"Record {i:06}"
        # both overlay images re-render from the same record
        self.ids.img_1.update(record)
        self.ids.img_2.update(record)

    def initialise(self, e):
        """ Restore both pilots from the persisted state and set up the data
            spinners. """
        self.ids.pilot_loader_1.on_model_type(None, None)
        self.ids.pilot_loader_1.load_action()
        self.ids.pilot_loader_2.on_model_type(None, None)
        self.ids.pilot_loader_2.load_action()
        mapping = copy(rc_handler.data['user_pilot_map'])
        # angle is always shown, so it is not offered in the spinner
        del(mapping['user/angle'])
        self.ids.data_in.ids.data_spinner.values = mapping.keys()
        self.ids.data_in.ids.data_spinner.text = 'user/angle'
        self.ids.data_panel_1.ids.data_spinner.disabled = True
        self.ids.data_panel_2.ids.data_spinner.disabled = True

    def map_pilot_field(self, text):
        """ Method to return user -> pilot mapped fields except for the
            initial value called Add/remove. """
        if text == LABEL_SPINNER_TEXT:
            return text
        return rc_handler.data['user_pilot_map'][text]

    def set_brightness(self, val=None):
        # toggles the MULTIPLY augmentation; val feeds AUG_MULTIPLY_RANGE
        if self.ids.button_bright.state == 'down':
            self.config.AUG_MULTIPLY_RANGE = (val, val)
            if 'MULTIPLY' not in self.aug_list:
                self.aug_list.append('MULTIPLY')
        elif 'MULTIPLY' in self.aug_list:
            self.aug_list.remove('MULTIPLY')
        # update dependency
        self.on_aug_list(None, None)

    def set_blur(self, val=None):
        # toggles the BLUR augmentation; val feeds AUG_BLUR_RANGE
        if self.ids.button_blur.state == 'down':
            self.config.AUG_BLUR_RANGE = (val, val)
            if 'BLUR' not in self.aug_list:
                self.aug_list.append('BLUR')
        elif 'BLUR' in self.aug_list:
            self.aug_list.remove('BLUR')
        # update dependency
        self.on_aug_list(None, None)

    def on_aug_list(self, obj, aug_list):
        """ Rebuild the augmentation pipeline and re-render current record. """
        self.config.AUGMENTATIONS = self.aug_list
        self.augmentation = ImageAugmentation(self.config, 'AUGMENTATIONS')
        self.on_current_record(None, self.current_record)

    def on_trans_list(self, obj, trans_list):
        """ Rebuild the transformation pipeline and re-render current
            record. """
        self.config.TRANSFORMATIONS = self.trans_list
        self.transformation = ImageAugmentation(self.config, 'TRANSFORMATIONS')
        self.on_current_record(None, self.current_record)

    def set_mask(self, state):
        # 'down'/'normal' button state toggles the trapezoidal mask transform
        if state == 'down':
            self.ids.status.text = 'Trapezoidal mask on'
            self.trans_list.append('TRAPEZE')
        else:
            self.ids.status.text = 'Trapezoidal mask off'
            if 'TRAPEZE' in self.trans_list:
                self.trans_list.remove('TRAPEZE')

    def set_crop(self, state):
        # 'down'/'normal' button state toggles the crop transform
        if state == 'down':
            self.ids.status.text = 'Crop on'
            self.trans_list.append('CROP')
        else:
            self.ids.status.text = 'Crop off'
            if 'CROP' in self.trans_list:
                self.trans_list.remove('CROP')

    def status(self, msg):
        """ Show msg in the screen's status bar. """
        self.ids.status.text = msg

    def on_keyboard(self, instance, keycode, scancode, key, modifiers):
        # forward keyboard events to the control panel when this screen is
        # active
        if self.keys_enabled:
            self.ids.pilot_control.on_keyboard(key, scancode)
class ScrollableLabel(ScrollView):
    """ Scrollable text container; behaviour is defined in the kv file. """
    pass
class DataFrameLabel(Label):
    """ Label used to render dataframe text; styling lives in the kv file. """
    pass
class TransferSelector(BoxLayout, FileChooserBase):
    """ Class to select transfer model"""
    # only keras .h5 models can serve as transfer-learning starting points
    filters = ['*.h5']
class TrainScreen(Screen):
    """ Class showing the training screen. """
    config = ObjectProperty(force_dispatch=True, allownone=True)
    # PilotDatabase built from config; refreshed after each training run
    database = ObjectProperty()
    pilot_df = ObjectProperty(force_dispatch=True)
    tub_df = ObjectProperty(force_dispatch=True)

    def train_call(self, model_type, *args):
        """ Run a (blocking) training job; executed on a worker thread via
            self.train(). """
        # remove car directory from path
        tub_path = tub_screen().ids.tub_loader.tub.base_path
        transfer = self.ids.transfer_spinner.text
        if transfer != 'Choose transfer model':
            transfer = os.path.join(self.config.MODELS_PATH, transfer + '.h5')
        else:
            transfer = None
        try:
            history = train(self.config, tub_paths=tub_path,
                            model_type=model_type,
                            transfer=transfer,
                            comment=self.ids.comment.text)
            self.ids.status.text = f'Training completed.'
            self.ids.comment.text = 'Comment'
            self.ids.train_button.state = 'normal'
            self.ids.transfer_spinner.text = 'Choose transfer model'
            self.reload_database()
        except Exception as e:
            self.ids.status.text = f'Train error {e}'

    def train(self, model_type):
        """ Kick off training in a background thread so the UI stays
            responsive. """
        self.config.SHOW_PLOT = False
        Thread(target=self.train_call, args=(model_type,)).start()
        self.ids.status.text = f'Training started.'

    def set_config_attribute(self, input):
        """ Set the config attribute selected in the spinner to the given
            value; json parsing preserves numbers/lists, otherwise the raw
            string is used.
            NOTE(review): parameter name shadows the builtin input(). """
        try:
            val = json.loads(input)
        except ValueError:
            val = input
        att = self.ids.cfg_spinner.text.split(':')[0]
        setattr(self.config, att, val)
        self.ids.cfg_spinner.values = self.value_list()
        self.ids.status.text = f'Setting {att} to {val} of type ' \
                               f'{type(val).__name__}'

    def value_list(self):
        """ Return 'key: value' entries for all config attributes. """
        if self.config:
            return [f'{k}: {v}' for k, v in self.config.__dict__.items()]
        else:
            return ['select']

    def on_config(self, obj, config):
        """ Kivy method that is called if self.config changes. """
        if self.config and self.ids:
            self.ids.cfg_spinner.values = self.value_list()
            self.reload_database()

    def reload_database(self):
        """ Re-read the pilot database from disk. """
        if self.config:
            self.database = PilotDatabase(self.config)

    def on_database(self, obj, database):
        """ Kivy method that is called if self.database changes. Renders the
            pilot (and optionally tub-grouped) dataframes into the scroll
            views and refreshes the transfer model choices. """
        if self.ids.check.state == 'down':
            self.pilot_df, self.tub_df = self.database.to_df_tubgrouped()
            self.ids.scroll_tubs.text = self.tub_df.to_string()
        else:
            self.pilot_df = self.database.to_df()
            self.tub_df = pd.DataFrame()
            self.ids.scroll_tubs.text = ''
        # History/Config columns are too wide for on-screen display
        self.pilot_df.drop(columns=['History', 'Config'], errors='ignore',
                           inplace=True)
        text = self.pilot_df.to_string(formatters=self.formatter())
        self.ids.scroll_pilots.text = text
        values = ['Choose transfer model']
        if not self.pilot_df.empty:
            values += self.pilot_df['Name'].tolist()
        self.ids.transfer_spinner.values = values

    @staticmethod
    def formatter():
        """ Return column formatters for pandas to_string(). """
        def time_fmt(t):
            fmt = '%Y-%m-%d %H:%M:%S'
            return datetime.fromtimestamp(t).strftime(format=fmt)

        def transfer_fmt(model_name):
            return model_name.replace('.h5', '')

        return {'Time': time_fmt, 'Transfer': transfer_fmt}
class CarScreen(Screen):
    """ Screen for interacting with the car. """
    config = ObjectProperty(force_dispatch=True, allownone=True)
    # remote directory listing of the car dir
    files = ListProperty()
    car_dir = StringProperty(rc_handler.data.get('robot_car_dir', '~/mycar'))
    # progress bar values in percent for rsync pull/push
    pull_bar = NumericProperty(0)
    push_bar = NumericProperty(0)
    # Clock event polling the ssh connection
    event = ObjectProperty(None, allownone=True)
    # Popen handle of the in-flight ssh connectivity probe
    connection = ObjectProperty(None, allownone=True)
    # remote PID of the running manage.py drive process
    pid = NumericProperty(None, allownone=True)
    pilots = ListProperty()
    is_connected = BooleanProperty(False)

    def initialise(self):
        # poll the connection status every 3 seconds
        self.event = Clock.schedule_interval(self.connected, 3)

    def list_remote_dir(self, dir):
        """ Return the entries of a directory on the car via ssh, or [] when
            not connected. """
        if self.is_connected:
            cmd = f'ssh {self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}' + \
                  f' "ls {dir}"'
            listing = os.popen(cmd).read()
            # drop the first line and the trailing empty line of `ls` output
            adjusted_listing = listing.split('\n')[1:-1]
            return adjusted_listing
        else:
            return []

    def list_car_dir(self, dir):
        """ Set the active car dir and list its contents. """
        self.car_dir = dir
        self.files = self.list_remote_dir(dir)
        # non-empty director found
        if self.files:
            rc_handler.data['robot_car_dir'] = dir

    def update_pilots(self):
        """ Refresh the list of pilots found in the car's models dir. """
        model_dir = os.path.join(self.car_dir, 'models')
        self.pilots = self.list_remote_dir(model_dir)

    def pull(self, tub_dir):
        """ Rsync a tub directory from the car to the local DATA_PATH;
            progress is polled via show_progress(). """
        target = f'{self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}' + \
                 f':{os.path.join(self.car_dir, tub_dir)}'
        # trailing slash copies the directory's contents instead of the dir
        if self.ids.create_dir.state == 'normal':
            target += '/'
        dest = self.config.DATA_PATH
        cmd = ['rsync', '-rv', '--progress', '--partial', target, dest]
        Logger.info('car pull: ' + str(cmd))
        proc = Popen(cmd, shell=False, stdout=PIPE, text=True,
                     encoding='utf-8', universal_newlines=True)
        repeats = 100
        call = partial(self.show_progress, proc, repeats, True)
        event = Clock.schedule_interval(call, 0.0001)

    def send_pilot(self):
        """ Rsync selected model formats from the local models dir to the
            car. """
        src = self.config.MODELS_PATH
        # check if any sync buttons are pressed and update path accordingly
        buttons = ['h5', 'savedmodel', 'tflite', 'trt']
        select = [btn for btn in buttons if self.ids[f'btn_{btn}'].state
                  == 'down']
        # build filter: for example this rsyncs all .tfilte models
        # --include="*/" --include="*.tflite" --exclude="*"
        filter = ['--include=*/']
        for ext in select:
            filter.append(f'--include=*.{ext}')
        # if nothing selected, sync all
        if not select:
            filter.append('--include=*')
        filter.append('--exclude=*')
        dest = f'{self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}:' + \
               f'{self.car_dir}'
        cmd = ['rsync', '-rv', '--progress', '--partial', *filter, src, dest]
        Logger.info('car push: ' + ' '.join(cmd))
        proc = Popen(cmd, shell=False, stdout=PIPE,
                     encoding='utf-8', universal_newlines=True)
        repeats = 1
        call = partial(self.show_progress, proc, repeats, False)
        event = Clock.schedule_interval(call, 0.0001)

    def show_progress(self, proc, repeats, is_pull, e):
        """ Clock callback parsing rsync stdout and updating the progress
            bars; returning False cancels the schedule. """
        if proc.poll() is not None:
            # call ended this stops the schedule
            return False
        # find the next repeats lines with update info
        count = 0
        while True:
            stdout_data = proc.stdout.readline()
            if stdout_data:
                # find 'to-check=33/4551)' which is end of line
                # NOTE(review): '\)' in a non-raw string is an invalid escape
                # sequence warning in py3.6+ — should be a raw string
                pattern = 'to-check=(.*)\)'
                res = re.search(pattern, stdout_data)
                if res:
                    if count < repeats:
                        count += 1
                    else:
                        remain, total = tuple(res.group(1).split('/'))
                        bar = 100 * (1. - float(remain) / float(total))
                        if is_pull:
                            self.pull_bar = bar
                        else:
                            self.push_bar = bar
                        return True
            else:
                # end of stream command completed
                if is_pull:
                    button = self.ids['pull_tub']
                    self.pull_bar = 0
                else:
                    button = self.ids['send_pilots']
                    self.push_bar = 0
                    self.update_pilots()
                button.disabled = False
                return False

    def connected(self, event):
        """ Clock callback: probe the ssh connection with a short 'date'
            command and reflect the state in the status label. """
        if not self.config:
            return
        if self.connection is None:
            if not hasattr(self.config, 'PI_USERNAME') or \
                    not hasattr(self.config, 'PI_HOSTNAME'):
                self.ids.connected.text = 'Requires PI_USERNAME, PI_HOSTNAME'
                return
            # run new command to check connection status
            cmd = ['ssh',
                   '-o ConnectTimeout=3',
                   f'{self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}',
                   'date']
            # Logger.info('car check: ' + ' '.join(cmd))
            self.connection = Popen(cmd, shell=False, stdout=PIPE,
                                    stderr=STDOUT, text=True,
                                    encoding='utf-8', universal_newlines=True)
        else:
            # ssh is already running, check where we are
            return_val = self.connection.poll()
            self.is_connected = False
            if return_val is None:
                # command still running, do nothing and check next time again
                status = 'Awaiting connection...'
                self.ids.connected.color = 0.8, 0.8, 0.0, 1
            else:
                # command finished, check if successful and reset connection
                if return_val == 0:
                    status = 'Connected'
                    self.ids.connected.color = 0, 0.9, 0, 1
                    self.is_connected = True
                else:
                    status = 'Disconnected'
                    self.ids.connected.color = 0.9, 0, 0, 1
                self.connection = None
            self.ids.connected.text = status

    def drive(self):
        """ Start manage.py drive on the car via ssh and capture the remote
            PID so the process can be stopped later. """
        model_args = ''
        if self.ids.pilot_spinner.text != 'No pilot':
            model_path = os.path.join(self.car_dir, "models",
                                      self.ids.pilot_spinner.text)
            model_args = f'--type {self.ids.type_spinner.text} ' + \
                         f'--model {model_path}'
        cmd = ['ssh',
               f'{self.config.PI_USERNAME}@{self.config.PI_HOSTNAME}',
               f'source env/bin/activate; cd {self.car_dir}; ./manage.py '
               f'drive {model_args} 2>&1']
        Logger.info(f'car connect: {cmd}')
        proc = Popen(cmd, shell=False, stdout=PIPE, text=True,
                     encoding='utf-8', universal_newlines=True)
        while True:
            stdout_data = proc.stdout.readline()
            if stdout_data:
                # find 'PID: 12345'
                pattern = 'PID: .*'
                res = re.search(pattern, stdout_data)
                if res:
                    try:
                        self.pid = int(res.group(0).split('PID: ')[1])
                        Logger.info(f'car connect: manage.py drive PID: '
                                    f'{self.pid}')
                    except Exception as e:
                        Logger.error(f'car connect: {e}')
                    # PID found (or unparseable) — stop reading output
                    return
                Logger.info(f'car connect: {stdout_data}')
            else:
                return

    def stop(self):
        """ Kill the remote drive process identified by self.pid. """
        if self.pid:
            cmd = f'ssh {self.config.PI_USERNAME}@{self.config.PI_HOSTNAME} '\
                  + f'kill {self.pid}'
            out = os.popen(cmd).read()
            Logger.info(f"car connect: Kill PID {self.pid} + {out}")
            self.pid = None
class StartScreen(Screen):
    """ Initial screen showing the Donkey Car logo. """
    # Absolute path to the logo shipped with the web controller part.
    # (The trailing no-op `pass` of the original class body was removed.)
    img_path = os.path.realpath(os.path.join(
        os.path.dirname(__file__),
        '../parts/web_controller/templates/static/donkeycar-logo-sideways.png'))
class DonkeyApp(App):
    """ Main Kivy application wiring all screens into a ScreenManager. """
    start_screen = None
    tub_screen = None
    train_screen = None
    pilot_screen = None
    car_screen = None
    title = 'Donkey Manager'

    def initialise(self, event):
        """ Deferred initialisation of the screens after the widget tree has
            been built. """
        self.tub_screen.ids.config_manager.load_action()
        self.pilot_screen.initialise(event)
        self.car_screen.initialise()
        # This builds the graph which can only happen after everything else
        # has run, therefore delay until the next round.
        Clock.schedule_once(self.tub_screen.ids.tub_loader.update_tub)

    def build(self):
        """ Kivy entry point: create the screens and return the root
            ScreenManager. """
        self.start_screen = StartScreen(name='donkey')
        self.tub_screen = TubScreen(name='tub')
        self.train_screen = TrainScreen(name='train')
        self.pilot_screen = PilotScreen(name='pilot')
        self.car_screen = CarScreen(name='car')
        # tub and pilot screens both receive global keyboard events
        Window.bind(on_keyboard=self.tub_screen.on_keyboard)
        Window.bind(on_keyboard=self.pilot_screen.on_keyboard)
        Clock.schedule_once(self.initialise)
        sm = ScreenManager()
        sm.add_widget(self.start_screen)
        sm.add_widget(self.tub_screen)
        sm.add_widget(self.train_screen)
        sm.add_widget(self.pilot_screen)
        sm.add_widget(self.car_screen)
        return sm
def main():
    """ Entry point: instantiate and run the Donkey Manager app. """
    DonkeyApp().run()
# allow running the UI directly as a script
if __name__ == '__main__':
    main()
|
test_search_vectors.py | import pdb
import copy
import pytest
import threading
import datetime
import logging
from time import sleep
from multiprocessing import Process
import numpy
from milvus import Milvus, IndexType, MetricType
from utils import *
dim = 128  # dimensionality of the generated test vectors
table_id = "test_search"
add_interval_time = 2  # seconds to wait after add_vectors before searching
vectors = gen_vectors(100, dim)  # shared fixture data, 100 vectors
# vectors /= numpy.linalg.norm(vectors)
# vectors = vectors.tolist()
nrpobe = 1  # NOTE(review): typo of 'nprobe' — kept, it is referenced below
epsilon = 0.001  # tolerance for distance assertions
class TestSearchBase:
    """ Positive-path tests of `search_vectors` with valid parameters. """

    def init_data(self, connect, table, nb=100):
        '''
        Generate vectors and add them to the table, before searching vectors.
        '''
        global vectors
        if nb == 100:
            add_vectors = vectors
        else:
            add_vectors = gen_vectors(nb, dim)
        # add_vectors /= numpy.linalg.norm(add_vectors)
        # add_vectors = add_vectors.tolist()
        status, ids = connect.add_vectors(table, add_vectors)
        # wait so the freshly added vectors become searchable
        sleep(add_interval_time)
        return add_vectors, ids

    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_index_params()
    )
    def get_index_params(self, request, args):
        if "internal" not in args:
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in open source")
        return request.param

    """
    generate top-k params
    """
    @pytest.fixture(
        scope="function",
        params=[1, 99, 1024, 2048, 2049]
    )
    def get_top_k(self, request):
        yield request.param

    def test_search_top_k_flat_index(self, connect, table, get_top_k):
        '''
        target: test basic search function, all the search params are correct, change top-k value
        method: search with the given vectors, check the result
        expected: search status ok, and the length of the result is top_k
        '''
        vectors, ids = self.init_data(connect, table)
        query_vec = [vectors[0]]
        top_k = get_top_k
        nprobe = 1
        # fixed: use the local `nprobe` (the original passed the misspelled
        # module global `nrpobe`, leaving this local unused)
        status, result = connect.search_vectors(table, top_k, nprobe, query_vec)
        if top_k <= 2048:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert result[0][0].distance <= epsilon
            assert check_result(result[0], ids[0])
        else:
            assert not status.OK()

    def test_search_l2_index_params(self, connect, table, get_index_params):
        '''
        target: test basic search function, all the search params are correct, test all index params, and build
        method: search with the given vectors, check the result
        expected: search status ok, and the length of the result is top_k
        '''
        index_params = get_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data(connect, table)
        status = connect.create_index(table, index_params)
        query_vec = [vectors[0]]
        top_k = 10
        nprobe = 1
        # fixed: pass the local `nprobe` instead of the misspelled global
        status, result = connect.search_vectors(table, top_k, nprobe, query_vec)
        logging.getLogger().info(result)
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert check_result(result[0], ids[0])
            assert result[0][0].distance <= epsilon
        else:
            assert not status.OK()

    def test_search_ip_index_params(self, connect, ip_table, get_index_params):
        '''
        target: test basic search function, all the search params are correct, test all index params, and build
        method: search with the given vectors, check the result
        expected: search status ok, and the length of the result is top_k
        '''
        index_params = get_index_params
        logging.getLogger().info(index_params)
        vectors, ids = self.init_data(connect, ip_table)
        status = connect.create_index(ip_table, index_params)
        query_vec = [vectors[0]]
        top_k = 10
        nprobe = 1
        # fixed: pass the local `nprobe` instead of the misspelled global
        status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vec)
        logging.getLogger().info(result)
        if top_k <= 1024:
            assert status.OK()
            assert len(result[0]) == min(len(vectors), top_k)
            assert check_result(result[0], ids[0])
            # for IP the self-distance is the squared norm of the query
            assert abs(result[0][0].distance - numpy.inner(numpy.array(query_vec[0]), numpy.array(query_vec[0]))) <= gen_inaccuracy(result[0][0].distance)
        else:
            assert not status.OK()

    @pytest.mark.level(2)
    def test_search_vectors_without_connect(self, dis_connect, table):
        '''
        target: test search vectors without connection
        method: use disconnected instance, call search method and check if search successfully
        expected: raise exception
        '''
        query_vectors = [vectors[0]]
        top_k = 1
        nprobe = 1
        with pytest.raises(Exception) as e:
            status, ids = dis_connect.search_vectors(table, top_k, nprobe, query_vectors)

    def test_search_table_name_not_existed(self, connect, table):
        '''
        target: search table not existed
        method: search with the random table_name, which is not in db
        expected: status not ok
        '''
        table_name = gen_unique_str("not_existed_table")
        top_k = 1
        nprobe = 1
        query_vecs = [vectors[0]]
        status, result = connect.search_vectors(table_name, top_k, nprobe, query_vecs)
        assert not status.OK()

    def test_search_table_name_None(self, connect, table):
        '''
        target: search table that table name is None
        method: search with the table_name: None
        expected: status not ok
        '''
        table_name = None
        top_k = 1
        nprobe = 1
        query_vecs = [vectors[0]]
        with pytest.raises(Exception) as e:
            status, result = connect.search_vectors(table_name, top_k, nprobe, query_vecs)

    def test_search_top_k_query_records(self, connect, table):
        '''
        target: test search function, with search params: query_records
        method: search with the given query_records, which are subarrays of the inserted vectors
        expected: status ok and the returned vectors should be query_records
        '''
        top_k = 10
        nprobe = 1
        vectors, ids = self.init_data(connect, table)
        query_vecs = [vectors[0], vectors[55], vectors[99]]
        status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
        assert status.OK()
        assert len(result) == len(query_vecs)
        for i in range(len(query_vecs)):
            assert len(result[i]) == top_k
            assert result[i][0].distance <= epsilon

    """
    generate invalid query range params
    """
    @pytest.fixture(
        scope="function",
        params=[
            (get_current_day(), get_current_day()),
            (get_last_day(1), get_last_day(1)),
            (get_next_day(1), get_next_day(1))
        ]
    )
    def get_invalid_range(self, request):
        yield request.param

    def test_search_invalid_query_ranges(self, connect, table, get_invalid_range):
        '''
        target: search table with query ranges
        method: search with the same query ranges
        expected: status not ok
        '''
        top_k = 2
        nprobe = 1
        vectors, ids = self.init_data(connect, table)
        query_vecs = [vectors[0]]
        query_ranges = [get_invalid_range]
        status, result = connect.search_vectors(table, top_k, nprobe, query_vecs, query_ranges=query_ranges)
        assert not status.OK()
        assert len(result) == 0

    """
    generate valid query range params, no search result
    """
    @pytest.fixture(
        scope="function",
        params=[
            (get_last_day(2), get_last_day(1)),
            (get_next_day(1), get_next_day(2))
        ]
    )
    def get_valid_range_no_result(self, request):
        yield request.param

    def test_search_valid_query_ranges_no_result(self, connect, table, get_valid_range_no_result):
        '''
        target: search table with normal query ranges, but no data in db
        method: search with query ranges (low, low)
        expected: length of result is 0
        '''
        top_k = 2
        nprobe = 1
        vectors, ids = self.init_data(connect, table)
        query_vecs = [vectors[0]]
        query_ranges = [get_valid_range_no_result]
        status, result = connect.search_vectors(table, top_k, nprobe, query_vecs, query_ranges=query_ranges)
        assert status.OK()
        assert len(result) == 0

    """
    generate valid query range params, with search result
    """
    @pytest.fixture(
        scope="function",
        params=[
            (get_last_day(2), get_next_day(2)),
            (get_current_day(), get_next_day(2)),
        ]
    )
    def get_valid_range(self, request):
        yield request.param

    def test_search_valid_query_ranges(self, connect, table, get_valid_range):
        '''
        target: search table with normal query ranges covering today
        method: search with query ranges (low, normal)
        expected: status ok and the query vector is found
        '''
        top_k = 2
        nprobe = 1
        vectors, ids = self.init_data(connect, table)
        query_vecs = [vectors[0]]
        query_ranges = [get_valid_range]
        status, result = connect.search_vectors(table, top_k, nprobe, query_vecs, query_ranges=query_ranges)
        assert status.OK()
        assert len(result) == 1
        assert result[0][0].distance <= epsilon

    def test_search_distance_l2_flat_index(self, connect, table):
        '''
        target: search table, and check the result: distance
        method: compare the return distance value with value computed with Euclidean
        expected: the return distance equals to the computed value
        '''
        nb = 2
        top_k = 1
        nprobe = 1
        vectors, ids = self.init_data(connect, table, nb=nb)
        query_vecs = [[0.50 for i in range(dim)]]
        distance_0 = numpy.linalg.norm(numpy.array(query_vecs[0]) - numpy.array(vectors[0]))
        distance_1 = numpy.linalg.norm(numpy.array(query_vecs[0]) - numpy.array(vectors[1]))
        status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
        # server returns the squared L2 distance, hence the sqrt
        assert abs(numpy.sqrt(result[0][0].distance) - min(distance_0, distance_1)) <= gen_inaccuracy(result[0][0].distance)

    def test_search_distance_ip_flat_index(self, connect, ip_table):
        '''
        target: search ip_table, and check the result: distance
        method: compare the return distance value with value computed with Inner product
        expected: the return distance equals to the computed value
        '''
        nb = 2
        top_k = 1
        nprobe = 1
        vectors, ids = self.init_data(connect, ip_table, nb=nb)
        index_params = {
            "index_type": IndexType.FLAT,
            "nlist": 16384
        }
        connect.create_index(ip_table, index_params)
        logging.getLogger().info(connect.describe_index(ip_table))
        query_vecs = [[0.50 for i in range(dim)]]
        distance_0 = numpy.inner(numpy.array(query_vecs[0]), numpy.array(vectors[0]))
        distance_1 = numpy.inner(numpy.array(query_vecs[0]), numpy.array(vectors[1]))
        status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
        # IP distance: larger is closer, so compare against the max
        assert abs(result[0][0].distance - max(distance_0, distance_1)) <= gen_inaccuracy(result[0][0].distance)

    def test_search_distance_ip_index_params(self, connect, ip_table, get_index_params):
        '''
        target: search table, and check the result: distance
        method: compare the return distance value with value computed with Inner product
        expected: the return distance equals to the computed value
        '''
        top_k = 2
        nprobe = 1
        vectors, ids = self.init_data(connect, ip_table, nb=2)
        index_params = get_index_params
        connect.create_index(ip_table, index_params)
        logging.getLogger().info(connect.describe_index(ip_table))
        query_vecs = [[0.50 for i in range(dim)]]
        status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
        distance_0 = numpy.inner(numpy.array(query_vecs[0]), numpy.array(vectors[0]))
        distance_1 = numpy.inner(numpy.array(query_vecs[0]), numpy.array(vectors[1]))
        assert abs(result[0][0].distance - max(distance_0, distance_1)) <= gen_inaccuracy(result[0][0].distance)

    # TODO: enable
    # @pytest.mark.repeat(5)
    @pytest.mark.timeout(30)
    def _test_search_concurrent(self, connect, table):
        # disabled (leading underscore); NOTE(review): the inner call omits
        # the nprobe argument — confirm signature before enabling
        vectors, ids = self.init_data(connect, table)
        thread_num = 10
        nb = 100
        top_k = 10
        threads = []
        query_vecs = vectors[nb // 2:nb]

        def search():
            status, result = connect.search_vectors(table, top_k, query_vecs)
            assert len(result) == len(query_vecs)
            for i in range(len(query_vecs)):
                assert result[i][0].id in ids
                assert result[i][0].distance == 0.0

        for i in range(thread_num):
            x = threading.Thread(target=search, args=())
            threads.append(x)
            x.start()
        for th in threads:
            th.join()

    # TODO: enable
    @pytest.mark.timeout(30)
    def _test_search_concurrent_multiprocessing(self, args):
        '''
        target: test concurrent search with multiprocesses
        method: search with 10 processes, each process uses dependent connection
        expected: status ok and the returned vectors should be query_records
        '''
        nb = 100
        top_k = 10
        process_num = 4
        processes = []
        table = gen_unique_str("test_search_concurrent_multiprocessing")
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        param = {'table_name': table,
                 'dimension': dim,
                 'index_type': IndexType.FLAT,
                 'store_raw_vector': False}
        # create table
        milvus = Milvus()
        milvus.connect(uri=uri)
        milvus.create_table(param)
        vectors, ids = self.init_data(milvus, table, nb=nb)
        query_vecs = vectors[nb // 2:nb]

        def search(milvus):
            status, result = milvus.search_vectors(table, top_k, query_vecs)
            assert len(result) == len(query_vecs)
            for i in range(len(query_vecs)):
                assert result[i][0].id in ids
                assert result[i][0].distance == 0.0

        for i in range(process_num):
            milvus = Milvus()
            milvus.connect(uri=uri)
            p = Process(target=search, args=(milvus, ))
            processes.append(p)
            p.start()
            # fixed: use the directly imported `sleep`; this module never
            # imports the `time` module itself
            sleep(0.2)
        for p in processes:
            p.join()
def test_search_multi_table_L2(search, args):
    '''
    target: test search multi tables of L2
    method: add vectors into 10 tables, and search
    expected: search status ok, the length of result
    '''
    num = 10
    top_k = 10
    nprobe = 1
    tables = []
    idx = []
    for i in range(num):
        table = gen_unique_str("test_add_multitable_%d" % i)
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        param = {'table_name': table,
                 'dimension': dim,
                 'index_file_size': 10,
                 'metric_type': MetricType.L2}
        # create table
        milvus = Milvus()
        milvus.connect(uri=uri)
        milvus.create_table(param)
        status, ids = milvus.add_vectors(table, vectors)
        assert status.OK()
        assert len(ids) == len(vectors)
        tables.append(table)
        # remember three reference ids per table for the result check
        idx.append(ids[0])
        idx.append(ids[10])
        idx.append(ids[20])
    # fixed: use the directly imported `sleep` — the module does not import
    # the `time` module itself (`time.sleep` only worked via a star import)
    sleep(6)
    query_vecs = [vectors[0], vectors[10], vectors[20]]
    # start query from random table
    for i in range(num):
        table = tables[i]
        status, result = milvus.search_vectors(table, top_k, nprobe, query_vecs)
        assert status.OK()
        assert len(result) == len(query_vecs)
        for j in range(len(query_vecs)):
            assert len(result[j]) == top_k
        for j in range(len(query_vecs)):
            assert check_result(result[j], idx[3 * i + j])
def test_search_multi_table_IP(search, args):
    '''
    target: test search multi tables of IP
    method: add vectors into 10 tables, and search
    expected: search status ok, the length of result
    '''
    num = 10
    top_k = 10
    nprobe = 1
    tables = []
    idx = []
    for i in range(num):
        table = gen_unique_str("test_add_multitable_%d" % i)
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        # NOTE(review): this test targets IP but creates tables with
        # MetricType.L2 — confirm whether MetricType.IP was intended
        param = {'table_name': table,
                 'dimension': dim,
                 'index_file_size': 10,
                 'metric_type': MetricType.L2}
        # create table
        milvus = Milvus()
        milvus.connect(uri=uri)
        milvus.create_table(param)
        status, ids = milvus.add_vectors(table, vectors)
        assert status.OK()
        assert len(ids) == len(vectors)
        tables.append(table)
        # remember three reference ids per table for the result check
        idx.append(ids[0])
        idx.append(ids[10])
        idx.append(ids[20])
    # fixed: use the directly imported `sleep` — the module does not import
    # the `time` module itself (`time.sleep` only worked via a star import)
    sleep(6)
    query_vecs = [vectors[0], vectors[10], vectors[20]]
    # start query from random table
    for i in range(num):
        table = tables[i]
        status, result = milvus.search_vectors(table, top_k, nprobe, query_vecs)
        assert status.OK()
        assert len(result) == len(query_vecs)
        for j in range(len(query_vecs)):
            assert len(result[j]) == top_k
        for j in range(len(query_vecs)):
            assert check_result(result[j], idx[3 * i + j])
"""
******************************************************************
# The following cases are used to test `search_vectors` function
# with invalid table_name top-k / nprobe / query_range
******************************************************************
"""
class TestSearchParamsInvalid(object):
    """ Tests of `search_vectors` with invalid table_name / top-k / nprobe /
        query_range parameters. """
    nlist = 16384
    index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist}
    # executed once at import time when the class body runs
    logging.getLogger().info(index_param)

    def init_data(self, connect, table, nb=100):
        '''
        Generate vectors and add it in table, before search vectors
        '''
        global vectors
        if nb == 100:
            add_vectors = vectors
        else:
            add_vectors = gen_vectors(nb, dim)
        status, ids = connect.add_vectors(table, add_vectors)
        # wait so the freshly added vectors become searchable
        sleep(add_interval_time)
        return add_vectors, ids

    """
    Test search table with invalid table names
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_table_names()
    )
    def get_table_name(self, request):
        yield request.param

    @pytest.mark.level(2)
    def test_search_with_invalid_tablename(self, connect, get_table_name):
        # every invalid table name must be rejected by the server
        table_name = get_table_name
        logging.getLogger().info(table_name)
        top_k = 1
        nprobe = 1
        query_vecs = gen_vectors(1, dim)
        status, result = connect.search_vectors(table_name, top_k, nprobe, query_vecs)
        assert not status.OK()

    """
    Test search table with invalid top-k
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_top_ks()
    )
    def get_top_k(self, request):
        yield request.param

    @pytest.mark.level(1)
    def test_search_with_invalid_top_k(self, connect, table, get_top_k):
        '''
        target: test search fuction, with the wrong top_k
        method: search with top_k
        expected: raise an error, and the connection is normal
        '''
        top_k = get_top_k
        logging.getLogger().info(top_k)
        nprobe = 1
        query_vecs = gen_vectors(1, dim)
        # invalid ints are rejected server-side; non-ints fail client-side
        if isinstance(top_k, int):
            status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
            assert not status.OK()
        else:
            with pytest.raises(Exception) as e:
                status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)

    @pytest.mark.level(2)
    def test_search_with_invalid_top_k_ip(self, connect, ip_table, get_top_k):
        '''
        target: test search fuction, with the wrong top_k
        method: search with top_k
        expected: raise an error, and the connection is normal
        '''
        top_k = get_top_k
        logging.getLogger().info(top_k)
        nprobe = 1
        query_vecs = gen_vectors(1, dim)
        if isinstance(top_k, int):
            status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
            assert not status.OK()
        else:
            with pytest.raises(Exception) as e:
                status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)

    """
    Test search table with invalid nprobe
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_nprobes()
    )
    def get_nprobes(self, request):
        yield request.param

    # NOTE(review): 'nrpobe' in the test names is a typo of 'nprobe'; kept,
    # since renaming changes the collected test ids
    @pytest.mark.level(1)
    def test_search_with_invalid_nrpobe(self, connect, table, get_nprobes):
        '''
        target: test search fuction, with the wrong nprobe
        method: search with invalid nprobe
        expected: raise an error, and the connection is normal
        '''
        top_k = 1
        nprobe = get_nprobes
        logging.getLogger().info(nprobe)
        query_vecs = gen_vectors(1, dim)
        if isinstance(nprobe, int):
            status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)
            assert not status.OK()
        else:
            with pytest.raises(Exception) as e:
                status, result = connect.search_vectors(table, top_k, nprobe, query_vecs)

    @pytest.mark.level(2)
    def test_search_with_invalid_nrpobe_ip(self, connect, ip_table, get_nprobes):
        '''
        target: test search fuction, with the wrong nprobe
        method: search with invalid nprobe
        expected: raise an error, and the connection is normal
        '''
        top_k = 1
        nprobe = get_nprobes
        logging.getLogger().info(nprobe)
        query_vecs = gen_vectors(1, dim)
        if isinstance(nprobe, int):
            status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
            assert not status.OK()
        else:
            with pytest.raises(Exception) as e:
                status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)

    """
    Test search table with invalid query ranges
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_query_ranges()
    )
    def get_query_ranges(self, request):
        yield request.param

    @pytest.mark.level(1)
    def test_search_flat_with_invalid_query_range(self, connect, table, get_query_ranges):
        '''
        target: test search fuction, with the wrong query_range
        method: search with query_range
        expected: raise an error, and the connection is normal
        '''
        top_k = 1
        nprobe = 1
        query_vecs = [vectors[0]]
        query_ranges = get_query_ranges
        logging.getLogger().info(query_ranges)
        with pytest.raises(Exception) as e:
            status, result = connect.search_vectors(table, 1, nprobe, query_vecs, query_ranges=query_ranges)

    @pytest.mark.level(2)
    def test_search_flat_with_invalid_query_range_ip(self, connect, ip_table, get_query_ranges):
        '''
        target: test search fuction, with the wrong query_range
        method: search with query_range
        expected: raise an error, and the connection is normal
        '''
        top_k = 1
        nprobe = 1
        query_vecs = [vectors[0]]
        query_ranges = get_query_ranges
        logging.getLogger().info(query_ranges)
        with pytest.raises(Exception) as e:
            status, result = connect.search_vectors(ip_table, 1, nprobe, query_vecs, query_ranges=query_ranges)
def check_result(result, id):
    """Return True iff `id` appears among the first (up to) five result ids.

    :param result: sequence of objects exposing an `.id` attribute.
    :param id: the id to look for (parameter name kept for caller compatibility,
        even though it shadows the builtin).
    """
    # result[:5] covers both original branches: the whole list when it is
    # shorter than five entries, otherwise exactly the first five.
    return id in [entry.id for entry in result[:5]]
mjc_env.py | import matplotlib.pyplot as plt
import numpy as np
import os
import random
from threading import Thread
import time
from tkinter import TclError
import traceback
import sys
import xml.etree.ElementTree as xml
from dm_control.mujoco import Physics, TextOverlay
from dm_control.mujoco.wrapper.mjbindings import enums
from dm_control.rl.control import PhysicsError
from gym import spaces
from gym.core import Env
import opentamp
from opentamp.util_classes.mjc_xml_utils import *
from opentamp.util_classes import transform_utils as T
BASE_XML = os.getcwd() + '/opentamp'+'/robot_info/empty.xml'
ENV_XML = os.getcwd() + '/opentamp'+'/robot_info/current_empty.xml'
SPECIFIC_ENV_XML = os.getcwd() + '/temp/current_{0}.xml'
_MAX_FRONTBUFFER_SIZE = 2048
_CAM_WIDTH = 200
_CAM_HEIGHT = 150
CTRL_MODES = ['joint_angle', 'end_effector', 'end_effector_pos', 'discrete_pos', 'discrete']
class MJCEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array', 'depth'], 'video.frames_per_second': 67}
    def __init__(self, mode='end_effector', obs_include=[], items=[], include_files=[], include_items=[], im_dims=(_CAM_WIDTH, _CAM_HEIGHT), sim_freq=25, timestep=0.002, max_iter=250, mult=3e2, view=False, load_render=True, act_jnts=[], xmlid='0'):
        """Build the MuJoCo scene, observation layout, and (optionally) a viewer.

        :param mode: control mode; must be one of CTRL_MODES.
        :param obs_include: observation keys to expose (empty = all).
        :param items: (name, xml, info) tuples added to the generated XML.
        :param include_files: extra XML files merged into the scene.
        :param include_items: dicts describing items (name/pos/quat/is_fixed...).
        :param im_dims: (width, height) of rendered camera images.
        :param sim_freq: physics sub-steps per env step.
        :param timestep: MuJoCo integration timestep.
        :param mult: velocity-mode control gain. NOTE(review): this argument is
            ignored — self.mult is hard-coded to 3e2 below; confirm intended.
        :param view: launch an interactive viewer.
        :param load_render: import the renderer (needed for images/viewer).
        :param act_jnts: actuated joint names read by get_jnt_vec.
        :param xmlid: suffix for the generated XML path (allows parallel envs).
        """
        assert mode in CTRL_MODES, 'Env mode must be one of {0}'.format(CTRL_MODES)
        self.ctrl_mode = mode
        self.active = True
        self.cur_time = 0.
        self.prev_time = 0.
        self.timestep = timestep
        self.sim_freq = sim_freq
        self.mult = 3e2
        self.use_viewer = view
        # glfw-backed GL gets the full GLEW viewer; anything else falls back to matplotlib.
        self.use_glew = 'MUJOCO_GL' not in os.environ or os.environ['MUJOCO_GL'] == 'glfw'
        self.obs_include = obs_include
        self._joint_map_cache = {}
        self._ind_cache = {}
        self._type_cache = {}
        self._user_data = {}
        self._cache_rendering = False
        self._cached_images = {}
        self._last_rendered_state = (None, None)
        self.im_wid, self.im_height = im_dims
        self.items = items
        self._item_map = {item[0]: item for item in items}
        self.include_files = include_files
        self.include_items = include_items
        self.item_names = list(self._item_map.keys()) + [item['name'] for item in include_items]
        self.act_jnts = act_jnts
        self.xmlid = xmlid
        self._load_model()
        self._set_obs_info(obs_include)
        # Place every movable included item at its configured pose.
        for item in self.include_items:
            if item.get('is_fixed', False): continue
            name = item['name']
            pos = item.get('pos', (0, 0, 0))
            quat = item.get("quat", (1, 0, 0, 0))
            self.set_item_pos(name, pos)
            self.set_item_rot(name, quat)
        self.init_state = self.physics.data.qpos.copy()
        self._init_control_info()
        self._max_iter = max_iter
        self._cur_iter = 0
        self.load_render = load_render
        if self.load_render:
            # Import checks renderer availability; newer dm_control renamed the module.
            try:
                from dm_control import render
            except:
                from dm_control import _render as render
        self._viewer = None
        if view and self.load_render:
            self.add_viewer()
            # NOTE(review): the double render appears to warm up the viewer window — confirm.
            self.render(camera_id=0)
            self.render(camera_id=0)
@classmethod
def load_config(cls, config):
mode = config.get("mode", "joint_angle")
obs_include = config.get("obs_include", [])
items = config.get("items", [])
include_files = config.get("include_files", [])
include_items = config.get("include_items", [])
im_dims = config.get("image_dimensions", (_CAM_WIDTH, _CAM_HEIGHT))
sim_freq = config.get("sim_freq", 25)
ts = config.get("mjc_timestep", 0.002)
mult = config.get("step_mult", 3e2)
view = config.get("view", False)
max_iter = config.get("max_iterations", 250)
load_render = config.get("load_render", True)
act_jnts = config.get("act_jnts", [])
xmlid = config.get("xmlid", 0)
return cls(mode, obs_include, items, include_files, include_items, im_dims, sim_freq, ts, max_iter, mult, view, load_render=load_render, act_jnts=act_jnts, xmlid=xmlid)
def _load_model(self):
xmlpath = SPECIFIC_ENV_XML.format(self.xmlid)
generate_xml(BASE_XML, xmlpath, self.items, self.include_files, self.include_items, timestep=self.timestep)
self.physics = Physics.from_xml_path(xmlpath)
    def _init_control_info(self):
        """Hook for subclasses to set up controller state; the base env has none."""
        print('No control information to initialize.')
def add_viewer(self):
if self._viewer is not None: return
self.cur_im = np.zeros((self.im_height, self.im_wid, 3))
self._launch_viewer(_CAM_WIDTH, _CAM_HEIGHT)
    def _launch_viewer(self, width, height, title='Main'):
        """Create either the GLEW/GLFW interactive viewer or the matplotlib fallback."""
        self._matplot_view_thread = None
        if self.use_glew:
            from dm_control.viewer import viewer
            from dm_control.viewer import views
            from dm_control.viewer import gui
            from dm_control.viewer import renderer
            # Start with a null renderer; _reload_viewer attaches a real one later.
            self._renderer = renderer.NullRenderer()
            self._render_surface = None
            self._viewport = renderer.Viewport(width, height)
            self._window = gui.RenderWindow(width, height, title)
            self._viewer = viewer.Viewer(
                self._viewport, self._window.mouse, self._window.keyboard)
            self._viewer_layout = views.ViewportLayout()
            self._viewer.render()
        else:
            # Headless GL backend: display frames through a matplotlib window instead.
            self._viewer = None
            self._matplot_im = None
            self._run_matplot_view()
def _reload_viewer(self):
if self._viewer is None or not self.use_glew: return
if self._render_surface:
self._render_surface.free()
if self._renderer:
self._renderer.release()
self._render_surface = render.Renderer(
max_width=_MAX_FRONTBUFFER_SIZE, max_height=_MAX_FRONTBUFFER_SIZE)
self._renderer = renderer.OffScreenRenderer(
self.physics.model, self._render_surface)
self._renderer.components += self._viewer_layout
self._viewer.initialize(
self.physics, self._renderer, touchpad=False)
self._viewer.zoom_to_scene()
    def _render_viewer(self, pixels):
        """Push an RGB frame to whichever viewer implementation is active."""
        if self.use_glew:
            # Hand the frame to the GLFW window's render thread.
            with self._window._context.make_current() as ctx:
                ctx.call(
                    self._window._update_gui_on_render_thread, self._window._context.window, pixels)
            self._window._mouse.process_events()
            self._window._keyboard.process_events()
        else:
            # Matplotlib fallback: update the image only if the window was created.
            if self._matplot_im is not None:
                self._matplot_im.set_data(pixels)
                plt.draw()
def _run_matplot_view(self):
self._matplot_view_thread = Thread(target=self._launch_matplot_view)
self._matplot_view_thread.daemon = True
self._matplot_view_thread.start()
    def _launch_matplot_view(self):
        """Open the blocking matplotlib window (runs on the viewer daemon thread)."""
        try:
            # self._matplot_im = plt.imshow(self.render(view=False))
            self._matplot_im = plt.imshow(self.cur_im)
            plt.show()
        except TclError:
            print('\nCould not find display to launch viewer (this does not affect the ability to render images)\n')
    @property
    def qpos(self):
        """Generalized positions (live view into the MuJoCo state buffer)."""
        return self.physics.data.qpos

    @property
    def qvel(self):
        """Generalized velocities (live view)."""
        return self.physics.data.qvel

    @property
    def qacc(self):
        """Generalized accelerations (live view)."""
        return self.physics.data.qacc
    def step(self, action, mode=None, obs_include=None, gen_obs=True, view=False, debug=False):
        """Apply `action` for self.sim_freq physics sub-steps; return a gym-style tuple.

        :param action: control vector (targets; interpreted per `mode`).
        :param mode: None/'position'/'joint_angle' -> direct control;
            'velocity' -> proportional control toward the commanded delta.
        :param gen_obs: when False, only step physics and return None.
        :returns: (obs, reward, done, info) when gen_obs is True.
        """
        for t in range(self.sim_freq):
            cur_state = self.physics.data.qpos.copy()
            cur_act = self.get_jnt_vec(self.act_jnts)
            if mode is None or mode == 'position' or mode == 'joint_angle':
                self.physics.set_control(action)
            elif mode == 'velocity':
                # Proportional controller: force proportional to the target delta.
                self.physics.set_control(self.mult*(action-cur_act))
            # NOTE(review): holds actuator forces (not accelerations) and is unused.
            qacc = self.physics.data.actuator_force.copy()
            try:
                self.physics.step()
            except PhysicsError as e:
                #traceback.print_exception(*sys.exc_info())
                print('\nERROR IN PHYSICS SIMULATION; RESETTING ENV.\n')
                # Recover by resetting the sim and restoring the pre-step pose.
                self.physics.reset()
                self.physics.data.qpos[:] = cur_state[:]
                self.physics.forward()
        if not gen_obs: return
        return self.get_obs(obs_include=obs_include, view=view), \
               self.compute_reward(), \
               self.is_done(), \
               {}
def get_sensors(self, sensors=[]):
if not len(sensors):
return self.physics.data.sensordata.copy()
inds = [self.physics.model.name2id[s] for s in sensors]
return self.physics.data.sensordata[inds]
def get_state(self):
return self.physics.data.qpos.copy()
def set_state(self, state):
self.physics.data.qpos[:] = state
self.physics.forward()
'''
def __getstate__(self):
return self.physics.data.qpos.tolist()
'''
'''
def __setstate__(self, state):
self.physics.data.qpos[:] = state
self.physics.forward()
'''
def _set_obs_info(self, obs_include):
self._obs_inds = {}
self._obs_shape = {}
ind = 0
if 'overhead_image' in obs_include or not len(obs_include):
self._obs_inds['overhead_image'] = (ind, ind+3*self.im_wid*self.im_height)
self._obs_shape['overhead_image'] = (self.im_height, self.im_wid, 3)
ind += 3*self.im_wid*self.im_height
# if 'forward_image' in obs_include or not len(obs_include):
# self._obs_inds['forward_image'] = (ind, ind+3*self.im_wid*self.im_height)
# self._obs_shape['forward_image'] = (self.im_height, self.im_wid, 3)
# ind += 3*self.im_wid*self.im_height
for item, xml, info in self.items:
if item in obs_include or not len(obs_include):
self._obs_inds[item] = (ind, ind+3) # Only store 3d Position
self._obs_shape[item] = (3,)
ind += 3
self.dO = ind
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(ind,), dtype='float32')
return ind
    def get_obs(self, obs_include=None, view=False):
        """Assemble the flat observation vector laid out by _set_obs_info.

        :param obs_include: keys to fill (defaults to self.obs_include; empty = all).
        :param view: also push the rendered frame to the viewer.
        """
        obs = np.zeros(self.dO)
        if obs_include is None:
            obs_include = self.obs_include
        if self.load_render:
            if view or not len(obs_include) or 'overhead_image' in obs_include:
                pixels = self.render(height=self.im_height, width=self.im_wid, camera_id=0, view=view)
                if 'overhead_image' in self._obs_inds:
                    inds = self._obs_inds['overhead_image']
                    obs[inds[0]:inds[1]] = pixels.flatten()
        # if not len(obs_include) or 'forward_image' in obs_include:
        #     pixels = self.render(height=self.im_height, width=self.im_wid, camera_id=1, view=view)
        #     inds = self._obs_inds['forward_image']
        #     obs[inds[0]:inds[1]] = pixels.flatten()
        # Fill each requested item's 3d position slice.
        for item in self.items:
            if not len(obs_include) or item[0] in obs_include:
                inds = self._obs_inds[item[0]]
                obs[inds[0]:inds[1]] = self.get_item_pos(item[0])
        return np.array(obs)
def get_obs_types(self):
return list(self._obs_inds.keys())
def get_obs_inds(self, obs_type):
if obs_type not in self._obs_inds:
raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
return self._obs_inds[obs_type]
def get_obs_shape(self, obs_type):
if obs_type not in self._obs_inds:
raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
return self._obs_shape[obs_type]
def get_obs_data(self, obs, obs_type):
obs = np.array(obs)
if obs_type not in self._obs_inds:
raise KeyError('{0} is not a valid observation for this environment. Valid options: {1}'.format(obs_type, self.get_obs_types()))
inds = self._obs_inds[obs_type]
return obs[inds[0]:inds[1]].reshape(self._obs_shape[obs_type])
    def get_attr(self, name, attr, mujoco_frame=True):
        """Generic attribute getter dispatching on `attr`.

        NOTE(review): relies on self.geom / self._jnt_inds, which are not
        defined in this base class — presumably set by robot subclasses.
        """
        # '<robot>_ee_pos' is sugar for the corresponding gripper body's pose.
        if attr.find('ee_pos') >= 0:
            name = attr.replace('ee_pos', 'gripper')
            attr = 'pose'
        if attr in self.geom.jnt_names:
            jnts = self._jnt_inds[attr]
            bnds = self.geom.get_joint_limits(attr)
            vals = self.get_joints(jnts, vec=True)
            # Clamp reported joint values into their limits.
            return np.maximum(np.minimum(bnds[1], vals), bnds[0])
        if attr == 'pose' or attr == 'pos':
            return self.get_item_pos(name, mujoco_frame)
        if attr in ['rot', 'rotation', 'quat', 'euler']:
            euler = attr == 'euler'
            return self.get_item_rot(name, mujoco_frame, euler)
        # Fall back to a specialized get_<attr> method when one exists.
        if hasattr(self, 'get_{}'.format(attr)):
            return getattr(self, 'get_{}'.format(attr))(name, mujoco_frame=True)
        raise NotImplementedError('Could not retrieve value of {} for {}'.format(attr, name))
    def set_attr(self, name, attr, val, mujoco_frame=True, forward=True):
        """Generic attribute setter dispatching on `attr` (mirror of get_attr)."""
        if attr in self.geom.jnt_names:
            jnts = self.geom.jnt_names[attr]
            # A single value is broadcast across every joint in the group.
            if len(val) == 1: val = [val[0] for _ in jnts]
            return self.set_joints(dict(zip(jnts, val)), forward=forward)
        if attr == 'pose' or attr == 'pos':
            return self.set_item_pos(name, val, mujoco_frame, forward=forward)
        if attr in ['rot', 'rotation', 'quat', 'euler']:
            # NOTE(review): set_item_rot's third positional parameter is
            # use_euler, so mujoco_frame lands in use_euler here — confirm.
            return self.set_item_rot(name, val, mujoco_frame, forward=forward)
        if hasattr(self, 'set_{}'.format(attr)):
            return getattr(self, 'set_{}'.format(attr))(name, val, mujoco_frame, forward=forward)
        raise NotImplementedError('Could not set value of {} for {}'.format(attr, name))
def get_pos_from_label(self, label, mujoco_frame=True):
try:
pos = self.get_item_pos(label, mujoco_frame)
except:
pos = None
return pos
    def get_item_pos(self, name, mujoco_frame=True, rot=False):
        """Return the 3d position (or quaternion when rot=True) of `name`.

        Tries a free joint's qpos slice first, then the body's xpos/xquat;
        the resolved kind is memoized in self._type_cache.
        """
        model = self.physics.model
        item_type = 'joint'
        if name in self._type_cache:
            item_type = self._type_cache[name]
        pos = [np.nan, np.nan, np.nan]
        if rot: pos.append(np.nan)
        if item_type == 'joint':
            try:
                ind = model.name2id(name, 'joint')
                adr = model.jnt_qposadr[ind]
                if rot:
                    # Free-joint qpos layout: 3 position entries, then 4 quaternion entries.
                    pos = self.physics.data.qpos[adr+3:adr+7].copy()
                else:
                    pos = self.physics.data.qpos[adr:adr+3].copy()
                self._type_cache[name] = 'joint'
            except Exception as e:
                # Not a joint; fall through to the body lookup.
                item_type = 'body'
        if item_type == 'body':
            try:
                item_ind = model.name2id(name, 'body')
                arr = self.physics.data.xquat if rot else self.physics.data.xpos
                pos = arr[item_ind].copy()
                # pos = self.physics.data.xpos[item_ind].copy()
                self._type_cache[name] = 'body'
            except Exception as e:
                item_ind = -1
        # Fails when `name` matched neither a joint nor a body.
        assert not np.any(np.isnan(pos))
        return pos
def get_item_rot(self, name, mujoco_frame=True, to_euler=False):
rot = self.get_item_pos(name, mujoco_frame, True)
if to_euler:
rot = T.quaternion_to_euler(rot)
return rot
    def set_item_pos(self, name, pos, mujoco_frame=True, forward=True, rot=False):
        """Write the position (or quaternion when rot=True) of `name`.

        Free-jointed items are written through qpos; otherwise the body frame
        and the model's body_pos are overwritten directly. NaN values are a no-op.
        """
        item_type = 'joint'
        if np.any(np.isnan(pos)): return
        if name in self._type_cache:
            item_type = self._type_cache[name]
        if item_type == 'joint':
            try:
                ind = self.physics.model.name2id(name, 'joint')
                adr = self.physics.model.jnt_qposadr[ind]
                if rot:
                    # Free-joint layout: quaternion occupies qpos[adr+3:adr+7].
                    old_pos = self.physics.data.qpos[adr+3:adr+7]
                    self.physics.data.qpos[adr+3:adr+7] = pos
                else:
                    old_pos = self.physics.data.qpos[adr:adr+3]
                    self.physics.data.qpos[adr:adr+3] = pos
                self._type_cache[name] = 'joint'
            except Exception as e:
                # Not a joint; fall through to the body path.
                item_type = 'body'
        if item_type == 'body':
            try:
                ind = self.physics.model.name2id(name, 'body')
                if rot:
                    old_pos = self.physics.data.xquat[ind]
                    self.physics.data.xquat[ind] = pos
                else:
                    old_pos = self.physics.data.xpos[ind]
                    self.physics.data.xpos[ind] = pos
                    # Also move the model body so forward() preserves the new pose.
                    self.physics.model.body_pos[ind] = pos
                # old_pos = self.physics.model.body_pos[ind]
                item_type = 'body'
                self._type_cache[name] = 'body'
            except:
                item_type = 'unknown'
                print(('Could not shift item', name))
        if forward:
            self.physics.forward()
def set_item_rot(self, name, rot, use_euler=False, mujoco_frame=True, forward=True):
if use_euler or len(rot) == 3:
rot = T.euler_to_quaternion(rot, 'wxyz')
self.set_item_pos(name, rot, mujoco_frame, forward, True)
    def get_joints(self, jnts, sizes=None, vec=False):
        """Read qpos values for the given joints (names or numeric ids).

        :param sizes: per-joint entry counts; defaults to 1 entry each.
        :param vec: return a flat list instead of a {name: values} dict.
        """
        if vec:
            vals = []
        else:
            vals = {}
        for i, jnt in enumerate(jnts):
            if type(jnt) is not int:
                jnt = self.physics.model.name2id(jnt, 'joint')
            adr = self.physics.model.jnt_qposadr[jnt]
            size = 1
            if sizes is not None:
                size = sizes[i]
            if vec:
                vals.extend(self.physics.data.qpos[adr:adr+size])
            else:
                name = self.physics.model.id2name(jnt, 'joint')
                vals[name] = self.physics.data.qpos[adr:adr+size]
        return vals
def set_joints(self, jnts, forward=True):
for jnt, val in list(jnts.items()):
if type(jnt) is not int:
jnt = self.physics.model.name2id(jnt, 'joint')
adr = self.physics.model.jnt_qposadr[jnt]
offset = 1
if hasattr(val, '__len__'):
offset = len(val)
self.physics.data.qpos[adr:adr+offset] = val
if forward:
self.physics.forward()
    def get_jnt_vec(self, jnts):
        """Return qpos entries for the named joints (all of qpos when empty).

        NOTE(review): only the first qpos address of each joint is gathered,
        so multi-dof joints contribute a single entry — confirm intended.
        """
        if not len(jnts): return self.physics.data.qpos
        vals = []
        for name in jnts:
            ind = self.physics.model.name2id(name, 'joint')
            adr = self.physics.model.jnt_qposadr[ind]
            vals.append(adr)
        # Fancy-index qpos with the collected addresses.
        return self.physics.data.qpos[vals]
def get_disp(self, body1, body2):
pos1 = self.get_item_pos(body1)
pos2 = self.get_itme_pos(body2)
return pos2 - pos1
def get_body_info(self):
info = {}
for i in range(self.physics.model.nbody):
info[i] = {
'name': self.physics.model.id2name(i, 'body'),
'pos': self.physics.data.xpos[i],
'quat': self.physics.data.xquat[i],
}
return info
def get_jnt_info(self):
info = {}
dofadr = self.physics.model.jnt_dofadr
for i in range(self.physics.model.njnt):
inds = (dofadr[i], dofadr[i+1]) if i < self.physics.model.njnts-1 else (dofadr[i], self.physics.model.njnt)
body_id = self.physics.model.jnt_bodyid[i]
info[i] = {
'name': self.physics.model.id2name(i, 'joint'),
'angle': self.physics.data.qpos[inds[0]:inds[1]],
'dofadr': inds,
'body': self.physics.model.id2name(body_id, 'body'),
'parent_body': self.physics.model.id2name(self.physics.model.body_parentid[body_id], 'body')
}
return info
def get_geom_dimensions(self, geom_type=enums.mjtGeom.mjGEOM_BOX, geom_ind=-1):
'''
Geom type options:
mjGEOM_PLANE=0, mjGEOM_HFIELD=1, mjGEOM_SPHERE=2, mjGEOM_CAPSULE=3, mjGEOM_ELLIPSOID=4, mjGEOM_CYLINDER=5, mjGEOM_BOX=6, mjGEOM_MESH=7
'''
if geom_ind >= 0:
return self.physics.model.geom_size[ind]
inds = np.where(self.physics.model.geom_type == geom_type)
return self.physics.model.geom_size[inds]
def get_geom_positions(self, geom_type=enums.mjtGeom.mjGEOM_BOX, geom_ind=-1):
'''
Geom type options:
mjGEOM_PLANE=0, mjGEOM_HFIELD=1, mjGEOM_SPHERE=2, mjGEOM_CAPSULE=3, mjGEOM_ELLIPSOID=4, mjGEOM_CYLINDER=5, mjGEOM_BOX=6, mjGEOM_MESH=7
'''
if geom_ind >= 0:
return self.physics.model.geom_pos[ind]
inds = np.where(self.physics.model.geom_type == geom_type)
return self.physics.data.geom_xpos[inds]
# def get_geom_rotations(self, geom_type=enums.mjtGeom.mjGEOM_BOX, geom_ind=-1, use_euler=False):
# '''
# Geom type options:
# mjGEOM_PLANE=0, mjGEOM_HFIELD=1, mjGEOM_SPHERE=2, mjGEOM_CAPSULE=3, mjGEOM_ELLIPSOID=4, mjGEOM_CYLINDER=5, mjGEOM_BOX=6, mjGEOM_MESH=7
# '''
# if geom_ind >= 0:
# return self.physics.model.geom_quat[ind]
# inds = np.where(self.physics.model.geom_type == geom_type)
# rots = self.physics.data.geom_xquat[inds]
# if use_euler:
# return np.array([T.quaternion_to_euler(r) for r in rots])
# return rots
def get_camera_info(self, camera_name):
ind = self.physics.model.name2id(camera_name, 'camera')
fovy = self.physics.model.cam_fovy[ind].copy()
pos = self.physics.data.cam_xpos[ind].copy()
mat = self.physics.data.cam_xmat[ind].copy()
return fovy, pos, mat
def record_video(self, fname, actions=None, states=None, height=0, width=0, mode='position'):
if not self.load_render:
raise AssertionError('Cannot record video if the renderer is not loaded')
elif actions is None and states is None:
raise AssertionError('Must pass either action or state trajectory to record video')
ims = []
buf = actions if actions is not None else states
for step in buf:
if actions is not None: self.step(step, mode=mode)
if states is not None: self.set_state(step)
im = self.render(camera_id=camera_id, height=height, width=width, view=False)
ims.append(im)
np.save(fname, ims)
    def set_user_data(self, key, data):
        """Stash arbitrary user data under `key`."""
        self._user_data[key] = data

    def get_user_data(self, key, default=None):
        """Fetch user data stored under `key`, or `default` when absent."""
        return self._user_data.get(key, default)

    def compute_reward(self):
        """Base reward: always zero; subclasses override."""
        return 0

    def is_done(self):
        """Episode terminates once the iteration budget is exhausted."""
        return self._cur_iter >= self._max_iter

    def get_text_overlay(self, title='', body='', style='normal', position='top left'):
        """Build a dm_control TextOverlay for rendering."""
        return TextOverlay(title, body, style, position)
    def render(self, mode='rgb_array', height=0, width=0, camera_id=0,
               overlays=(), depth=False, scene_option=None, view=False,
               forward=False):
        """Render a frame (optionally from cache) and feed the viewer.

        :param mode: gym-style mode; 'depth_array' implies depth, 'human' implies view.
        :returns: the pixel array, or None when rendering is disabled.
        """
        if not self.load_render: return None
        # Make friendly with dm_control or gym interface
        depth = depth or mode == 'depth_array'
        view = view or mode == 'human'
        if height == 0: height = self.im_height
        if width == 0: width = self.im_wid
        if forward: self.physics.forward()
        pixels = None
        if self._cache_rendering:
            # Invalidate the image cache whenever the physics state moved measurably.
            prev_x, prev_q = self._last_rendered_state
            x_changed = prev_x is None or np.any(np.abs(prev_x - self.physics.data.xpos) > 1e-5)
            q_changed = prev_q is None or np.any(np.abs(prev_q - self.physics.data.qpos) > 1e-5)
            if x_changed or q_changed:
                self._cached_images = {}
                self._last_rendered_state = (self.physics.data.xpos.copy(), self.physics.data.qpos.copy())
            elif (camera_id, height, width) in self._cached_images:
                pixels = self._cached_images[(camera_id, height, width)]
        if pixels is None:
            pixels = self.physics.render(height, width, camera_id, overlays, depth, scene_option)
            if self._cache_rendering: self._cached_images[(camera_id, height, width)] = pixels
        if view and self.use_viewer:
            self._render_viewer(pixels)
        return pixels
def reset(self):
self._cur_iter = 0
self.physics.reset()
# self._reload_viewer()
self.ctrl_data = {}
self.cur_time = 0.
self.prev_time = 0.
self.physics.data.qpos[:] = 0.
self.physics.data.qvel[:] = 0.
self.physics.data.qacc[:]= 0.
self.physics.forward()
return self.get_obs()
def close(self):
self.active = False
if self._viewer is not None and self.use_glew:
self._viewer.close()
self._viewer = None
self.physics.free()
def seed(self, seed=None):
np.random.seed(seed)
random.seed(seed)
    def list_joint_info(self):
        """Print a human-readable dump of every joint in the model (debug aid)."""
        for i in range(self.physics.model.njnt):
            print('\n')
            print(('Jnt ', i, ':', self.physics.model.id2name(i, 'joint')))
            print(('Axis :', self.physics.model.jnt_axis[i]))
            print(('Dof adr :', self.physics.model.jnt_dofadr[i]))
            body_id = self.physics.model.jnt_bodyid[i]
            print(('Body :', self.physics.model.id2name(body_id, 'body')))
            print(('Parent body :', self.physics.model.id2name(self.physics.model.body_parentid[body_id], 'body')))
|
CPULoader.py | #!/usr/bin/python
#
# Program never ends. Press Ctrl-C to terminate
#
import multiprocessing
def calculate():
    """Spin forever doing floating-point additions to load one CPU core."""
    accumulator = 1.0
    while True:
        accumulator += 1.0
if __name__ == '__main__':
    # Spawn one busy-loop worker per logical CPU; the processes never exit,
    # so the program runs until interrupted (Ctrl-C).
    for i in range(multiprocessing.cpu_count()):
        p = multiprocessing.Process(target=calculate)
        p.start()
|
B.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
    # Health-check endpoint; the exact response string is user-facing.
    return 'JDBot is up and running!!'
def run():
    """Serve the keep-alive Flask app on all interfaces, port 3000 (blocking)."""
    app.run(host='0.0.0.0', port=3000)
def b():
    """Start the keep-alive web server on a background thread."""
    keep_alive_thread = Thread(target=run)
    keep_alive_thread.start()
run_synth_tests.py | import os
import sys
import multiprocessing
sys.path.append(os.path.join("..", ".."))
sys.path.append(os.path.join("..", "..", "algo"))
import params.PDL1NetConfig as pdl1_config
import algo.PDL1Net.PDL1NetTester as Tester
import algo.mrcnn.model as modellib
ROOT_DIR = os.path.join(os.getcwd(), "..", "..")
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
def run_test(args):
    """Run PDL1Net inference over one dataset with the given weights.

    :param args: Arguments-like object exposing weights, backbone, dataset, logs.
    """
    class InferenceConfig(pdl1_config.PDL1NetConfig):
        # Set batch size to 1 since we'll be running inference on
        # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        BACKBONE = args.backbone
    config = InferenceConfig()
    model = modellib.MaskRCNN(mode="inference", config=config, model_dir=args.logs)
    model.load_weights(args.weights, by_name=True)
    # Derive "<log_dir_name>_<epoch>" from the weights file path for the output dir.
    path, epoch_name = os.path.split(args.weights)
    path, log_name = os.path.split(path)
    epoch = os.path.splitext(epoch_name)[0].split("_")[-1]
    output_dir_name = "{}_{}".format(log_name, epoch)
    tester = Tester.PDL1NetTester(model, args)
    tester.test_sequence(result_dir_name=output_dir_name)
class Arguments:
    """Simple namespace standing in for parsed command-line arguments."""

    def __init__(self, weights, backbone, dataset):
        # Path to the trained .h5 weights file.
        self.weights = weights
        # Backbone architecture name (e.g. "resnet50").
        self.backbone = backbone
        # Directory of the dataset to evaluate on.
        self.dataset = dataset
        # Output/log root shared by all runs.
        self.logs = DEFAULT_LOGS_DIR
if __name__ == "__main__":
weights_root = r"D:\Nati\Itamar_n_Shai\Mask_RCNN\logs"
logs_name = [
'synth_iou05_c1_bg0',
'synth_iou0_c0_bg1',
'synth_iou0_c1_bg0',
'synth_iou0_c1_bg1'
]
logs = [os.path.join(weights_root, name) for name in logs_name]
dataset_root = r"D:\Nati\Itamar_n_Shai\Datasets\DataSynth"
datasets = [
'output_IoU0.5_C1_BG0',
'output_IoU0_C0_BG1',
'output_IoU0_C1_BG0',
'output_IoU0_C1_BG1'
]
datasets = [os.path.join(dataset_root, name) for name in datasets]
backbone = "resnet50"
for log_path, dataset_path in zip(logs, datasets):
weight_path = os.path.join(log_path, "mask_rcnn_pdl1_0090.h5")
args = Arguments(weight_path, backbone, dataset_path)
# p = multiprocessing.Process(target=run_test, args=(args,))
# p.start()
# p.join()
run_test(args)
pass
|
systray.py | import time
from io import StringIO
from threading import Lock, Thread
from typing import List
from PyQt5.QtCore import QThread, pyqtSignal, QCoreApplication, Qt, QSize
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QSystemTrayIcon, QMenu, QWidget, QSizePolicy
from bauh import __app_name__
from bauh.api.abstract.controller import SoftwareManager
from bauh.api.abstract.model import PackageUpdate
from bauh.view.qt import qt_utils
from bauh.view.qt.about import AboutDialog
from bauh.view.qt.settings import SettingsWindow
from bauh.view.qt.view_utils import load_resource_icon
from bauh.view.qt.window import ManageWindow
from bauh.view.util import util
from bauh.view.util.translation import I18n
class UpdateCheck(QThread):
    """Background thread that periodically polls for package updates."""

    signal = pyqtSignal(list)  # emits List[PackageUpdate] after every poll

    def __init__(self, manager: SoftwareManager, check_interval: int, parent=None):
        super(UpdateCheck, self).__init__(parent)
        self.check_interval = check_interval  # seconds between polls
        self.manager = manager

    def run(self):
        # Infinite poll loop: query, notify listeners, sleep, repeat.
        while True:
            updates = self.manager.list_updates()
            self.signal.emit(updates)
            time.sleep(self.check_interval)
class TrayIcon(QSystemTrayIcon):
    def __init__(self, i18n: I18n, manager: SoftwareManager, manage_window: ManageWindow, config: dict, screen_size: QSize):
        """Build the tray icon, its context menu, and the update-check thread.

        :param config: app configuration dict (ui.tray icons, updates.check_interval,
            system.notifications).
        """
        super(TrayIcon, self).__init__()
        self.i18n = i18n
        self.manager = manager
        self.screen_size = screen_size
        # Icon resolution order: explicit config path -> theme icon -> bundled resource.
        if config['ui']['tray']['default_icon']:
            self.icon_default = QIcon(config['ui']['tray']['default_icon'])
        else:
            self.icon_default = QIcon.fromTheme('bauh_tray_default')
            if self.icon_default.isNull():
                self.icon_default = load_resource_icon('img/logo.svg', 24)
        if config['ui']['tray']['updates_icon']:
            self.icon_updates = QIcon(config['ui']['tray']['updates_icon'])
        else:
            self.icon_updates = QIcon.fromTheme('bauh_tray_updates')
            if self.icon_updates.isNull():
                self.icon_updates = load_resource_icon('img/logo_update.svg', 24)
        self.setIcon(self.icon_default)
        # Context menu: manage / settings / about / exit.
        self.menu = QMenu()
        self.action_manage = self.menu.addAction(self.i18n['tray.action.manage'])
        self.action_manage.triggered.connect(self.show_manage_window)
        self.action_settings = self.menu.addAction(self.i18n['settings'].capitalize())
        self.action_settings.triggered.connect(self.show_settings_window)
        self.action_about = self.menu.addAction(self.i18n['tray.action.about'])
        self.action_about.triggered.connect(self.show_about)
        self.action_exit = self.menu.addAction(self.i18n['tray.action.exit'])
        self.action_exit.triggered.connect(lambda: QCoreApplication.exit())
        self.setContextMenu(self.menu)
        self.manage_window = None
        self.dialog_about = None
        self.settings_window = None
        # Background update polling feeds notify_updates.
        self.check_thread = UpdateCheck(check_interval=int(config['updates']['check_interval']), manager=self.manager)
        self.check_thread.signal.connect(self.notify_updates)
        self.check_thread.start()
        self.last_updates = set()
        self.update_notification = bool(config['system']['notifications'])
        self.lock_notify = Lock()
        self.activated.connect(self.handle_click)
        self.set_default_tooltip()
        self.manage_window = manage_window
    def set_default_tooltip(self):
        """Reset the tooltip to the app title (no pending updates)."""
        self.setToolTip('{} ({})'.format(self.i18n['manage_window.title'], __app_name__).lower())

    def handle_click(self, reason):
        """Open the manage window on a plain left-click of the tray icon."""
        if reason == self.Trigger:
            self.show_manage_window()

    def verify_updates(self, notify_user: bool = True):
        """Trigger an asynchronous update check."""
        Thread(target=self._verify_updates, args=(notify_user,)).start()

    def _verify_updates(self, notify_user: bool):
        # Runs off the UI thread; notify_updates re-synchronizes via its lock.
        self.notify_updates(self.manager.list_updates(), notify_user=notify_user)
    def notify_updates(self, updates: List[PackageUpdate], notify_user: bool = True):
        """Refresh icon/tooltip for the given updates and optionally notify the user.

        Thread-safe: serialized by self.lock_notify (called from Qt signal and
        worker threads alike).
        """
        self.lock_notify.acquire()
        try:
            if len(updates) > 0:
                # Key each update by type:id:version so re-notifications only
                # happen when something genuinely new appears.
                update_keys = {'{}:{}:{}'.format(up.type, up.id, up.version) for up in updates}
                new_icon = self.icon_updates
                if update_keys.difference(self.last_updates):
                    self.last_updates = update_keys
                    n_updates = len(updates)
                    # Count updates per package type for the tooltip breakdown.
                    ups_by_type = {}
                    for key in update_keys:
                        ptype = key.split(':')[0]
                        count = ups_by_type.get(ptype)
                        count = 1 if count is None else count + 1
                        ups_by_type[ptype] = count
                    msg = StringIO()
                    msg.write(self.i18n['notification.update{}'.format('' if n_updates == 1 else 's')].format(n_updates))
                    if len(ups_by_type) > 1:
                        for ptype, count in ups_by_type.items():
                            msg.write('\n * {} ( {} )'.format(ptype.capitalize(), count))
                    msg.seek(0)
                    msg = msg.read()
                    self.setToolTip(msg)
                    if self.update_notification and notify_user:
                        util.notify_user(msg=msg)
            else:
                # Nothing pending: clear memory of past updates and restore defaults.
                self.last_updates.clear()
                new_icon = self.icon_default
                self.set_default_tooltip()
            if self.icon().cacheKey() != new_icon.cacheKey():  # changes the icon if needed
                self.setIcon(new_icon)
        finally:
            self.lock_notify.release()
    def show_manage_window(self):
        """Restore or open the management window."""
        # NOTE(review): nesting reconstructed from a whitespace-mangled source —
        # confirm whether show() should also run for the minimized branch.
        if self.manage_window.isMinimized():
            self.manage_window.setWindowState(Qt.WindowNoState)
        elif not self.manage_window.isVisible():
            self.manage_window.refresh_apps()
            self.manage_window.show()
    def show_settings_window(self):
        """Show the settings window, creating and centering it on first use."""
        if self.settings_window:
            self.settings_window.handle_display()
        else:
            self.settings_window = SettingsWindow(manager=self.manager,
                                                  i18n=self.i18n,
                                                  screen_size=self.screen_size,
                                                  tray=self,
                                                  window=self.manage_window)
            # Size to a quarter of the screen width, then center before showing.
            self.settings_window.setMinimumWidth(int(self.screen_size.width() / 4))
            self.settings_window.adjustSize()
            qt_utils.centralize(self.settings_window)
            self.settings_window.show()
    def show_about(self):
        """Show the about dialog, creating it lazily on first use."""
        if self.dialog_about is None:
            self.dialog_about = AboutDialog(self.i18n)
        if self.dialog_about.isHidden():
            self.dialog_about.show()
|
s20.py | """ Orbivo S20. """
import binascii
import struct
import logging
import socket
import threading
import time
_LOGGER = logging.getLogger(__name__)
# S20 UDP port
PORT = 10000
# UDP best-effort.
RETRIES = 3
TIMEOUT = 1.0
DISCOVERY_TIMEOUT = 1.0
# Timeout after which to renew device subscriptions
SUBSCRIPTION_TIMEOUT = 60
# Packet constants.
MAGIC = b'\x68\x64'
DISCOVERY = b'\x00\x06\x71\x61'
DISCOVERY_RESP = b'\x00\x2a\x71\x61'
SUBSCRIBE = b'\x00\x1e\x63\x6c'
SUBSCRIBE_RESP = b'\x00\x18\x63\x6c'
CONTROL = b'\x00\x17\x64\x63'
CONTROL_RESP = b'\x00\x17\x73\x66'
PADDING_1 = b'\x20\x20\x20\x20\x20\x20'
PADDING_2 = b'\x00\x00\x00\x00'
ON = b'\x01'
OFF = b'\x00'
# Socket
_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Buffer
_BUFFER = {}
def _listen():
    """ Listen on socket. """
    # Blocking receive loop: keep only the most recent datagram per sender IP.
    while True:
        data, addr = _SOCKET.recvfrom(1024)
        _BUFFER[addr[0]] = data
def start(addr = ''):
    """ Set up module.
    Open a UDP socket, and listen in a thread.
    :param addr: local address to bind (default: all interfaces).
    """
    _SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    _SOCKET.bind((addr, PORT))
    # NOTE(review): the listener thread is non-daemon, so it keeps the process
    # alive until stop() closes the socket.
    udp = threading.Thread(target=_listen)
    udp.start()
def stop():
    """ Close the socket"""
    # Closing the socket makes the listener thread's recvfrom fail, ending it.
    _SOCKET.close()
def _device_time(tab):
ts = struct.unpack('<L', tab)[0] - 2208988800
return ts
def discover(timeout=DISCOVERY_TIMEOUT):
    """ Discover devices on the local network.
    :param timeout: Optional timeout in seconds.
    :returns: Set of discovered host addresses.
    """
    hosts = {}
    payload = MAGIC + DISCOVERY
    # NOTE(review): loop nesting reconstructed from a whitespace-mangled source —
    # each retry broadcasts once and then polls the receive buffer for `timeout`
    # seconds; confirm against the original layout.
    for _ in range(RETRIES):
        _SOCKET.sendto(bytearray(payload), ('255.255.255.255', PORT))
        start = time.time()
        while time.time() < start + timeout:
            for host, data in _BUFFER.copy().items():
                if not _is_discovery_response(data):
                    continue
                if host not in hosts:
                    _LOGGER.debug("Discovered device at %s", host)
                    # Parse fields out of the fixed-layout discovery response.
                    entry = {}
                    entry['mac'] = data[7:13]
                    entry['imac'] = data[19:25]
                    entry['next'] = 0
                    entry['st'] = int(data[-1])
                    entry['time'] = _device_time(data[37:41])
                    entry['serverTime'] = int(time.time())
                    hosts[host] = entry
    return hosts
def _is_discovery_response(data):
    """ Is this a discovery response?

    :param data: Payload.
    """
    # Equivalent to comparing data[0:6]; the expected prefix is 6 bytes.
    return data.startswith(MAGIC + DISCOVERY_RESP)
def _is_subscribe_response(data):
    """ Is this a subscribe response?

    :param data: Payload.
    """
    # Equivalent to comparing data[0:6]; the expected prefix is 6 bytes.
    return data.startswith(MAGIC + SUBSCRIBE_RESP)
def _is_control_response(data):
    """ Is this a control response?

    :param data: Payload.
    """
    # Equivalent to comparing data[0:6]; the expected prefix is 6 bytes.
    return data.startswith(MAGIC + CONTROL_RESP)
class S20Exception(Exception):
    """ S20 exception.

    Raised when a device cannot be discovered or fails to acknowledge
    a subscribe/control request in time.
    """
    pass
class S20(object):
    """ Controls an Orbivo S20 WiFi Smart Socket.

    http://www.orvibo.com/en_products_view.asp?mid=15&pid=4&id=234
    Protocol documentation: http://pastebin.com/LfUhsbcS
    """
    def __init__(self, host, mac=None):
        """ Initialize S20 object.

        :param host: IP or hostname of device.
        :param mac: Optional MAC address, either raw bytes or a
            colon-separated hex string. Discovered by broadcast when omitted.
        """
        self.host = host
        if not mac:
            (self._mac, self._mac_reversed) = self._discover_mac()
        else:
            # isinstance instead of ``type(mac) is str`` so str subclasses
            # are accepted too.
            if isinstance(mac, str):
                self._mac = binascii.a2b_hex(''.join(mac.split(':')))
            else:
                self._mac = mac
            ba = bytearray(self._mac)
            ba.reverse()
            self._mac_reversed = bytes(ba)
        self._subscribe()

    @property
    def on(self):
        """ State property.

        :returns: State of device (on/off).
        """
        return self._subscribe()

    @on.setter
    def on(self, state):
        """ Change device state.

        :param state: True (on) or False (off).
        """
        if state:
            self._turn_on()
        else:
            self._turn_off()

    def _discover_mac(self):
        """ Discovers MAC address of device.

        Discovery is done by sending a UDP broadcast.
        All configured devices reply. The response contains
        the MAC address in both needed formats.
        Discovery of multiple switches must be done synchronously.

        :returns: Tuple of MAC address and reversed MAC address.
        :raises S20Exception: If no device at ``self.host`` answered.
        """
        mac = None
        mac_reversed = None
        cmd = MAGIC + DISCOVERY
        resp = self._udp_transact(cmd, self._discovery_resp,
                                  broadcast=True,
                                  timeout=DISCOVERY_TIMEOUT)
        if resp:
            (mac, mac_reversed) = resp
        if mac is None:
            raise S20Exception("Couldn't discover {}".format(self.host))
        return (mac, mac_reversed)

    def _subscribe(self):
        """ Subscribe to the device.

        A subscription serves two purposes:
        - Returns state (on/off).
        - Enables state changes on the device
          for a short period of time.

        :returns: True if the device reports being ON, False otherwise.
        :raises S20Exception: If the device did not answer.
        """
        cmd = MAGIC + SUBSCRIBE + self._mac \
            + PADDING_1 + self._mac_reversed + PADDING_1
        status = self._udp_transact(cmd, self._subscribe_resp)
        if status is not None:
            # Remember when we last subscribed; _control() renews the
            # subscription once it grows older than SUBSCRIPTION_TIMEOUT.
            self.last_subscribed = time.time()
            return status == ON
        else:
            raise S20Exception(
                "No status could be found for {}".format(self.host))

    def _subscription_is_recent(self):
        """ Check if subscription occurred recently.

        :returns: Yes (True) or no (False)
        """
        return self.last_subscribed > time.time() - SUBSCRIPTION_TIMEOUT

    def _control(self, state):
        """ Control device state.

        Possible states are ON or OFF.

        :param state: Switch to this state (single state byte).
        :raises S20Exception: If the device did not acknowledge.
        """
        # Renew subscription if necessary
        if not self._subscription_is_recent():
            self._subscribe()
        cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state
        _LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
        ack_state = self._udp_transact(cmd, self._control_resp, state)
        if ack_state is None:
            raise S20Exception(
                "Device didn't acknowledge control request: {}".format(
                    self.host))

    def _discovery_resp(self, data):
        """ Handle a discovery response.

        :param data: Payload.
        :returns: (MAC, reversed MAC) tuple, or None if ``data`` is not
            a discovery response.
        """
        if _is_discovery_response(data):
            _LOGGER.debug("Discovered MAC of %s: %s", self.host,
                          binascii.hexlify(data[7:13]).decode())
            return (data[7:13], data[19:25])

    def _subscribe_resp(self, data):
        """ Handle a subscribe response.

        :param data: Payload.
        :returns: State (ON/OFF), or None if not a subscribe response.
        """
        if _is_subscribe_response(data):
            status = bytes([data[23]])
            _LOGGER.debug("Successfully subscribed to %s, state: %s",
                          self.host, ord(status))
            return status

    def _control_resp(self, data, state):
        """ Handle a control response.

        :param data: Payload.
        :param state: Requested state.
        :returns: Acknowledged state, or None if it does not match.
        """
        if _is_control_response(data):
            ack_state = bytes([data[22]])
            if state == ack_state:
                _LOGGER.debug("Received state ack from %s, state: %s",
                              self.host, ord(ack_state))
                return ack_state

    def _udp_transact(self, payload, handler, *args,
                      broadcast=False, timeout=TIMEOUT):
        """ Complete a UDP transaction.

        UDP is stateless and not guaranteed, so we have to
        take some mitigation steps:
        - Send payload multiple times.
        - Wait for awhile to receive response.

        :param payload: Payload to send.
        :param handler: Response handler.
        :param args: Arguments to pass to response handler.
        :param broadcast: Send a broadcast instead.
        :param timeout: Timeout in seconds.
        :returns: Whatever ``handler`` returned, or None on timeout.
        """
        if self.host in _BUFFER:
            # Drop any stale datagram left over from an earlier exchange.
            del _BUFFER[self.host]
        host = self.host
        if broadcast:
            host = '255.255.255.255'
        retval = None
        for _ in range(RETRIES):
            _SOCKET.sendto(bytearray(payload), (host, PORT))
            start = time.time()
            while time.time() < start + timeout:
                data = _BUFFER.get(self.host, None)
                if data:
                    retval = handler(data, *args)
                    # Return as soon as a response is received
                    if retval:
                        return retval
                # Yield the CPU instead of busy-polling the buffer
                # (the old loop spun at 100% for the whole timeout).
                time.sleep(0.05)

    def _turn_on(self):
        """ Turn on the device. """
        self._control(ON)

    def _turn_off(self):
        """ Turn off the device. """
        self._control(OFF)
|
rdd.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
# Python 2/3 compatibility shim: on Python 3 the py2-only string types
# are aliased to ``str``; on Python 2 the lazy itertools variants shadow
# the eager builtins so map/filter behave the same on both versions.
if sys.version > '3':
    basestring = unicode = str
else:
    from itertools import imap as map, ifilter as filter
from pyspark.java_gateway import do_server_auth
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer, write_with_length, \
UTF8Deserializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration
# Only the RDD class is part of this module's public API.
__all__ = ["RDD"]
class PythonEvalType(object):
    """
    Evaluation type of python rdd.

    These values are internal to PySpark.

    These values should match values in org.apache.spark.api.python.PythonEvalType.
    """
    # Mirrors the JVM-side constants — do not renumber.
    NON_UDF = 0
    SQL_BATCHED_UDF = 100
    SQL_SCALAR_PANDAS_UDF = 200
    SQL_GROUPED_MAP_PANDAS_UDF = 201
def portable_hash(x):
    """
    Return a hash code that is consistent across runs for builtin types,
    especially for None and tuples containing None. The tuple algorithm
    mirrors the one used by CPython 2.7.

    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
    if x is None:
        return 0
    if not isinstance(x, tuple):
        return hash(x)
    # CPython-2.7-style tuple hash, folding in each element's portable hash.
    acc = 0x345678
    for item in x:
        acc ^= portable_hash(item)
        acc *= 1000003
        acc &= sys.maxsize
    acc ^= len(x)
    if acc == -1:
        acc = -2
    return int(acc)
class BoundedFloat(float):
    """
    A float produced by an approximate job, annotated with the confidence
    level and the low/high bounds of the estimate.

    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """
    def __new__(cls, mean, confidence, low, high):
        # The float value itself is the mean; the bounds ride along
        # as plain attributes.
        bounded = float.__new__(cls, mean)
        bounded.confidence = confidence
        bounded.low = low
        bounded.high = high
        return bounded
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(sock_info, serializer):
    """
    Connect to the local JVM server socket that streams job results and
    return a deserializing iterator over that stream.

    :param sock_info: (port, auth_secret) pair provided by the JVM.
    :param serializer: Serializer used to decode the byte stream.
    :raises Exception: If no address for localhost:port accepts the connection.
    """
    port, auth_secret = sock_info
    sock = None
    # Support for both IPv4 and IPv6.
    # On most of IPv6-ready systems, IPv6 will take precedence.
    for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = socket.socket(af, socktype, proto)
        try:
            # Timeout applies to the connect attempt only; cleared below.
            sock.settimeout(15)
            sock.connect(sa)
        except socket.error:
            sock.close()
            sock = None
            continue
        break
    if not sock:
        raise Exception("could not open socket")
    # The RDD materialization time is unpredicable, if we set a timeout for socket reading
    # operation, it will very possibly fail. See SPARK-18281.
    sock.settimeout(None)
    sockfile = sock.makefile("rwb", 65536)
    do_server_auth(sockfile, auth_secret)
    # The socket will be automatically closed when garbage-collected.
    return serializer.load_stream(sockfile)
def ignore_unicode_prefix(f):
    """
    Ignore the 'u' prefix of string in doc tests, to make it works
    in both python 2 and 3

    :param f: Function whose docstring is rewritten in place (Python 3 only).
    :returns: ``f`` itself.
    """
    if sys.version >= '3':
        # the representation of unicode string in Python 3 does not have prefix 'u',
        # so remove the prefix 'u' for doc tests
        if f.__doc__:
            # Guard: re.sub(None) raised TypeError for functions that
            # have no docstring at all.
            literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
            f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
    return f
class Partitioner(object):
    """
    Pairs a partition count with a partitioning function. Two partitioners
    compare equal exactly when both the count and the function match, which
    is how co-partitioned RDDs are detected.
    """
    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        if not isinstance(other, Partitioner):
            return False
        return (self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        # Map a key to its partition index.
        return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
    def _pickled(self):
        # Re-serialize with the default pickling serializer so downstream
        # operations can assume pickled batches.
        return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
    def id(self):
        """
        A unique ID for this RDD (within its SparkContext).

        The ID is fetched from the JVM once in ``__init__`` and cached,
        so this call involves no Py4J round trip.
        """
        return self._id
    def __repr__(self):
        # Delegate to the JVM-side toString() of the wrapped Java RDD.
        return self._jrdd.toString()
    def __getnewargs__(self):
        # This method is called when attempting to pickle an RDD, which is always an error:
        # RDDs must never be serialized into a task closure, so fail loudly
        # with an explanation instead of producing a broken pickle.
        raise Exception(
            "It appears that you are attempting to broadcast an RDD or reference an RDD from an "
            "action or transformation. RDD transformations and actions can only be invoked by the "
            "driver, not inside of other transformations; for example, "
            "rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
            "transformation and count action cannot be performed inside of the rdd1.map "
            "transformation. For more information, see SPARK-5063."
        )
    @property
    def context(self):
        """
        The L{SparkContext} that this RDD was created on.
        """
        return self.ctx
    def cache(self):
        """
        Persist this RDD with the default storage level (C{MEMORY_ONLY}).

        :returns: self, to allow chaining.
        """
        # persist() also sets is_cached; kept explicit here for clarity.
        self.is_cached = True
        self.persist(StorageLevel.MEMORY_ONLY)
        return self
    def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
        """
        Set this RDD's storage level to persist its values across operations
        after the first time it is computed. This can only be used to assign
        a new storage level if the RDD does not have a storage level set yet.
        If no storage level is specified defaults to (C{MEMORY_ONLY}).

        :param storageLevel: Desired StorageLevel.
        :returns: self, to allow chaining.

        >>> rdd = sc.parallelize(["b", "a", "c"])
        >>> rdd.persist().is_cached
        True
        """
        self.is_cached = True
        # Translate the Python StorageLevel into its JVM counterpart.
        javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
        self._jrdd.persist(javaStorageLevel)
        return self
    def unpersist(self):
        """
        Mark the RDD as non-persistent, and remove all blocks for it from
        memory and disk.

        :returns: self, to allow chaining.
        """
        self.is_cached = False
        self._jrdd.unpersist()
        return self
    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
        checkpoint directory set with L{SparkContext.setCheckpointDir()} and
        all references to its parent RDDs will be removed. This function must
        be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        on a file will require recomputation.
        """
        # Flag on the Python side, then delegate to the JVM RDD.
        self.is_checkpointed = True
        self._jrdd.rdd().checkpoint()
    def isCheckpointed(self):
        """
        Return whether this RDD is checkpointed and materialized, either reliably or locally.
        """
        return self._jrdd.rdd().isCheckpointed()
    def localCheckpoint(self):
        """
        Mark this RDD for local checkpointing using Spark's existing caching layer.

        This method is for users who wish to truncate RDD lineages while skipping the expensive
        step of replicating the materialized data in a reliable distributed file system. This is
        useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).

        Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
        data is written to ephemeral local storage in the executors instead of to a reliable,
        fault-tolerant storage. The effect is that if an executor fails during the computation,
        the checkpointed data may no longer be accessible, causing an irrecoverable job failure.

        This is NOT safe to use with dynamic allocation, which removes executors along
        with their cached blocks. If you must use both features, you are advised to set
        L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.

        The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
        """
        self._jrdd.rdd().localCheckpoint()
    def isLocallyCheckpointed(self):
        """
        Return whether this RDD is marked for local checkpointing.

        Exposed for testing.
        """
        return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        :param f: function of (splitIndex, iterator) -> iterator.
        :param preservesPartitioning: whether the partitioner carries over.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithIndex(f).sum()
        6
        """
        return PipelinedRDD(self, f, preservesPartitioning)
    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Deprecated: use mapPartitionsWithIndex instead.

        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        warnings.warn("mapPartitionsWithSplit is deprecated; "
                      "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
        return self.mapPartitionsWithIndex(f, preservesPartitioning)
    def getNumPartitions(self):
        """
        Returns the number of partitions in RDD

        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> rdd.getNumPartitions()
        2
        """
        return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
    def sample(self, withReplacement, fraction, seed=None):
        """
        Return a sampled subset of this RDD.

        :param withReplacement: can elements be sampled multiple times (replaced when sampled out)
        :param fraction: expected size of the sample as a fraction of this RDD's size
            without replacement: probability that each element is chosen; fraction must be [0, 1]
            with replacement: expected number of times each element is chosen; fraction must be >= 0
        :param seed: seed for the random number generator

        .. note:: This is not guaranteed to provide exactly the fraction specified of the total
            count of the given :class:`DataFrame`.

        >>> rdd = sc.parallelize(range(100), 4)
        >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
        True
        """
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        # Sampling never moves elements between partitions, hence
        # preservesPartitioning=True.
        return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
    # this is ported from scala/spark/RDD.scala
    def takeSample(self, withReplacement, num, seed=None):
        """
        Return a fixed-size sampled subset of this RDD.

        .. note:: This method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.

        :param withReplacement: whether elements may be sampled multiple times.
        :param num: exact number of elements to return (capped at the RDD
            size when sampling without replacement).
        :param seed: seed for the random number generator.

        >>> rdd = sc.parallelize(range(0, 10))
        >>> len(rdd.takeSample(True, 20, 1))
        20
        >>> len(rdd.takeSample(False, 5, 2))
        5
        >>> len(rdd.takeSample(False, 15, 3))
        10
        """
        numStDev = 10.0
        if num < 0:
            raise ValueError("Sample size cannot be negative.")
        elif num == 0:
            return []
        initialCount = self.count()
        if initialCount == 0:
            return []
        rand = random.Random(seed)
        if (not withReplacement) and num >= initialCount:
            # shuffle current RDD and return
            samples = self.collect()
            rand.shuffle(samples)
            return samples
        maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
        if num > maxSampleSize:
            raise ValueError(
                "Sample size cannot be greater than %d." % maxSampleSize)
        # Oversample by a fraction large enough that one pass usually suffices.
        fraction = RDD._computeFractionForSampleSize(
            num, initialCount, withReplacement)
        samples = self.sample(withReplacement, fraction, seed).collect()
        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < num:
            # TODO: add log warning for when more than one iteration was run
            seed = rand.randint(0, sys.maxsize)
            samples = self.sample(withReplacement, fraction, seed).collect()
        rand.shuffle(samples)
        return samples[0:num]
    @staticmethod
    def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
        """
        Returns a sampling rate that guarantees a sample of
        size >= sampleSizeLowerBound 99.99% of the time.

        :param sampleSizeLowerBound: minimum acceptable sample size.
        :param total: total number of elements in the RDD.
        :param withReplacement: whether sampling is done with replacement.

        How the sampling rate is determined:
        Let p = num / total, where num is the sample size and total is the
        total number of data points in the RDD. We're trying to compute
        q > p such that
          - when sampling with replacement, we're drawing each data point
            with prob_i ~ Pois(q), where we want to guarantee
            Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
            total), i.e. the failure rate of not having a sufficiently large
            sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
            to guarantee 0.9999 success rate for num > 12, but we need a
            slightly larger q (9 empirically determined).
          - when sampling without replacement, we're drawing each data point
            with prob_i ~ Binomial(total, fraction) and our choice of q
            guarantees 1-delta, or 0.9999 success rate, where success rate is
            defined the same as in sampling with replacement.
        """
        fraction = float(sampleSizeLowerBound) / total
        if withReplacement:
            numStDev = 5
            if (sampleSizeLowerBound < 12):
                numStDev = 9
            return fraction + numStDev * sqrt(fraction / total)
        else:
            delta = 0.00005
            gamma = - log(delta) / total
            return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
    def union(self, other):
        """
        Return the union of this RDD and another one.

        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> rdd.union(rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if self._jrdd_deserializer == other._jrdd_deserializer:
            rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
                      self._jrdd_deserializer)
        else:
            # These RDDs contain data in different serialized formats, so we
            # must normalize them to the default serializer.
            self_copy = self._reserialize()
            other_copy = other._reserialize()
            rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
                      self.ctx.serializer)
        # Keep the partitioner when both inputs agree on it and the union
        # did not change the partition count.
        if (self.partitioner == other.partitioner and
                self.getNumPartitions() == rdd.getNumPartitions()):
            rdd.partitioner = self.partitioner
        return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
    def _reserialize(self, serializer=None):
        # Return an RDD whose data uses ``serializer`` (defaulting to the
        # context serializer). If the serializers already match, self is
        # returned unchanged; otherwise ``self`` is rebound to a new
        # identity-mapped RDD tagged with the target serializer.
        serializer = serializer or self.ctx.serializer
        if self._jrdd_deserializer != serializer:
            self = self.map(lambda x: x, preservesPartitioning=True)
            self._jrdd_deserializer = serializer
        return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
    def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
                                           ascending=True, keyfunc=lambda x: x):
        """
        Repartition the RDD according to the given partitioner and, within each resulting partition,
        sort records by their keys.

        :param numPartitions: target number of partitions (defaults to the
            configured reduce parallelism).
        :param partitionFunc: function mapping a key to a partition.
        :param ascending: sort direction within each partition.
        :param keyfunc: function applied to each key before comparing.

        >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
        >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
        >>> rdd2.glom().collect()
        [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
        serializer = self._jrdd_deserializer
        def sortPartition(iterator):
            # Spill-to-disk sort, budgeted at 90% of the worker memory limit.
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
        return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
    def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
        """
        Sorts this RDD, which is assumed to consist of (key, value) pairs.

        :param ascending: sort direction.
        :param numPartitions: number of output partitions.
        :param keyfunc: function applied to each key before comparing.

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortByKey().first()
        ('1', 3)
        >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
        >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
        >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
        [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        memory = self._memory_limit()
        serializer = self._jrdd_deserializer
        def sortPartition(iterator):
            # Spill-to-disk sort, budgeted at 90% of the worker memory limit.
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
        if numPartitions == 1:
            # Single output partition: just coalesce and sort locally.
            if self.getNumPartitions() > 1:
                self = self.coalesce(1)
            return self.mapPartitions(sortPartition, True)
        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        rddSize = self.count()
        if not rddSize:
            return self  # empty RDD
        maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
        fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
        samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
        samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of the them has
        # an implicit boundary
        bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
                  for i in range(0, numPartitions - 1)]
        def rangePartitioner(k):
            # Route each key to the range bin its (mapped) key falls into.
            p = bisect.bisect_left(bounds, keyfunc(k))
            if ascending:
                return p
            else:
                return numPartitions - 1 - p
        return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
    def sortBy(self, keyfunc, ascending=True, numPartitions=None):
        """
        Sorts this RDD by the given keyfunc

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
        [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        """
        # Key, sort by key, then drop the key again.
        return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
    def cartesian(self, other):
        """
        Return the Cartesian product of this RDD and another one, that is, the
        RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
        C{b} is in C{other}.

        >>> rdd = sc.parallelize([1, 2])
        >>> sorted(rdd.cartesian(rdd).collect())
        [(1, 1), (1, 2), (2, 1), (2, 2)]
        """
        # Due to batching, we can't use the Java cartesian method.
        deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                             other._jrdd_deserializer)
        return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
    def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
        """
        Return an RDD of grouped items.

        :param f: function computing the group key for each element.
        :param numPartitions: number of output partitions.
        :param partitionFunc: function mapping a key to a partition.

        >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
        >>> result = rdd.groupBy(lambda x: x % 2).collect()
        >>> sorted([(x, sorted(y)) for (x, y) in result])
        [(0, [2, 8]), (1, [1, 1, 3, 5])]
        """
        return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
    @ignore_unicode_prefix
    def pipe(self, command, env=None, checkCode=False):
        """
        Return an RDD created by piping elements to a forked external process.

        >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
        [u'1', u'2', u'', u'3']

        :param checkCode: whether or not to check the return value of the shell command.
        """
        if env is None:
            env = dict()
        def func(iterator):
            pipe = Popen(
                shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
            def pipe_objs(out):
                # Feed one newline-terminated line per element to the child.
                for obj in iterator:
                    s = str(obj).rstrip('\n') + '\n'
                    out.write(s.encode('utf-8'))
                out.close()
            # Writer runs in a thread so we can read stdout concurrently.
            Thread(target=pipe_objs, args=[pipe.stdin]).start()
            def check_return_code():
                # Generator that yields nothing; chained after stdout so the
                # exit-code check runs only once all output is consumed.
                pipe.wait()
                if checkCode and pipe.returncode:
                    raise Exception("Pipe function `%s' exited "
                                    "with error code %d" % (command, pipe.returncode))
                else:
                    for i in range(0):
                        yield i
            return (x.rstrip(b'\n').decode('utf-8') for x in
                    chain(iter(pipe.stdout.readline, b''), check_return_code()))
        return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.

        .. note:: This method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.
        """
        with SCCallSiteSync(self.context) as css:
            sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
        # Results are streamed back over a local socket (_load_from_socket).
        return list(_load_from_socket(sock_info, self._jrdd_deserializer))
    def reduce(self, f):
        """
        Reduces the elements of this RDD using the specified commutative and
        associative binary operator. Currently reduces partitions locally.

        :raises ValueError: If the RDD is empty.

        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
        15
        >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
        10
        >>> sc.parallelize([]).reduce(add)
        Traceback (most recent call last):
            ...
        ValueError: Can not reduce() empty RDD
        """
        f = fail_on_stopiteration(f)
        def func(iterator):
            iterator = iter(iterator)
            try:
                initial = next(iterator)
            except StopIteration:
                # Empty partition: contribute nothing to the driver-side fold.
                return
            yield reduce(f, iterator, initial)
        # Reduce each partition locally, then fold the per-partition results.
        vals = self.mapPartitions(func).collect()
        if vals:
            return reduce(f, vals)
        raise ValueError("Can not reduce() empty RDD")
    def treeReduce(self, f, depth=2):
        """
        Reduces the elements of this RDD in a multi-level tree pattern.

        :param depth: suggested depth of the tree (default: 2)
        :raises ValueError: If depth < 1 or the RDD is empty.

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeReduce(add)
        -5
        >>> rdd.treeReduce(add, 1)
        -5
        >>> rdd.treeReduce(add, 2)
        -5
        >>> rdd.treeReduce(add, 5)
        -5
        >>> rdd.treeReduce(add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
        zeroValue = None, True  # Use the second entry to indicate whether this is a dummy value.
        def op(x, y):
            # Dummy values are absorbed so empty partitions don't poison
            # the aggregation.
            if x[1]:
                return y
            elif y[1]:
                return x
            else:
                return f(x[0], y[0]), False
        reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
        if reduced[1]:
            raise ValueError("Cannot reduce empty RDD.")
        return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
    def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
        """
        Aggregates the elements of this RDD in a multi-level tree
        pattern.
        :param depth: suggested depth of the tree (default: 2)
        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeAggregate(0, add, add)
        -5
        >>> rdd.treeAggregate(0, add, add, 1)
        -5
        >>> rdd.treeAggregate(0, add, add, 2)
        -5
        >>> rdd.treeAggregate(0, add, add, 5)
        -5
        >>> rdd.treeAggregate(0, add, add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
        if self.getNumPartitions() == 0:
            return zeroValue
        # First level: fold each partition into a single partial aggregate.
        def aggregatePartition(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc
        partiallyAggregated = self.mapPartitions(aggregatePartition)
        numPartitions = partiallyAggregated.getNumPartitions()
        # Fan-in factor per level, chosen so roughly `depth` levels are needed.
        scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
        # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
        # aggregation.
        while numPartitions > scale + numPartitions / scale:
            numPartitions /= scale
            curNumPartitions = int(numPartitions)
            # Shuffle partial aggregates into fewer partitions and combine there.
            def mapPartition(i, iterator):
                for obj in iterator:
                    yield (i % curNumPartitions, obj)
            partiallyAggregated = partiallyAggregated \
                .mapPartitionsWithIndex(mapPartition) \
                .reduceByKey(combOp, curNumPartitions) \
                .values()
        # Final combine of the remaining partials on the driver.
        return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
    def sum(self):
        """
        Add up the elements in this RDD.
        >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
        6.0
        """
        # Sum each partition locally, then fold the partial sums with 0 as the
        # neutral element.
        return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
    def count(self):
        """
        Return the number of elements in this RDD.
        >>> sc.parallelize([2, 3, 4]).count()
        3
        """
        # Count per partition without materializing elements, then add up.
        return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
    def histogram(self, buckets):
        """
        Compute a histogram using the provided buckets. The buckets
        are all open to the right except for the last which is closed.
        e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
        and 50 we would have a histogram of 1,0,1.
        If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) inseration to O(1) per
        element (where n is the number of buckets).
        Buckets must be sorted, not contain any duplicates, and have
        at least two elements.
        If `buckets` is a number, it will generate buckets which are
        evenly spaced between the minimum and maximum of the RDD. For
        example, if the min value is 0 and the max is 100, given `buckets`
        as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
        be at least 1. An exception is raised if the RDD contains infinity.
        If the elements in the RDD do not vary (max == min), a single bucket
        will be used.
        The return value is a tuple of buckets and histogram.
        >>> rdd = sc.parallelize(range(51))
        >>> rdd.histogram(2)
        ([0, 25, 50], [25, 26])
        >>> rdd.histogram([0, 5, 25, 50])
        ([0, 5, 25, 50], [5, 20, 26])
        >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
        ([0, 15, 30, 45, 60], [15, 15, 15, 6])
        >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
        >>> rdd.histogram(("a", "b", "c"))
        (('a', 'b', 'c'), [2, 2])
        """
        if isinstance(buckets, int):
            if buckets < 1:
                raise ValueError("number of buckets must be >= 1")
            # filter out non-comparable elements
            def comparable(x):
                if x is None:
                    return False
                if type(x) is float and isnan(x):
                    return False
                return True
            filtered = self.filter(comparable)
            # faster than stats(): one pass computing (min, max) pairs
            def minmax(a, b):
                return min(a[0], b[0]), max(a[1], b[1])
            try:
                minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
            except TypeError as e:
                # reduce() raises "Can not reduce() empty RDD"; translate it.
                if " empty " in str(e):
                    raise ValueError("can not generate buckets from empty RDD")
                raise
            if minv == maxv or buckets == 1:
                # degenerate range: everything falls into a single bucket
                return [minv, maxv], [filtered.count()]
            try:
                inc = (maxv - minv) / buckets
            except TypeError:
                raise TypeError("Can not generate buckets with non-number in RDD")
            if isinf(inc):
                raise ValueError("Can not generate buckets with infinite value")
            # keep them as integer if possible
            inc = int(inc)
            if inc * buckets != maxv - minv:
                # range not evenly divisible by an int step; fall back to float
                inc = (maxv - minv) * 1.0 / buckets
            buckets = [i * inc + minv for i in range(buckets)]
            buckets.append(maxv)  # fix accumulated error
            even = True
        elif isinstance(buckets, (list, tuple)):
            if len(buckets) < 2:
                raise ValueError("buckets should have more than one value")
            if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
                raise ValueError("can not have None or NaN in buckets")
            if sorted(buckets) != list(buckets):
                raise ValueError("buckets should be sorted")
            if len(set(buckets)) != len(buckets):
                raise ValueError("buckets should not contain duplicated values")
            minv = buckets[0]
            maxv = buckets[-1]
            even = False
            inc = None
            try:
                steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
            except TypeError:
                pass  # objects in buckets do not support '-'
            else:
                # detect evenly-spaced buckets to enable the O(1) index path
                if max(steps) - min(steps) < 1e-10:  # handle precision errors
                    even = True
                    inc = (maxv - minv) / (len(buckets) - 1)
        else:
            raise TypeError("buckets should be a list or tuple or number(int or long)")
        # NOTE: this inner function intentionally shadows the method name; it is
        # only referenced locally below.
        def histogram(iterator):
            counters = [0] * len(buckets)
            for i in iterator:
                # skip None/NaN and anything outside [minv, maxv]
                if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                    continue
                # even buckets: direct index; otherwise binary search
                t = (int((i - minv) / inc) if even
                     else bisect.bisect_right(buckets, i) - 1)
                counters[t] += 1
            # add last two together: the final bucket is closed on the right
            last = counters.pop()
            counters[-1] += last
            return [counters]
        def mergeCounters(a, b):
            return [i + j for i, j in zip(a, b)]
        return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
    def mean(self):
        """
        Compute the mean of this RDD's elements.
        >>> sc.parallelize([1, 2, 3]).mean()
        2.0
        """
        # Delegates to the one-pass StatCounter built by stats().
        return self.stats().mean()
    def variance(self):
        """
        Compute the variance of this RDD's elements.
        >>> sc.parallelize([1, 2, 3]).variance()
        0.666...
        """
        # Population variance (divides by N); see sampleVariance() for N-1.
        return self.stats().variance()
    def stdev(self):
        """
        Compute the standard deviation of this RDD's elements.
        >>> sc.parallelize([1, 2, 3]).stdev()
        0.816...
        """
        # Population standard deviation; see sampleStdev() for the N-1 variant.
        return self.stats().stdev()
    def sampleStdev(self):
        """
        Compute the sample standard deviation of this RDD's elements (which
        corrects for bias in estimating the standard deviation by dividing by
        N-1 instead of N).
        >>> sc.parallelize([1, 2, 3]).sampleStdev()
        1.0
        """
        return self.stats().sampleStdev()
    def sampleVariance(self):
        """
        Compute the sample variance of this RDD's elements (which corrects
        for bias in estimating the variance by dividing by N-1 instead of N).
        >>> sc.parallelize([1, 2, 3]).sampleVariance()
        1.0
        """
        return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
    def take(self, num):
        """
        Take the first num elements of the RDD.
        It works by first scanning one partition, and use the results from
        that partition to estimate the number of additional partitions needed
        to satisfy the limit.
        Translated from the Scala implementation in RDD#take().
        .. note:: this method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.
        >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
        [2, 3]
        >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
        [2, 3, 4, 5, 6]
        >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
        [91, 92, 93]
        """
        items = []
        totalParts = self.getNumPartitions()
        partsScanned = 0
        while len(items) < num and partsScanned < totalParts:
            # The number of partitions to try in this iteration.
            # It is ok for this number to be greater than totalParts because
            # we actually cap it at totalParts in runJob.
            numPartsToTry = 1
            if partsScanned > 0:
                # If we didn't find any rows after the previous iteration,
                # quadruple and retry. Otherwise, interpolate the number of
                # partitions we need to try, but overestimate it by 50%.
                # We also cap the estimation in the end.
                if len(items) == 0:
                    numPartsToTry = partsScanned * 4
                else:
                    # the first paramter of max is >=1 whenever partsScanned >= 2
                    numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                    numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
            left = num - len(items)
            # Closure shipped to workers: pulls at most `left` elements from a
            # partition and stops early.
            def takeUpToNumLeft(iterator):
                iterator = iter(iterator)
                taken = 0
                while taken < left:
                    try:
                        yield next(iterator)
                    except StopIteration:
                        return
                    taken += 1
            # Run the job only on the next slice of partitions.
            p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
            res = self.context.runJob(self, takeUpToNumLeft, p)
            items += res
            partsScanned += numPartsToTry
        return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
    def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
        converted for output using either user specified converters or, by default,
        L{org.apache.spark.api.python.JavaToWritableConverter}.
        :param conf: Hadoop job configuration, passed in as a dict
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        # The trailing True selects the new-API (mapreduce) path on the JVM side;
        # compare saveAsHadoopDataset, which passes False.
        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                    keyConverter, valueConverter, True)
    def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                               keyConverter=None, valueConverter=None, conf=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
        will be inferred if not specified. Keys and values are converted for output using either
        user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
        C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
        of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
        :param path: path to Hadoop file
        :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.IntWritable", None by default)
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.Text", None by default)
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop job configuration, passed in as a dict (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        # Re-serialize with the pickle serializer before handing off to the JVM.
        pickledRDD = self._pickled()
        self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
                                                       outputFormatClass,
                                                       keyClass, valueClass,
                                                       keyConverter, valueConverter, jconf)
    def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
        converted for output using either user specified converters or, by default,
        L{org.apache.spark.api.python.JavaToWritableConverter}.
        :param conf: Hadoop job configuration, passed in as a dict
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        # The trailing False selects the old-API (mapred) path on the JVM side;
        # compare saveAsNewAPIHadoopDataset, which passes True.
        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                    keyConverter, valueConverter, False)
    def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                         keyConverter=None, valueConverter=None, conf=None,
                         compressionCodecClass=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the old Hadoop OutputFormat API (mapred package). Key and value types
        will be inferred if not specified. Keys and values are converted for output using either
        user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
        C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
        of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
        :param path: path to Hadoop file
        :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
               (e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.IntWritable", None by default)
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.Text", None by default)
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: (None by default)
        :param compressionCodecClass: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        # Re-serialize with the pickle serializer before handing off to the JVM.
        pickledRDD = self._pickled()
        self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
                                                 outputFormatClass,
                                                 keyClass, valueClass,
                                                 keyConverter, valueConverter,
                                                 jconf, compressionCodecClass)
    def saveAsSequenceFile(self, path, compressionCodecClass=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
        RDD's key and value types. The mechanism is as follows:
        1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
        2. Keys and values of this Java RDD are converted to Writables and written out.
        :param path: path to sequence file
        :param compressionCodecClass: (None by default)
        """
        # Re-serialize with the pickle serializer before handing off to the JVM.
        pickledRDD = self._pickled()
        self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
                                                   path, compressionCodecClass)
    def saveAsPickleFile(self, path, batchSize=10):
        """
        Save this RDD as a SequenceFile of serialized objects. The serializer
        used is L{pyspark.serializers.PickleSerializer}, default batch size
        is 10.
        >>> tmpFile = NamedTemporaryFile(delete=True)
        >>> tmpFile.close()
        >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
        >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
        ['1', '2', 'rdd', 'spark']
        """
        # batchSize == 0 means "let the serializer pick the batch size".
        if batchSize == 0:
            ser = AutoBatchedSerializer(PickleSerializer())
        else:
            ser = BatchedSerializer(PickleSerializer(), batchSize)
        self._reserialize(ser)._jrdd.saveAsObjectFile(path)
    @ignore_unicode_prefix
    def saveAsTextFile(self, path, compressionCodecClass=None):
        """
        Save this RDD as a text file, using string representations of elements.
        @param path: path to text file
        @param compressionCodecClass: (None by default) string i.e.
            "org.apache.hadoop.io.compress.GzipCodec"
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
        >>> from fileinput import input
        >>> from glob import glob
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
        Empty lines are tolerated when saving to text files.
        >>> tempFile2 = NamedTemporaryFile(delete=True)
        >>> tempFile2.close()
        >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
        >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
        '\\n\\n\\nbar\\nfoo\\n'
        Using compressionCodecClass
        >>> tempFile3 = NamedTemporaryFile(delete=True)
        >>> tempFile3.close()
        >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
        >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
        >>> from fileinput import input, hook_compressed
        >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
        >>> b''.join(result).decode('utf-8')
        u'bar\\nfoo\\n'
        """
        # Stringify every element and encode to UTF-8 bytes so the JVM side can
        # write raw lines (note: `unicode` is the Py2 name, aliased for Py3).
        def func(split, iterator):
            for x in iterator:
                if not isinstance(x, (unicode, bytes)):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = self.mapPartitionsWithIndex(func)
        # Elements are already bytes; skip the normal Python serializer.
        keyed._bypass_serializer = True
        if compressionCodecClass:
            compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
            keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
        else:
            keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
    # Pair functions
    def collectAsMap(self):
        """
        Return the key-value pairs in this RDD to the master as a dictionary.
        .. note:: this method should only be used if the resulting data is expected
            to be small, as all the data is loaded into the driver's memory.
        >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
        >>> m[1]
        2
        >>> m[3]
        4
        """
        # Duplicate keys are silently collapsed: the last pair collected wins.
        return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
    def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
        """
        Merge the values for each key using an associative and commutative reduce function.
        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.
        Output will be partitioned with C{numPartitions} partitions, or
        the default parallelism level if C{numPartitions} is not specified.
        Default partitioner is hash-partition.
        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKey(add).collect())
        [('a', 2), ('b', 1)]
        """
        # combineByKey with identity as createCombiner and func for both merges.
        return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
    def countByKey(self):
        """
        Count the number of elements for each key, and return the result to the
        master as a dictionary.
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.countByKey().items())
        [('a', 2), ('b', 1)]
        """
        # Drop the values and count key occurrences on the driver.
        return self.map(lambda x: x[0]).countByValue()
    def join(self, other, numPartitions=None):
        """
        Return an RDD containing all pairs of elements with matching keys in
        C{self} and C{other}.
        Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
        (k, v1) is in C{self} and (k, v2) is in C{other}.
        Performs a hash join across the cluster.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("a", 3)])
        >>> sorted(x.join(y).collect())
        [('a', (1, 2)), ('a', (1, 3))]
        """
        # Delegates to the shared join helper in pyspark.join.
        return python_join(self, other, numPartitions)
    def leftOuterJoin(self, other, numPartitions=None):
        """
        Perform a left outer join of C{self} and C{other}.
        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in C{other} have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(x.leftOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None))]
        """
        # Delegates to the shared join helper in pyspark.join.
        return python_left_outer_join(self, other, numPartitions)
    def rightOuterJoin(self, other, numPartitions=None):
        """
        Perform a right outer join of C{self} and C{other}.
        For each element (k, w) in C{other}, the resulting RDD will either
        contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
        if no elements in C{self} have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(y.rightOuterJoin(x).collect())
        [('a', (2, 1)), ('b', (None, 4))]
        """
        # Delegates to the shared join helper in pyspark.join.
        return python_right_outer_join(self, other, numPartitions)
    def fullOuterJoin(self, other, numPartitions=None):
        """
        Perform a full outer join of C{self} and C{other}.
        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in C{other} have key k.
        Similarly, for each element (k, w) in C{other}, the resulting RDD will
        either contain all pairs (k, (v, w)) for v in C{self}, or the pair
        (k, (None, w)) if no elements in C{self} have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("c", 8)])
        >>> sorted(x.fullOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
        """
        # Delegates to the shared join helper in pyspark.join.
        return python_full_outer_join(self, other, numPartitions)
    # TODO: add option to control map-side combining
    # portable_hash is used as default, because builtin hash of None is different
    # cross machines.
    def partitionBy(self, numPartitions, partitionFunc=portable_hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.
        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> len(set(sets[0]).intersection(set(sets[1])))
        0
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        partitioner = Partitioner(numPartitions, partitionFunc)
        # Already partitioned the same way: nothing to do.
        if self.partitioner == partitioner:
            return self
        # Transferring O(n) objects to Java is too expensive.
        # Instead, we'll form the hash buckets in Python,
        # transferring O(numPartitions) objects to Java.
        # Each object is a (splitNumber, [objects]) pair.
        # In order to avoid too huge objects, the objects are
        # grouped into chunks.
        outputSerializer = self.ctx._unbatched_serializer
        # Spill threshold: half the configured Python worker memory.
        limit = (_parse_memory(self.ctx._conf.get(
            "spark.python.worker.memory", "512m")) / 2)
        def add_shuffle_key(split, iterator):
            # Group (k, v) pairs into per-target-partition buckets and emit
            # them as (packed split id, serialized chunk) byte pairs.
            buckets = defaultdict(list)
            c, batch = 0, min(10 * numPartitions, 1000)
            for k, v in iterator:
                buckets[partitionFunc(k) % numPartitions].append((k, v))
                c += 1
                # check used memory and avg size of chunk of objects
                if (c % 1000 == 0 and get_used_memory() > limit
                        or c > batch):
                    n, size = len(buckets), 0
                    for split in list(buckets.keys()):
                        yield pack_long(split)
                        d = outputSerializer.dumps(buckets[split])
                        del buckets[split]
                        yield d
                        size += len(d)
                    avg = int(size / n) >> 20
                    # let 1M < avg < 10M
                    if avg < 1:
                        batch *= 1.5
                    elif avg > 10:
                        batch = max(int(batch / 1.5), 1)
                    c = 0
            # Flush whatever remains after the input is exhausted.
            for split, items in buckets.items():
                yield pack_long(split)
                yield outputSerializer.dumps(items)
        keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
        # Output is already raw bytes; skip the normal Python serializer.
        keyed._bypass_serializer = True
        with SCCallSiteSync(self.context) as css:
            pairRDD = self.ctx._jvm.PairwiseRDD(
                keyed._jrdd.rdd()).asJavaPairRDD()
            jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                           id(partitionFunc))
        jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
        rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
        rdd.partitioner = partitioner
        return rdd
    # TODO: add control over map-side aggregation
    def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                     numPartitions=None, partitionFunc=portable_hash):
        """
        Generic function to combine the elements for each key using a custom
        set of aggregation functions.
        Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
        type" C.
        Users provide three functions:
            - C{createCombiner}, which turns a V into a C (e.g., creates
              a one-element list)
            - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
              a list)
            - C{mergeCombiners}, to combine two C's into a single one (e.g., merges
              the lists)
        To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
        modify and return their first argument instead of creating a new C.
        In addition, users can control the partitioning of the output RDD.
        .. note:: V and C can be different -- for example, one might group an RDD of type
            (Int, Int) into an RDD of type (Int, List[Int]).
        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
        >>> def to_list(a):
        ...     return [a]
        ...
        >>> def append(a, b):
        ...     a.append(b)
        ...     return a
        ...
        >>> def extend(a, b):
        ...     a.extend(b)
        ...     return a
        ...
        >>> sorted(x.combineByKey(to_list, append, extend).collect())
        [('a', [1, 2]), ('b', [1])]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        serializer = self.ctx.serializer
        memory = self._memory_limit()
        agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
        # Map-side combine: merge values locally before the shuffle, spilling
        # to disk if the 90%-of-limit memory budget is exceeded.
        def combineLocally(iterator):
            merger = ExternalMerger(agg, memory * 0.9, serializer)
            merger.mergeValues(iterator)
            return merger.items()
        locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
        shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
        # Reduce side: merge the per-mapper combiners for each key.
        def _mergeCombiners(iterator):
            merger = ExternalMerger(agg, memory, serializer)
            merger.mergeCombiners(iterator)
            return merger.items()
        return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
    def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
                       partitionFunc=portable_hash):
        """
        Aggregate the values of each key, using given combine functions and a neutral
        "zero value". This function can return a different result type, U, than the type
        of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's, The former operation is used for merging
        values within a partition, and the latter is used for merging values between
        partitions. To avoid memory allocation, both of these functions are
        allowed to modify and return their first argument instead of creating a new U.
        """
        # Deep-copy the zero value per key so in-place seqFunc mutations of one
        # key's accumulator cannot leak into another's.
        def createZero():
            return copy.deepcopy(zeroValue)
        return self.combineByKey(
            lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
    def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
        """
        Merge the values for each key using an associative function "func"
        and a neutral "zeroValue" which may be added to the result an
        arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication.).
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> from operator import add
        >>> sorted(rdd.foldByKey(0, add).collect())
        [('a', 2), ('b', 1)]
        """
        # Deep-copy the zero value per key so in-place func mutations of one
        # key's accumulator cannot leak into another's.
        def createZero():
            return copy.deepcopy(zeroValue)
        return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
                                 partitionFunc)
    def _memory_limit(self):
        # Python worker memory budget (bytes) used to size external spill merges.
        return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
    # TODO: support variant with custom partitioner
    def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
        """
        Group the values for each key in the RDD into a single sequence.
        Hash-partitions the resulting RDD with numPartitions partitions.
        .. note:: If you are grouping in order to perform an aggregation (such as a
            sum or average) over each key, using reduceByKey or aggregateByKey will
            provide much better performance.
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.groupByKey().mapValues(len).collect())
        [('a', 2), ('b', 1)]
        >>> sorted(rdd.groupByKey().mapValues(list).collect())
        [('a', [1, 1]), ('b', [1])]
        """
        # Standard combiner trio: wrap a value in a list, append, extend.
        def createCombiner(x):
            return [x]
        def mergeValue(xs, x):
            xs.append(x)
            return xs
        def mergeCombiners(a, b):
            a.extend(b)
            return a
        memory = self._memory_limit()
        serializer = self._jrdd_deserializer
        agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
        # Map-side grouping with spilling at 90% of the memory budget.
        def combine(iterator):
            merger = ExternalMerger(agg, memory * 0.9, serializer)
            merger.mergeValues(iterator)
            return merger.items()
        locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
        shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
        # Reduce side uses ExternalGroupBy, which can spill grouped values.
        def groupByKey(it):
            merger = ExternalGroupBy(agg, memory, serializer)
            merger.mergeCombiners(it)
            return merger.items()
        return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a flatMap function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
    >>> def f(x): return x
    >>> x.flatMapValues(f).collect()
    [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
    """
    def expand(kv):
        # Pair every element produced from the value with the original key.
        for element in f(kv[1]):
            yield (kv[0], element)

    # Keys are untouched, so the partitioning can be preserved.
    return self.flatMap(expand, preservesPartitioning=True)
def mapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a map function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
    >>> def f(x): return len(x)
    >>> x.mapValues(f).collect()
    [('a', 3), ('b', 1)]
    """
    def apply_to_value(kv):
        # Transform only the value; the key passes through unchanged.
        return (kv[0], f(kv[1]))

    return self.map(apply_to_value, preservesPartitioning=True)
def groupWith(self, other, *others):
    """
    Alias for cogroup but with support for multiple RDDs.

    >>> w = sc.parallelize([("a", 5), ("b", 6)])
    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> z = sc.parallelize([("b", 42)])
    >>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
    [('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
    """
    # Cogroup this RDD together with all of the supplied RDDs at once.
    all_rdds = (self, other) + others
    return python_cogroup(all_rdds, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
    """
    For each key k in C{self} or C{other}, return a resulting RDD that
    contains a tuple with the list of values for that key in C{self} as
    well as C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
    [('a', ([1], [2])), ('b', ([4], []))]
    """
    return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
    """
    Return a subset of this RDD sampled by key (via stratified sampling).

    Create a sample of this RDD using variable sampling rates for
    different keys as specified by fractions, a key to sampling rate map.

    >>> fractions = {"a": 0.2, "b": 0.1}
    >>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
    >>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
    >>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
    True
    >>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
    True
    >>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
    True
    """
    # Reject negative rates up front; the sampler assumes they are valid.
    for rate in fractions.values():
        assert rate >= 0.0, "Negative fraction value: %s" % rate
    sampler = RDDStratifiedSampler(withReplacement, fractions, seed)
    return self.mapPartitionsWithIndex(sampler.func, True)
def subtractByKey(self, other, numPartitions=None):
    """
    Return each (key, value) pair in C{self} that has no pair with matching
    key in C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtractByKey(y).collect())
    [('b', 4), ('b', 5)]
    """
    def keep_unmatched(pair):
        # Keep a key only when it has values on our side and none on the
        # other side of the cogroup.
        _, (mine, theirs) = pair
        return mine and not theirs

    joined = self.cogroup(other, numPartitions)
    # Re-emit the surviving keys' original values one pair at a time.
    return joined.filter(keep_unmatched).flatMapValues(lambda vals: vals[0])
def subtract(self, other, numPartitions=None):
    """
    Return each value in C{self} that is not contained in C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtract(y).collect())
    [('a', 1), ('b', 4), ('b', 5)]
    """
    # Tag every element with a placeholder True so both sides become
    # key-value pairs keyed by the element itself.
    tagged_other = other.map(lambda x: (x, True))
    tagged_self = self.map(lambda x: (x, True))
    return tagged_self.subtractByKey(tagged_other, numPartitions).keys()
def keyBy(self, f):
    """
    Creates tuples of the elements in this RDD by applying C{f}.

    >>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
    >>> y = sc.parallelize(zip(range(0,5), range(0,5)))
    >>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
    [(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
    """
    def to_pair(element):
        # The computed key comes first, the original element second.
        return (f(element), element)

    return self.map(to_pair)
def repartition(self, numPartitions):
    """
    Return a new RDD that has exactly numPartitions partitions.

    Can increase or decrease the level of parallelism in this RDD.
    Internally, this uses a shuffle to redistribute data.
    If you are decreasing the number of partitions in this RDD, consider
    using `coalesce`, which can avoid performing a shuffle.

    >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
    >>> sorted(rdd.glom().collect())
    [[1], [2, 3], [4, 5], [6, 7]]
    >>> len(rdd.repartition(2).glom().collect())
    2
    >>> len(rdd.repartition(10).glom().collect())
    10
    """
    # Repartitioning is simply a coalesce that is always allowed to shuffle.
    return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
    """
    Return a new RDD that is reduced into `numPartitions` partitions.

    >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
    [[1], [2, 3], [4, 5]]
    >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
    [[1, 2, 3, 4, 5]]
    """
    if shuffle:
        # Decrease the batch size in order to distribute evenly the elements across output
        # partitions. Otherwise, repartition will possibly produce highly skewed partitions.
        batchSize = min(10, self.ctx._batchSize or 1024)
        ser = BatchedSerializer(PickleSerializer(), batchSize)
        # Re-serialize with the smaller batch size before handing the RDD to
        # the JVM-side coalesce.
        selfCopy = self._reserialize(ser)
        jrdd_deserializer = selfCopy._jrdd_deserializer
        jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
    else:
        # No shuffle: keep the current serialization untouched.
        jrdd_deserializer = self._jrdd_deserializer
        jrdd = self._jrdd.coalesce(numPartitions, shuffle)
    return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
    """
    Zips this RDD with another one, returning key-value pairs with the
    first element in each RDD second element in each RDD, etc. Assumes
    that the two RDDs have the same number of partitions and the same
    number of elements in each partition (e.g. one was made through
    a map on the other).

    >>> x = sc.parallelize(range(0,5))
    >>> y = sc.parallelize(range(1000, 1005))
    >>> x.zip(y).collect()
    [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
    """
    def get_batch_size(ser):
        if isinstance(ser, BatchedSerializer):
            return ser.batchSize
        return 1  # not batched

    def batch_as(rdd, batchSize):
        return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))

    # Both sides must use the same, fixed batch size so the JVM zip pairs
    # serialized batches element-for-element.
    my_batch = get_batch_size(self._jrdd_deserializer)
    other_batch = get_batch_size(other._jrdd_deserializer)
    if my_batch != other_batch or not my_batch:
        # use the smallest batchSize for both of them
        batchSize = min(my_batch, other_batch)
        if batchSize <= 0:
            # auto batched or unlimited
            batchSize = 100
        other = batch_as(other, batchSize)
        self = batch_as(self, batchSize)

    if self.getNumPartitions() != other.getNumPartitions():
        raise ValueError("Can only zip with RDD which has the same number of partitions")

    # There will be an Exception in JVM if there are different number
    # of items in each partitions.
    pairRDD = self._jrdd.zip(other._jrdd)
    deserializer = PairDeserializer(self._jrdd_deserializer,
                                    other._jrdd_deserializer)
    return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
    """
    Zips this RDD with its element indices.

    The ordering is first based on the partition index and then the
    ordering of items within each partition. So the first item in
    the first partition gets index 0, and the last item in the last
    partition receives the largest index.

    This method needs to trigger a spark job when this RDD contains
    more than one partitions.

    >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
    [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
    """
    offsets = [0]
    if self.getNumPartitions() > 1:
        # One job to count each partition; partition k's indices start at
        # the sum of the sizes of all earlier partitions.
        sizes = self.mapPartitions(lambda it: [sum(1 for _ in it)]).collect()
        for size in sizes[:-1]:
            offsets.append(offsets[-1] + size)

    def attach_index(split, iterator):
        return ((item, idx) for idx, item in enumerate(iterator, offsets[split]))

    return self.mapPartitionsWithIndex(attach_index)
def zipWithUniqueId(self):
    """
    Zips this RDD with generated unique Long ids.

    Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
    n is the number of partitions. So there may exist gaps, but this
    method won't trigger a spark job, which is different from
    L{zipWithIndex}

    >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
    [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
    """
    num_parts = self.getNumPartitions()

    def assign_ids(split, iterator):
        # Stride by the partition count so ids never collide across
        # partitions, without needing a job to count elements.
        return ((item, pos * num_parts + split) for pos, item in enumerate(iterator))

    return self.mapPartitionsWithIndex(assign_ids)
def name(self):
    """
    Return the name of this RDD, or None when no name has been set.
    """
    jrdd_name = self._jrdd.name()
    return jrdd_name if jrdd_name else None
@ignore_unicode_prefix
def setName(self, name):
    """
    Assign a name to this RDD.

    Returns this RDD so calls can be chained.

    >>> rdd1 = sc.parallelize([1, 2])
    >>> rdd1.setName('RDD1').name()
    u'RDD1'
    """
    self._jrdd.setName(name)
    return self
def toDebugString(self):
    """
    A description of this RDD and its recursive dependencies for debugging.

    Returns UTF-8 encoded bytes, or None when the JVM provides nothing.
    """
    lineage = self._jrdd.toDebugString()
    if not lineage:
        return None
    return lineage.encode('utf-8')
def getStorageLevel(self):
    """
    Get the RDD's current storage level.

    >>> rdd1 = sc.parallelize([1,2])
    >>> rdd1.getStorageLevel()
    StorageLevel(False, False, False, False, 1)
    >>> print(rdd1.getStorageLevel())
    Serialized 1x Replicated
    """
    # Translate the JVM-side storage level into its Python counterpart.
    jlevel = self._jrdd.getStorageLevel()
    return StorageLevel(
        jlevel.useDisk(),
        jlevel.useMemory(),
        jlevel.useOffHeap(),
        jlevel.deserialized(),
        jlevel.replication(),
    )
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
    """
    Return the list of values in the RDD for key `key`. This operation
    is done efficiently if the RDD has a known partitioner by only
    searching the partition that the key maps to.

    >>> l = range(1000)
    >>> rdd = sc.parallelize(zip(l, l), 10)
    >>> rdd.lookup(42)  # slow
    [42]
    >>> sorted = rdd.sortByKey()
    >>> sorted.lookup(42)  # fast
    [42]
    >>> sorted.lookup(1024)
    []
    >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
    >>> list(rdd2.lookup(('a', 'b'))[0])
    ['c']
    """
    matches = self.filter(lambda kv: kv[0] == key).values()
    if self.partitioner is None:
        # No partitioner known: scan the whole RDD.
        return matches.collect()
    # A known partitioner lets us run the job on the single partition
    # that could contain the key.
    return self.ctx.runJob(matches, lambda x: x, [self.partitioner(key)])
def _to_java_object_rdd(self):
    """ Return a JavaRDD of Object by unpickling

    It will convert each Python object into Java object by Pyrolite, whenever the
    RDD is serialized in batch or not.
    """
    # Force pickle serialization first so the JVM side can unpickle the
    # elements uniformly.
    rdd = self._pickled()
    return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate version of count() that returns a potentially incomplete
    result within a timeout, even if not all tasks have finished.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> rdd.countApprox(1000, 1.0)
    1000
    """
    # Count each partition as a float so the approximate-sum machinery
    # can be reused, then truncate back to an int.
    per_partition_counts = self.mapPartitions(lambda it: [float(sum(1 for _ in it))])
    return int(per_partition_counts.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate operation to return the sum within a timeout
    or meet the confidence.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000))
    >>> abs(rdd.sumApprox(1000) - r) / r < 0.05
    True
    """
    # Compute per-partition partial sums in Python, then delegate the
    # approximate aggregation to the JVM's JavaDoubleRDD.
    jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
    jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
    r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
    """
    .. note:: Experimental

    Approximate operation to return the mean within a timeout
    or meet the confidence.

    >>> rdd = sc.parallelize(range(1000), 10)
    >>> r = sum(range(1000)) / 1000.0
    >>> abs(rdd.meanApprox(1000) - r) / r < 0.05
    True
    """
    # Convert to doubles and let the JVM's JavaDoubleRDD compute the
    # bounded approximate mean.
    jrdd = self.map(float)._to_java_object_rdd()
    jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
    r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
    return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
    """
    .. note:: Experimental

    Return approximate number of distinct elements in the RDD.

    The algorithm used is based on streamlib's implementation of
    `"HyperLogLog in Practice: Algorithmic Engineering of a State
    of The Art Cardinality Estimation Algorithm", available here
    <http://dx.doi.org/10.1145/2452376.2452456>`_.

    :param relativeSD: Relative accuracy. Smaller values create
                       counters that require more space.
                       It must be greater than 0.000017.

    >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
    >>> 900 < n < 1100
    True
    >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
    >>> 16 < n < 24
    True
    """
    if relativeSD < 0.000017:
        raise ValueError("relativeSD should be greater than 0.000017")

    def to_java_hash(x):
        # the hash space in Java is 2^32
        return portable_hash(x) & 0xFFFFFFFF

    return self.map(to_java_hash)._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
    """
    Return an iterator that contains all of the elements in this RDD.
    The iterator will consume as much memory as the largest partition in this RDD.

    >>> rdd = sc.parallelize(range(10))
    >>> [x for x in rdd.toLocalIterator()]
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    # Ask the JVM to serve the RDD's contents over a local socket, then
    # stream them back through the deserializer.
    with SCCallSiteSync(self.context) as css:
        sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
    return _load_from_socket(sock_info, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
    """
    Serialize *command* for shipping to the JVM and collect broadcast state.

    Returns a tuple (pickled_command, broadcast_vars, environment, includes).
    """
    # the serialized command will be compressed by broadcast
    ser = CloudPickleSerializer()
    pickled_command = ser.dumps(command)
    if len(pickled_command) > (1 << 20):  # 1M
        # Large commands are shipped as a broadcast variable instead of
        # inline bytes.
        # The broadcast will have same life cycle as created PythonRDD
        broadcast = sc.broadcast(pickled_command)
        pickled_command = ser.dumps(broadcast)
    broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
    # Reset the accumulated broadcast list now that it has been consumed.
    sc._pickled_broadcast_vars.clear()
    return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
    """
    Wrap a Python function and its (de)serializers into a JVM PythonFunction.
    """
    assert deserializer, "deserializer should not be empty"
    assert serializer, "serializer should not be empty"
    # The tuple layout here must match what the Python worker expects.
    command = (func, profiler, deserializer, serializer)
    pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
    return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
                                  sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
    """
    Pipelined maps:

    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False):
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Fuse with the previous Python transformation so both run in a
            # single pass over each partition.
            prev_func = prev.func

            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            # Partitioning survives only if every fused step preserves it.
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        # _jrdd_val caches the lazily-built JVM RDD (see the _jrdd property).
        self._jrdd_val = None
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None

    def getNumPartitions(self):
        return self._prev_jrdd.partitions().size()

    @property
    def _jrdd(self):
        # Build the JVM-side PythonRDD on first access and cache it.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()

        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None

        wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
                                      self._jrdd_deserializer, profiler)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
                                             self.preservesPartitioning)
        self._jrdd_val = python_rdd.asJavaRDD()

        if profiler:
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def id(self):
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def _is_pipelinable(self):
        # Caching/checkpointing force materialization, so later
        # transformations cannot be fused into this one.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-thread SparkContext."""
    import doctest
    import sys
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # Use sys.exit() rather than the interactive-helper exit() builtin,
        # which is injected by site.py and not guaranteed to exist (e.g.
        # when running under python -S).
        sys.exit(-1)


if __name__ == "__main__":
    _test()
|
__init__.py | # -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Import salt tests support dirs
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
from tests.support.unit import TestCase
from tests.support.case import ShellTestCase
from tests.support.parser import PNUM, print_header, SaltTestcaseParser
from tests.support.helpers import requires_sshd_server, RedirectStdStreams
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.mixins import SaltMinionEventAssertsMixin, SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt
import salt.config
import salt.master
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils.color
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.yaml
import salt.log.setup as salt_log_setup
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.exceptions import SaltClientError
# Import 3rd-party libs
import msgpack
from salt.ext import six
try:
import salt.ext.six.moves.socketserver as socketserver
except ImportError:
import socketserver
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
log = logging.getLogger(__name__)

# Maps ports handed out by get_unused_localhost_port() to the socket object
# holding them, so a port is not returned twice during one test run.
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
    '''
    Return a random unused port on localhost.

    The port is reserved by keeping the bound socket in ``_RUNTESTS_PORTS``
    (closed at exit); on Darwin/BSD/AIX the socket is closed immediately so
    the daemon under test can bind it.
    '''
    usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Binding to port 0 lets the kernel pick a free ephemeral port.
    usock.bind(('127.0.0.1', 0))
    port = usock.getsockname()[1]
    if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
        # These ports are hardcoded in the test configuration
        port = get_unused_localhost_port()
        usock.close()
        return port

    # The expressions are already booleans; the former
    # "True if ... else False" wrappers were redundant.
    DARWIN = sys.platform.startswith('darwin')
    BSD = 'bsd' in sys.platform
    AIX = sys.platform.startswith('aix')

    if (AIX or DARWIN) and port in _RUNTESTS_PORTS:
        # Already handed this port out during this run; pick another.
        port = get_unused_localhost_port()
        usock.close()
        return port

    _RUNTESTS_PORTS[port] = usock

    if DARWIN or BSD or AIX:
        # On these platforms keeping the socket open would prevent the
        # daemon from binding the port later, so release it now.
        usock.close()

    return port
def close_open_sockets(sockets_dict):
    '''
    Close every socket held in ``sockets_dict``, emptying the dict.
    '''
    while sockets_dict:
        _, sock = sockets_dict.popitem()
        sock.close()
# Release every still-reserved port's socket when the test run exits.
atexit.register(close_open_sockets, _RUNTESTS_PORTS)

# Port the threaded log-record server listens on.
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
    # Handler threads must not block interpreter shutdown.
    daemon_threads = True
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):
    '''
    TCP server whose handlers run in daemon threads and which exposes a
    ``shutting_down`` event so request handlers can stop their loops cleanly.
    '''

    allow_reuse_address = True

    def server_activate(self):
        # Create the shutdown flag before any request can be accepted.
        self.shutting_down = threading.Event()
        socketserver.TCPServer.server_activate(self)
        #super(ThreadedSocketServer, self).server_activate()

    def server_close(self):
        # server_close() may run before server_activate() ever did, in which
        # case the shutting_down attribute does not exist yet.
        if hasattr(self, 'shutting_down'):
            self.shutting_down.set()
        socketserver.TCPServer.server_close(self)
        #super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
    '''
    Receives msgpack-serialized log records from the spawned test daemons
    and replays them through the local logging machinery.
    '''

    def handle(self):
        # Newer msgpack replaced the ``encoding`` keyword with ``raw``.
        encoding = 'utf-8'
        unpacker_kwargs = {}
        if msgpack.version >= (0, 5, 2):
            unpacker_kwargs['raw'] = False
        else:
            unpacker_kwargs['encoding'] = encoding
        unpacker = msgpack.Unpacker(**unpacker_kwargs)
        while not self.server.shutting_down.is_set():
            try:
                wire_bytes = self.request.recv(1024)
                if not wire_bytes:
                    # Peer closed the connection.
                    break
                unpacker.feed(wire_bytes)
                for record_dict in unpacker:
                    # Rebuild the LogRecord and hand it to the logger it
                    # originally belonged to.
                    record = logging.makeLogRecord(record_dict)
                    logger = logging.getLogger(record.name)
                    logger.handle(record)
                    del record_dict
            except (EOFError, KeyboardInterrupt, SystemExit):
                break
            except socket.error as exc:
                try:
                    # errno.WSAECONNRESET only exists on Windows, hence the
                    # AttributeError guard below.
                    if exc.errno == errno.WSAECONNRESET:
                        # Connection reset on windows
                        break
                except AttributeError:
                    # We're not on windows
                    pass
                log.exception(exc)
            except Exception as exc:  # pylint: disable=broad-except
                log.exception(exc)
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 500
def __init__(self, parser):
    '''
    Store the CLI parser and build the color table used for status output.
    '''
    self.parser = parser
    use_colors = self.parser.options.no_colors is False
    self.colors = salt.utils.color.get_colors(use_colors)
    if salt.utils.platform.is_windows():
        # There's no shell color support on windows...
        for color_name in self.colors:
            self.colors[color_name] = ''
def __enter__(self):
    '''
    Start a master and minion
    '''
    # Setup the multiprocessing logging queue listener
    salt_log_setup.setup_multiprocessing_logging_listener(
        self.master_opts
    )

    # Set up PATH to mockbin
    self._enter_mockbin()

    self.minion_targets = set(['minion', 'sub_minion'])

    # Spawn the daemon set matching the transport selected on the CLI.
    if self.parser.options.transport == 'zeromq':
        self.start_zeromq_daemons()
    elif self.parser.options.transport == 'tcp':
        self.start_tcp_daemons()

    self.pre_setup_minions()
    self.setup_minions()

    if getattr(self.parser.options, 'ssh', False):
        self.prep_ssh()

    self.wait_for_minions(time.time(), self.MINIONS_CONNECT_TIMEOUT)

    if self.parser.options.sysinfo:
        # print_header() may not accept ``width``; fall back to the
        # narrower signature on TypeError.
        try:
            print_header(
                '~~~~~~~ Versions Report ', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('~~~~~~~ Versions Report ', inline=True)
        print('\n'.join(salt.version.versions_report()))

        try:
            print_header(
                '~~~~~~~ Minion Grains Information ', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('~~~~~~~ Minion Grains Information ', inline=True)
        grains = self.client.cmd('minion', 'grains.items')

        minion_opts = self.minion_opts.copy()
        minion_opts['color'] = self.parser.options.no_colors is False
        salt.output.display_output(grains, 'grains', minion_opts)

    try:
        print_header(
            '=', sep='=', inline=True,
            width=getattr(self.parser.options, 'output_columns', PNUM)
        )
    except TypeError:
        print_header('', sep='=', inline=True)

    try:
        return self
    finally:
        # Always run post-setup, even if a caller-visible error follows.
        self.post_setup_minions()
def start_zeromq_daemons(self):
    '''
    Fire up the daemons used for zeromq tests.

    The previous implementation repeated the same "print status / start
    daemon / print result" stanza six times; it is now factored into
    _start_daemon_show_status(), which preserves the exact console output
    and the original behavior of leaving the process attribute unset when
    a daemon fails to start.
    '''
    # The log server collects msgpack'ed log records from every daemon.
    self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
    self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
    self.log_server_process.daemon = True
    self.log_server_process.start()

    self._start_daemon_show_status(
        'salt-master',
        'master_process',
        daemon_name='salt-master',
        daemon_id=self.master_opts['id'],
        daemon_log_prefix='salt-master/{}'.format(self.master_opts['id']),
        daemon_cli_script_name='master',
        daemon_config=self.master_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
        daemon_class=SaltMaster,
        bin_dir_path=SCRIPT_DIR,
        fail_hard=True,
        event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
        start_timeout=60)

    # NOTE(review): daemon_id comes from master_opts here (and for the sub
    # minion below) while the log prefix uses minion_opts — preserved
    # as-is from the original code; confirm whether this is intentional.
    self._start_daemon_show_status(
        'salt-minion',
        'minion_process',
        daemon_name='salt-minion',
        daemon_id=self.master_opts['id'],
        daemon_log_prefix='salt-minion/{}'.format(self.minion_opts['id']),
        daemon_cli_script_name='minion',
        daemon_config=self.minion_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
        daemon_class=SaltMinion,
        bin_dir_path=SCRIPT_DIR,
        fail_hard=True,
        event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
        start_timeout=60)

    self._start_daemon_show_status(
        'sub salt-minion',
        'sub_minion_process',
        daemon_name='sub salt-minion',
        daemon_id=self.master_opts['id'],
        daemon_log_prefix='sub-salt-minion/{}'.format(self.sub_minion_opts['id']),
        daemon_cli_script_name='minion',
        daemon_config=self.sub_minion_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
        daemon_class=SaltMinion,
        bin_dir_path=SCRIPT_DIR,
        fail_hard=True,
        event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
        start_timeout=60)

    # The syndic master needs its roster prepared before it starts; doing
    # it via pre_start keeps prep_syndic inside the same try block, as in
    # the original code.
    self._start_daemon_show_status(
        'syndic salt-master',
        'smaster_process',
        pre_start=self.prep_syndic,
        daemon_name='salt-smaster',
        daemon_id=self.syndic_master_opts['id'],
        daemon_log_prefix='salt-smaster/{}'.format(self.syndic_master_opts['id']),
        daemon_cli_script_name='master',
        daemon_config=self.syndic_master_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
        daemon_class=SaltMaster,
        bin_dir_path=SCRIPT_DIR,
        fail_hard=True,
        event_listener_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
        start_timeout=60)

    self._start_daemon_show_status(
        'salt-syndic',
        'syndic_process',
        daemon_name='salt-syndic',
        daemon_id=self.syndic_opts['id'],
        daemon_log_prefix='salt-syndic/{}'.format(self.syndic_opts['id']),
        daemon_cli_script_name='syndic',
        daemon_config=self.syndic_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
        daemon_class=SaltSyndic,
        bin_dir_path=SCRIPT_DIR,
        fail_hard=True,
        event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
        start_timeout=60)

    if self.parser.options.proxy:
        self.minion_targets.add(self.proxy_opts['id'])
        self._start_daemon_show_status(
            'salt-proxy',
            'proxy_process',
            daemon_name='salt-proxy',
            daemon_id=self.proxy_opts['id'],
            daemon_log_prefix='salt-proxy/{}'.format(self.proxy_opts['id']),
            daemon_cli_script_name='proxy',
            daemon_config=self.proxy_opts,
            daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
            daemon_class=SaltProxy,
            bin_dir_path=SCRIPT_DIR,
            fail_hard=True,
            start_timeout=60)

start_tcp_daemons = start_zeromq_daemons

def _clear_status_line(self):
    # Overwrite the in-progress status message with spaces and return the
    # cursor to the start of the line.
    sys.stdout.write(
        '\r{0}\r'.format(
            ' ' * getattr(self.parser.options, 'output_columns', PNUM)
        )
    )

def _start_daemon_show_status(self, display_name, attr_name, pre_start=None, **daemon_kwargs):
    '''
    Start one daemon while printing colored progress to stdout.

    On success the started process is stored on ``self`` under
    ``attr_name``; on RuntimeWarning/RuntimeError the attribute is left
    unset, matching the original inline behavior. ``pre_start``, when
    given, runs inside the try block before the daemon is launched.
    '''
    try:
        sys.stdout.write(
            ' * {LIGHT_YELLOW}Starting {0} ... {ENDC}'.format(display_name, **self.colors)
        )
        sys.stdout.flush()
        if pre_start is not None:
            pre_start()
        setattr(self, attr_name, start_daemon(**daemon_kwargs))
        self._clear_status_line()
        sys.stdout.write(
            ' * {LIGHT_GREEN}Starting {0} ... STARTED!\n{ENDC}'.format(display_name, **self.colors)
        )
        sys.stdout.flush()
    except (RuntimeWarning, RuntimeError):
        self._clear_status_line()
        sys.stdout.write(
            ' * {LIGHT_RED}Starting {0} ... FAILED!\n{ENDC}'.format(display_name, **self.colors)
        )
        sys.stdout.flush()
def prep_syndic(self):
    '''
    Create a roster file for salt's syndic
    '''
    roster_path = os.path.join(FILES, 'conf/_ssh/roster')
    # The same roster is needed by both the main and the syndic master.
    for conf_dir in (RUNTIME_VARS.TMP_CONF_DIR, RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR):
        shutil.copy(roster_path, conf_dir)
def prep_ssh(self):
    '''
    Generate SSH keys and start an ssh daemon on an alternate port.

    Creates a passphrase-less client key pair plus DSA/ECDSA/Ed25519 host
    keys under the temporary configuration directory, appends the matching
    AuthorizedKeysFile/HostKey entries to the transplanted sshd_config,
    launches sshd with a pid file, and appends the salt-ssh roster entry.
    Sets the SSH_DAEMON_RUNNING environment variable on success.
    '''
    def run_keygen(binary, key_type, bits, key_name, cwd):
        # Generate one key pair with no passphrase.  Returns ssh-keygen's
        # stderr output so callers can skip host keys that failed.
        cmd = [binary, '-t', key_type]
        if bits is not None:
            cmd.extend(['-b', bits])
        cmd.extend([
            '-C', '"$(whoami)@$(hostname)-$(date -I)"',
            '-f', key_name,
            '-P', ''
        ])
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=cwd
        )
        _, err = proc.communicate()
        if err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(err)))
        return err

    sys.stdout.write(
        ' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
            'SSH server',
            **self.colors
        )
    )
    keygen = salt.utils.path.which('ssh-keygen')
    sshd = salt.utils.path.which('sshd')
    if not (keygen and sshd):
        print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
        return
    if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
        os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)

    # Generate client key, removing any stale pair first
    pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
    priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test')
    for stale_key in (pub_key_test_file, priv_key_test_file):
        if os.path.exists(stale_key):
            os.remove(stale_key)
    run_keygen(keygen, 'ecdsa', '521', 'key_test', RUNTIME_VARS.TMP_CONF_DIR)

    sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
    shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
    auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')

    # Generate server keys
    server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'server')
    if not os.path.exists(server_key_dir):
        os.makedirs(server_key_dir)
    server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
    server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
    server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
    server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
    server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
    # Fixed typo: ssh-keygen writes 'ssh_host_ed25519_key.pub', the previous
    # 'ssh_host.ed25519_key.pub' name never matched and left a stale file.
    server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key.pub')
    for server_key_file in (server_dsa_priv_key_file,
                            server_dsa_pub_key_file,
                            server_ecdsa_priv_key_file,
                            server_ecdsa_pub_key_file,
                            server_ed25519_priv_key_file,
                            server_ed25519_pub_key_file):
        if os.path.exists(server_key_file):
            os.remove(server_key_file)
    keygen_dsa_err = run_keygen(keygen, 'dsa', '1024', 'ssh_host_dsa_key', server_key_dir)
    keygen_escda_err = run_keygen(keygen, 'ecdsa', '521', 'ssh_host_ecdsa_key', server_key_dir)
    # Ed25519 keys have a fixed size; ssh-keygen ignores/rejects -b for them.
    keygen_ed25519_err = run_keygen(keygen, 'ed25519', None, 'ssh_host_ed25519_key', server_key_dir)

    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
        ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
        # Only advertise host keys that were actually generated
        if not keygen_dsa_err:
            ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
        if not keygen_escda_err:
            ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
        if not keygen_ed25519_err:
            ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))

    self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd.pid')
    self.sshd_process = subprocess.Popen(
        [sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
        cwd=RUNTIME_VARS.TMP_CONF_DIR
    )
    # sshd daemonizes, so communicate() returns once the parent exits
    _, sshd_err = self.sshd_process.communicate()
    if sshd_err:
        print('sshd had errors on startup: {0}'.format(salt.utils.stringutils.to_str(sshd_err)))
    else:
        os.environ['SSH_DAEMON_RUNNING'] = 'True'
    self.prep_syndic()
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'), 'a') as roster:
        roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
        roster.write(' priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
    sys.stdout.write(
        ' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
            **self.colors
        )
    )
@classmethod
def config(cls, role):
    '''
    Return a configuration for a master/minion/syndic.
    Currently these roles are:
    * master
    * minion
    * syndic
    * syndic_master
    * sub_minion
    * proxy
    '''
    # Configurations are frozen into this registry by transplant_configs()
    return RUNTIME_VARS.RUNTIME_CONFIGS[role]
@classmethod
def config_location(cls):
    '''
    Return the temporary directory holding the transplanted config files.
    '''
    return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
    '''
    Return a local client which will be used for example to ping and sync
    the test minions.

    This client is defined as a class attribute because its creation needs
    to be deferred to a latter stage. If created it on `__enter__` like it
    previously was, it would not receive the master events.
    '''
    # EAFP: reuse the cached client, creating it on first access only.
    try:
        return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
    except KeyError:
        runtime_client = salt.client.get_local_client(
            mopts=self.master_opts
        )
        RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = runtime_client
        return runtime_client
@classmethod
def transplant_configs(cls, transport='zeromq'):
    '''
    Generate and write the master/minion/syndic/sub_minion/proxy
    configurations used by the test daemons into the temporary runtime
    directories, then verify the resulting environment.

    transport -- 'zeromq' (default) or 'tcp'; applied to every generated
    configuration.

    The loaded option dictionaries are frozen into
    RUNTIME_VARS.RUNTIME_CONFIGS and also stored as class attributes.
    '''
    # Start from a clean temporary tree
    if os.path.isdir(RUNTIME_VARS.TMP):
        shutil.rmtree(RUNTIME_VARS.TMP)
    os.makedirs(RUNTIME_VARS.TMP)
    os.makedirs(RUNTIME_VARS.TMP_ROOT_DIR)
    os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
    print(' * Transplanting configuration files to \'{0}\''.format(RUNTIME_VARS.TMP_CONF_DIR))
    tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts')
    with salt.utils.files.fopen(tests_known_hosts_file, 'w') as known_hosts:
        known_hosts.write('')

    # This master connects to syndic_master via a syndic
    master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
    master_opts['known_hosts_file'] = tests_known_hosts_file
    master_opts['cachedir'] = 'cache'
    master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    master_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
    master_opts['pki_dir'] = 'pki'
    master_opts['syndic_master'] = 'localhost'
    pytest_stop_sending_events_file = os.path.join(TMP_ROOT_DIR, 'pytest_stop_sending_events_file_master')
    with salt.utils.files.fopen(pytest_stop_sending_events_file, 'w') as wfh:
        wfh.write('')
    master_opts['pytest_stop_sending_events_file'] = pytest_stop_sending_events_file
    file_tree = {
        'root_dir': os.path.join(FILES, 'pillar', 'base', 'file_tree'),
        'follow_dir_links': False,
        'keep_newline': True,
    }
    master_opts['ext_pillar'].append({'file_tree': file_tree})

    # This is the syndic for master
    # Let's start with a copy of the syndic master configuration
    syndic_opts = copy.deepcopy(master_opts)
    # Let's update with the syndic configuration
    syndic_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic')))
    syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    syndic_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR

    # Under windows we can't seem to properly create a virtualenv off of another
    # virtualenv, we can on linux but we will still point to the virtualenv binary
    # outside the virtualenv running the test suite, if that's the case.
    try:
        real_prefix = sys.real_prefix
        # The above attribute exists, this is a virtualenv
        # Consistency fix: use salt.utils.platform.is_windows() like the
        # ext_pillar loop below (salt.utils.is_windows is the legacy alias).
        if salt.utils.platform.is_windows():
            virtualenv_binary = os.path.join(real_prefix, 'Scripts', 'virtualenv.exe')
        else:
            # We need to remove the virtualenv from PATH or we'll get the virtualenv binary
            # from within the virtualenv, we don't want that
            path = os.environ.get('PATH')
            if path is not None:
                path_items = path.split(os.pathsep)
                for item in path_items[:]:
                    if item.startswith(sys.base_prefix):
                        path_items.remove(item)
                os.environ['PATH'] = os.pathsep.join(path_items)
            # Consistency fix: salt.utils.path.which, as used elsewhere in
            # this class (prep_ssh); salt.utils.which is the legacy alias.
            virtualenv_binary = salt.utils.path.which('virtualenv')
            if path is not None:
                # Restore previous environ PATH
                os.environ['PATH'] = path
            if not virtualenv_binary.startswith(real_prefix):
                virtualenv_binary = None
        if virtualenv_binary and not os.path.exists(virtualenv_binary):
            # It doesn't exist?!
            virtualenv_binary = None
    except AttributeError:
        # We're not running inside a virtualenv
        virtualenv_binary = None

    # This minion connects to master
    minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion'))
    minion_opts['cachedir'] = 'cache'
    minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    minion_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
    minion_opts['pki_dir'] = 'pki'
    minion_opts['hosts.file'] = os.path.join(TMP_ROOT_DIR, 'hosts')
    minion_opts['aliases.file'] = os.path.join(TMP_ROOT_DIR, 'aliases')
    if virtualenv_binary:
        minion_opts['venv_bin'] = virtualenv_binary

    # This sub_minion also connects to master
    sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion'))
    sub_minion_opts['cachedir'] = 'cache'
    sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
    sub_minion_opts['pki_dir'] = 'pki'
    sub_minion_opts['hosts.file'] = os.path.join(TMP_ROOT_DIR, 'hosts')
    sub_minion_opts['aliases.file'] = os.path.join(TMP_ROOT_DIR, 'aliases')
    if virtualenv_binary:
        sub_minion_opts['venv_bin'] = virtualenv_binary

    # This is the master of masters
    syndic_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic_master'))
    syndic_master_opts['cachedir'] = 'cache'
    syndic_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
    syndic_master_opts['pki_dir'] = 'pki'
    pytest_stop_sending_events_file = os.path.join(TMP_ROOT_DIR, 'pytest_stop_sending_events_file_syndic_master')
    with salt.utils.files.fopen(pytest_stop_sending_events_file, 'w') as wfh:
        wfh.write('')
    syndic_master_opts['pytest_stop_sending_events_file'] = pytest_stop_sending_events_file

    # This proxy connects to master
    proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy'))
    proxy_opts['cachedir'] = 'cache'
    # proxy_opts['user'] = running_tests_user
    proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy')
    proxy_opts['pki_dir'] = 'pki'
    proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts')
    proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir-proxy', 'aliases')

    if transport == 'tcp':
        master_opts['transport'] = 'tcp'
        minion_opts['transport'] = 'tcp'
        sub_minion_opts['transport'] = 'tcp'
        syndic_master_opts['transport'] = 'tcp'
        proxy_opts['transport'] = 'tcp'

    # Set up config options that require internal data
    master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
        'base': [
            RUNTIME_VARS.TMP_PILLAR_TREE,
            os.path.join(FILES, 'pillar', 'base'),
        ]
    }
    minion_opts['pillar_roots'] = {
        'base': [
            RUNTIME_VARS.TMP_PILLAR_TREE,
            os.path.join(FILES, 'pillar', 'base'),
        ]
    }
    master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            # salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE
        ]
    }
    minion_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            # salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE
        ]
    }
    master_opts.setdefault('reactor', []).append(
        {
            'salt/minion/*/start': [
                os.path.join(FILES, 'reactor-sync-minion.sls')
            ],
        }
    )
    master_opts.setdefault('reactor', []).append(
        {
            'salt/test/reactor': [
                os.path.join(FILES, 'reactor-test.sls')
            ],
        }
    )
    for opts_dict in (master_opts, syndic_master_opts):
        if 'ext_pillar' not in opts_dict:
            opts_dict['ext_pillar'] = []
        if salt.utils.platform.is_windows():
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
        else:
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})

    # all read, only owner write
    autosign_file_permissions = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
    for opts_dict in (master_opts, syndic_master_opts):
        # We need to copy the extension modules into the new master root_dir or
        # it will be prefixed by it
        new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
        if not os.path.exists(new_extension_modules_path):
            shutil.copytree(
                os.path.join(
                    INTEGRATION_TEST_DIR, 'files', 'extension_modules'
                ),
                new_extension_modules_path
            )
        opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')

        # Copy the autosign_file to the new master root_dir
        new_autosign_file_path = os.path.join(opts_dict['root_dir'], 'autosign_file')
        shutil.copyfile(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'autosign_file'),
            new_autosign_file_path
        )
        os.chmod(new_autosign_file_path, autosign_file_permissions)

    # Point the config values to the correct temporary paths
    for name in ('hosts', 'aliases'):
        optname = '{0}.file'.format(name)
        optname_path = os.path.join(TMP, name)
        master_opts[optname] = optname_path
        minion_opts[optname] = optname_path
        sub_minion_opts[optname] = optname_path
        syndic_opts[optname] = optname_path
        syndic_master_opts[optname] = optname_path
        proxy_opts[optname] = optname_path

    master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    proxy_opts['runtests_conn_check_port'] = get_unused_localhost_port()

    for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts, proxy_opts):
        if 'engines' not in conf:
            conf['engines'] = []
        conf['engines'].append({'salt_runtests': {}})
        if 'engines_dirs' not in conf:
            conf['engines_dirs'] = []
        conf['engines_dirs'].insert(0, ENGINES_DIR)
        if 'log_handlers_dirs' not in conf:
            conf['log_handlers_dirs'] = []
        conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
        conf['runtests_log_port'] = SALT_LOG_PORT
        conf['runtests_log_level'] = os.environ.get('TESTS_MIN_LOG_LEVEL_NAME') or 'debug'

    # ----- Transcribe Configuration ---------------------------------------------------------------------------->
    for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
        if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
            # These have runtime computed values and will be handled
            # differently
            continue
        entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
        if os.path.isfile(entry_path):
            shutil.copy(
                entry_path,
                os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
            )
        elif os.path.isdir(entry_path):
            shutil.copytree(
                entry_path,
                os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
            )

    for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
        # Each role's computed options live in a local named '<role>_opts'
        computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
        with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), 'w') as fp_:
            salt.utils.yaml.safe_dump(computed_config, fp_, default_flow_style=False)
    sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as wfh:
        salt.utils.yaml.safe_dump(sub_minion_computed_config, wfh, default_flow_style=False)
    shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'master'))
    syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as wfh:
        salt.utils.yaml.safe_dump(syndic_master_computed_config, wfh, default_flow_style=False)
    syndic_computed_config = copy.deepcopy(syndic_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as wfh:
        salt.utils.yaml.safe_dump(syndic_computed_config, wfh, default_flow_style=False)
    shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'))
    # <---- Transcribe Configuration -----------------------------------------------------------------------------

    # ----- Verify Environment ---------------------------------------------------------------------------------->
    # Re-load each role through the real config loaders so defaults apply
    master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'))
    minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
    syndic_opts = salt.config.syndic_config(
        os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'),
        os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
    )
    sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'))
    syndic_master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
    proxy_opts = salt.config.proxy_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'proxy'))

    RUNTIME_VARS.RUNTIME_CONFIGS['master'] = freeze(master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['proxy'] = freeze(proxy_opts)

    verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
                os.path.join(master_opts['pki_dir'], 'minions_pre'),
                os.path.join(master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(master_opts['pki_dir'], 'minions_denied'),
                os.path.join(master_opts['cachedir'], 'jobs'),
                os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(syndic_master_opts['cachedir'], 'jobs'),
                os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(master_opts['pki_dir'], 'accepted'),
                os.path.join(master_opts['pki_dir'], 'rejected'),
                os.path.join(master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
                os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
                os.path.join(syndic_master_opts['pki_dir'], 'pending'),
                os.path.join(minion_opts['pki_dir'], 'accepted'),
                os.path.join(minion_opts['pki_dir'], 'rejected'),
                os.path.join(minion_opts['pki_dir'], 'pending'),
                os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
                os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
                os.path.join(sub_minion_opts['pki_dir'], 'pending'),
                os.path.dirname(master_opts['log_file']),
                minion_opts['extension_modules'],
                sub_minion_opts['extension_modules'],
                sub_minion_opts['pki_dir'],
                master_opts['sock_dir'],
                syndic_master_opts['sock_dir'],
                sub_minion_opts['sock_dir'],
                minion_opts['sock_dir'],
                RUNTIME_VARS.TMP_STATE_TREE,
                RUNTIME_VARS.TMP_PILLAR_TREE,
                RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
                TMP,
                ],
               RUNTIME_VARS.RUNNING_TESTS_USER,
               root_dir=master_opts['root_dir'],
               )

    cls.master_opts = master_opts
    cls.minion_opts = minion_opts
    cls.sub_minion_opts = sub_minion_opts
    cls.syndic_opts = syndic_opts
    cls.syndic_master_opts = syndic_master_opts
    cls.proxy_opts = proxy_opts
    # <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
    '''
    Kill the minion and master processes and shut down logging.

    Removed a large block of commented-out clean_proc() dead code that
    duplicated the terminate() calls below.
    '''
    # Optional daemons may never have been started; ignore missing attributes.
    try:
        self.sub_minion_process.terminate()
    except AttributeError:
        pass
    self.minion_process.terminate()
    if hasattr(self, 'proxy_process'):
        self.proxy_process.terminate()
    self.master_process.terminate()
    try:
        self.syndic_process.terminate()
    except AttributeError:
        pass
    try:
        self.smaster_process.terminate()
    except AttributeError:
        pass
    self.log_server.server_close()
    self.log_server.shutdown()
    self._exit_mockbin()
    self._exit_ssh()
    self.log_server_process.join()
    # Shutdown the multiprocessing logging queue listener
    salt_log_setup.shutdown_multiprocessing_logging()
    salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
def pre_setup_minions(self):
    '''
    Subclass hook: run extra setup before the minions are set up.
    Default implementation does nothing.
    '''
def setup_minions(self):
    '''
    Minions setup routines. Default implementation does nothing.
    '''
def post_setup_minions(self):
    '''
    Subclass hook: run extra code after the minions have been set up.
    Default implementation does nothing.
    '''
def _enter_mockbin(self):
    '''
    Prepend the mock binaries directory to PATH, once.
    '''
    current_path = os.environ.get('PATH', '')
    entries = current_path.split(os.pathsep)
    if MOCKBIN in entries:
        # Already on PATH; leave the environment untouched.
        return
    os.environ['PATH'] = os.pathsep.join([MOCKBIN] + entries)
def _exit_ssh(self):
    '''
    Kill the sshd started by prep_ssh (if any) and the process recorded
    in its pid file.
    '''
    if hasattr(self, 'sshd_process'):
        try:
            self.sshd_process.kill()
        except OSError as exc:
            # errno 3 == ESRCH: the process is already gone
            if exc.errno != 3:
                raise
        with salt.utils.files.fopen(self.sshd_pidfile) as fhr:
            try:
                os.kill(int(fhr.read()), signal.SIGKILL)
            except OSError as exc:
                # errno 3 == ESRCH: the process is already gone
                if exc.errno != 3:
                    raise
def _exit_mockbin(self):
    '''
    Strip the mock binaries directory from PATH (first occurrence only,
    mirroring the single insert done by _enter_mockbin).
    '''
    entries = os.environ.get('PATH', '').split(os.pathsep)
    if MOCKBIN in entries:
        entries.remove(MOCKBIN)
    os.environ['PATH'] = os.pathsep.join(entries)
@classmethod
def clean(cls):
    '''
    Remove the temporary directories created for the test run.
    '''
    def remove_readonly(func, path, excinfo):
        # rmtree error hook: grant the owner full permissions and retry,
        # so read-only entries do not abort the cleanup.
        if os.path.exists(path):
            os.chmod(path, stat.S_IRWXU)
            func(path)

    cleanup_targets = (TMP,
                       RUNTIME_VARS.TMP_STATE_TREE,
                       RUNTIME_VARS.TMP_PILLAR_TREE,
                       RUNTIME_VARS.TMP_PRODENV_STATE_TREE)
    for dirname in cleanup_targets:
        if not os.path.isdir(dirname):
            continue
        try:
            shutil.rmtree(six.text_type(dirname), onerror=remove_readonly)
        except Exception:
            log.exception('Failed to remove directory: %s', dirname)
def wait_for_jid(self, targets, jid, timeout=120):
    '''
    Poll until the given job id is no longer running on any target.

    Returns True when the job has finished (confirmed by two consecutive
    "not running" polls, to avoid false positives), False if the timeout
    expires before that happens.
    '''
    time.sleep(1)  # Allow some time for minions to accept jobs
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    # job_finished latches after the first empty poll; a second empty poll
    # confirms completion, a busy poll resets the latch.
    job_finished = False
    while now <= expire:
        running = self.__client_job_running(targets, jid)
        # Blank the status line before rewriting it
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        if not running and job_finished is False:
            # Let's not have false positives and wait one more seconds
            job_finished = True
        elif not running and job_finished is True:
            return True
        elif running and job_finished is True:
            job_finished = False
        if job_finished is False:
            sys.stdout.write(
                ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(running),
                    **self.colors
                )
            )
            sys.stdout.flush()
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # Loop exhausted without returning: the timeout expired
        sys.stdout.write(
            '\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
            'back\n'.format(**self.colors)
        )
        sys.stdout.flush()
    return False
def __client_job_running(self, targets, jid):
    '''
    Return the targets that are still running the job with the given jid.
    '''
    ret = self.client.cmd(
        list(targets), 'saltutil.running', tgt_type='list'
    )
    still_running = []
    for minion, jobs in six.iteritems(ret):
        if jobs and jobs[0]['jid'] == jid:
            still_running.append(minion)
    return still_running
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
    '''
    Run saltutil.sync_<modules_kind> on the given targets and wait for
    every one of them to report back.

    modules_kind -- e.g. 'modules', 'states', 'grains'.
    Returns True when every target synced (or was already in sync);
    False if any minion returned an error string.
    Raises SystemExit if the sync job never finishes within `timeout`.
    '''
    if not timeout:
        timeout = 120
    # Let's sync all connected minions
    print(
        ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
        '(saltutil.sync_{1})'.format(
            ', '.join(targets),
            modules_kind,
            **self.colors
        )
    )
    syncing = set(targets)
    jid_info = self.client.run_job(
        list(targets), 'saltutil.sync_{0}'.format(modules_kind),
        tgt_type='list',
        timeout=999999999999999,
    )
    if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
        print(
            ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
            'Tests requiring these {0} WILL fail'.format(
                modules_kind, **self.colors)
        )
        raise SystemExit()
    # Drain returns until every target has been accounted for
    while syncing:
        rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
        if rdata:
            for name, output in six.iteritems(rdata):
                if not output['ret']:
                    # Already synced!?
                    syncing.remove(name)
                    continue
                # A string return (instead of a list) signals an error
                if isinstance(output['ret'], six.string_types):
                    # An error has occurred
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
                        '{1}'.format(
                            name, output['ret'],
                            modules_kind,
                            **self.colors)
                    )
                    return False
                print(
                    ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                    '{1}'.format(
                        name,
                        ', '.join(output['ret']),
                        modules_kind, **self.colors
                    )
                )
                # Synced!
                try:
                    syncing.remove(name)
                except KeyError:
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} already synced??? '
                        '{1}'.format(name, output, **self.colors)
                    )
    return True
def sync_minion_states(self, targets, timeout=None):
    '''
    Sync state modules to the given targets (saltutil.sync_states).
    '''
    salt.utils.process.appendproctitle('SyncMinionStates')
    self.sync_minion_modules_('states', targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
    '''
    Sync execution modules to the given targets (saltutil.sync_modules).
    '''
    salt.utils.process.appendproctitle('SyncMinionModules')
    self.sync_minion_modules_('modules', targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
    '''
    Sync grains modules to the given targets (saltutil.sync_grains).
    '''
    salt.utils.process.appendproctitle('SyncMinionGrains')
    self.sync_minion_modules_('grains', targets, timeout=timeout)
def wait_for_minions(self, start, timeout, sleep=5):
    '''
    Ensure all minions and masters (including sub-masters) are connected.

    Pings '*' until the set of responding minions matches
    self.minion_targets, raising RuntimeError once `timeout` seconds have
    elapsed since `start`.

    Fix: the previous version did `continue` when the ping reply had no
    'minions' key, which skipped BOTH the timeout check and the sleep --
    a hot, potentially infinite loop. Every iteration now checks the
    timeout and sleeps.
    '''
    expected = sorted(self.minion_targets)
    while True:
        try:
            ret = self.client.run_job('*', 'test.ping')
        except salt.exceptions.SaltClientError:
            ret = None
        if ret and sorted(ret.get('minions', [])) == expected:
            break
        if time.time() - start >= timeout:
            raise RuntimeError("Ping Minions Failed")
        time.sleep(sleep)
|
player.py | import os
import threading
from tkinter import *
from tkinter import filedialog
from mutagen.mp3 import MP3
import tkinter.messagebox
import time
from tkinter import ttk
from ttkthemes import ThemedTk as tk
from pygame import mixer
import sqlite3
# Shared SQLite connection/cursor; the 'list' table holds saved playlist names
# and each playlist gets its own table of file paths.
conn=sqlite3.connect('users.db')
c=conn.cursor()
# ---- Initialization -------------
# NOTE(review): the theme is set twice -- 'equilux' here, then overridden by
# 'arc' below; confirm which one is intended.
root = tk(theme='equilux')
root.get_themes()
root.set_theme("arc")
# In-memory list of full file paths, parallel to the playlistbox entries
playlist = []
# --- Menubar ---
menubar = Menu(root)
root.config(menu=menubar)
# -------------------
# creating submenu
submenu = Menu(menubar, tearoff=0)
def listofplaylist():
    '''
    Refresh the "List of playlist" box with every saved playlist name.

    Fix: the box is now cleared before repopulating, so repeated calls
    (e.g. after creating a new playlist) no longer append duplicate
    entries; the old commented-out clearing attempt was broken.
    '''
    c.execute("SELECT * FROM list")
    saved_playlists = c.fetchall()
    playlist_list_box1.delete(0, END)
    for index, row in enumerate(saved_playlists):
        # Each row is (playlist_name,)
        playlist_list_box1.insert(index, row[0])
def add_to_playlist(f):
    '''
    Add a song to the UI listbox and the in-memory playlist.

    f -- full path of the audio file; its basename is shown in the listbox
    while the full path is kept in `playlist` at the same (top) position.

    Fix: the parameter is now actually used -- the old version ignored `f`
    and re-read the global browseFile.fileName; the dead `index` counter
    is gone (new songs are always inserted at the top).
    '''
    playlistbox.insert(0, os.path.basename(f))
    playlist.insert(0, f)
def browseFile():
    '''
    Ask the user for an audio file, add it to the playlist and persist it.
    '''
    # The chosen path is stashed on the function object so other handlers
    # (add_to_playlist, insertintoplaylist) can read it as a pseudo-global.
    browseFile.fileName = filedialog.askopenfilename()
    add_to_playlist(browseFile.fileName)
    insertintoplaylist()
def loadfromdb():
    '''
    Load every saved song path from the playlist table named in the entry
    box and add each one to the UI playlist.

    Fixes: return after the "No Playlist found" error (the old code fell
    through to fetchall() on the failed cursor and crashed); catch
    sqlite3.Error instead of a bare except; drop the useless local
    `playlist = ...` assignment that only shadowed the global list.
    '''
    user_name = userentry.get()
    # NOTE(review): the table name is concatenated straight from user input;
    # SQLite cannot bind identifiers, so this remains injection-prone --
    # validate user_name (see newplaylist) before trusting it.
    try:
        c.execute("SELECT * FROM " + user_name)
    except sqlite3.Error:
        tkinter.messagebox.showerror(title="ERROR", message="No Playlist found")
        return
    for row in c.fetchall():
        # Keep browseFile.fileName updated for handlers that read it
        browseFile.fileName = row[0]
        add_to_playlist(row[0])
def newplaylist():
    '''
    Create a new playlist table named after the entry-box text and
    register it in the 'list' table.

    Fixes: the playlist name is validated as a plain identifier (table
    names cannot be bound parameters, so this also blocks SQL injection),
    and a duplicate/invalid CREATE TABLE no longer crashes the app.
    '''
    user_name = userentry.get()
    if not user_name.isidentifier():
        tkinter.messagebox.showerror(title="ERROR", message="Invalid playlist name")
        return
    try:
        c.execute("CREATE TABLE " + user_name + ' ("filepath" TEXT)')
    except sqlite3.Error:
        tkinter.messagebox.showerror(title="ERROR", message="Playlist already exists")
        return
    c.execute("INSERT INTO list(playlist) VALUES (?)", (user_name,))
    conn.commit()
    listofplaylist()
def insertintoplaylist():
    '''
    Persist the most recently browsed file path into the playlist table
    named in the entry box.
    '''
    user_name=userentry.get()
    # NOTE(review): table name is concatenated from user input; SQLite
    # cannot bind identifiers, so this is injection-prone -- validate
    # user_name before trusting it.
    sql="INSERT INTO "+user_name+"(filepath) VALUES (?)"
    c.execute(sql,(browseFile.fileName,))
    conn.commit()
def del_song():
    '''
    Remove the selected song from the listbox and the in-memory playlist.

    Fix: clicking the delete button with nothing selected no longer
    raises IndexError -- the handler simply returns.
    '''
    selection = playlistbox.curselection()
    if not selection:
        return
    selected_index = int(selection[0])
    playlistbox.delete(selected_index)
    playlist.pop(selected_index)
    # TODO: also delete the song's row from the user's playlist table in
    # users.db so the removal survives a reload.
# --- Left Frame ---
# Holds the current-song listbox, the saved-playlists listbox and the
# add/remove buttons.
leftframe = ttk.Frame(root)
leftframe.pack(side=LEFT, padx = 20, pady = 10)
addPhoto = PhotoImage(file="Images/add.png")
delPhoto = PhotoImage(file="Images/minus.png")
addBtn = Button(leftframe, image=addPhoto, command = browseFile)
delBtn = Button(leftframe, image=delPhoto, command = del_song)
label3=ttk.Label(leftframe,text="Current Songs")
label3.pack()
playlistbox = Listbox(leftframe)
playlistbox.pack()
addBtn.pack()
delBtn.pack(pady=10)
# --- Right Frame ---
rightframe = ttk.Frame(root)
rightframe.pack()
#--------------------
# --- Top Frame -----
topframe = ttk.Frame(root)
topframe.pack(pady = 20)
#---------------
# ----------------
# --- Submenu ---
# File menu: open a song, load a saved playlist, or quit
menubar.add_cascade(label="File", menu=submenu)
submenu.add_command(label="Open", command=browseFile)
submenu.add_command(label="Load Playlist", command=loadfromdb)
submenu.add_command(label="Exit", command=root.destroy)
# ----------------
#--- Info about us ---
def aboutUs():
    '''
    Show the "About Us" dialog listing the project authors.
    '''
    tkinter.messagebox.showinfo('About Us', 'This Project is made by : \n1) Parth (18CE2019) \n2) Sushil (18CE2011) \n3) Prithvi (18CE2007)\n4) Samiksha(18C32030)')
# -------------------
# Help menu
submenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=submenu)
submenu.add_command(label="About Us", command=aboutUs)
# ---------------
# MAIN
mixer.init()
root.title("MP3 Player")
root.iconbitmap(r'icon.ico')
# -- Images -------
playPhoto = PhotoImage(file="Images/play.png")
pausePhoto = PhotoImage(file="Images/pause.png")
stopPhoto = PhotoImage(file="Images/Stop.png")
unmutePhoto = PhotoImage(file="Images/lmute.png")
mutePhoto = PhotoImage(file="Images/mute.png")
# --------------
# Top frame: playlist name entry, load / new-playlist buttons, time labels
usernamelabel = ttk.Label(topframe, text='Play List:')
usernamelabel.pack()
userentry = ttk.Entry(topframe)
userentry.pack()
usernamelabel1 = ttk.Label(topframe)
usernamelabel1.pack()
loadbtn = ttk.Button(topframe, command=loadfromdb, text='Load')
loadbtn.pack()
usernamelabel2 = ttk.Label(topframe)
usernamelabel2.pack()
# Fix: the button was assigned to the name `newplaylist`, shadowing the
# newplaylist() function at module level; use a distinct variable name.
newplaylistbtn = ttk.Button(topframe, command=newplaylist, text='New Playlist')
newplaylistbtn.pack()
usernamelabel2 = ttk.Label(topframe, text="")
usernamelabel2.pack()
lengthlabel = ttk.Label(topframe, text='Total Length : --:--')
lengthlabel.pack()
currentlabel = ttk.Label(topframe, text="Current Time : --:-- ")
currentlabel.pack()
# --- Middle Frame ---
middleFrame = ttk.Frame(root, relief=RAISED, borderwidth=1)
middleFrame.pack(padx=30, pady=20)
# ----Playlist List Box ----#
label4 = ttk.Label(leftframe, text="List of playlist")
label4.pack()
playlist_list_box1 = Listbox(leftframe)
playlist_list_box1.pack()
def show_details(play_song):
    '''
    Display the track's total length and start the running-time counter.

    play_song -- path to the file being played; only .mp3 is supported.

    Fix: the non-mp3 branch now returns after showing its error dialog --
    previously execution fell through and crashed with a NameError because
    total_length was never assigned.
    '''
    file_data = os.path.splitext(play_song)
    if file_data[1] != ".mp3":
        tkinter.messagebox.showerror('Error 000002x', 'Use a mp3 file only')
        return
    audio = MP3(play_song)
    total_length = audio.info.length
    mins, secs = divmod(total_length, 60)
    timeformat = '{:02d}:{:02d}'.format(round(mins), round(secs))
    lengthlabel['text'] = "Total Length" + ' - ' + timeformat
    # Count the elapsed time on a worker thread so the UI stays responsive
    t1 = threading.Thread(target=start_count, args=(total_length,))
    t1.start()
def start_count(t):
    """Tick the 'Current Time' label once per second for up to *t* seconds.

    Runs on a worker thread; exits as soon as the mixer reports idle.
    """
    global paused
    current_time = 0
    while current_time <= t and mixer.music.get_busy():
        if paused:
            # FIX: sleep instead of spinning — the original `continue`
            # busy-waited a full CPU core while the track was paused.
            time.sleep(0.5)
        else:
            mins, secs = divmod(current_time, 60)
            mins = round(mins)
            secs = round(secs)
            timeformat = '{:02d}:{:02d}'.format(mins, secs)
            currentlabel['text'] = "Current Time" + ' - ' + timeformat
            time.sleep(1)
            current_time = current_time + 1
# --- Play music ---
def playFunction():
    """Resume a paused track, or (re)start the song selected in the playlist."""
    global paused
    if paused:
        mixer.music.unpause()
        # playBtn.configure(image=pausePhoto)
        paused = FALSE
    else:
        try:
            stopFunction()
            time.sleep(1)
            selection = playlistbox.curselection()
            # FIX: guard against an empty selection — the original raised an
            # uncaught IndexError when no song was highlighted.
            if not selection:
                tkinter.messagebox.showerror('Error', 'No song selected.')
                return
            play_it = playlist[int(selection[0])]
            mixer.music.load(play_it)
            mixer.music.play()
            show_details(play_it)
            paused = FALSE
        except FileNotFoundError:
            tkinter.messagebox.showerror('Error 000001x ', 'The file could not be found or is corrupted.')
# Play button (plain tk Button so it can display an image)
playBtn = Button(middleFrame, image=playPhoto, command=playFunction, highlightthickness = 0, bd = 0)
playBtn.grid(row=2, column=0, padx = 10)
# --------------------
paused = FALSE  # shared play/pause state flag, read by start_count()
# --- Pause music ---
def pauseFunction():
    """Pause playback and remember that we are paused."""
    global paused
    paused = TRUE
    mixer.music.pause()
# Pause button; the lambda wrapper is kept from the original
# (a direct `command=pauseFunction` reference would behave the same).
pauseBtn = Button(middleFrame, image = pausePhoto, command = lambda : pauseFunction(), highlightthickness = 0, bd = 0)
pauseBtn.grid(row = 2,column=1, padx = 10)
# --------------------
# --- Unpause Function -----
# ----------------------------
# --- Stop music -----
def stopFunction():
    """Halt playback entirely and flag the player as stopped."""
    global stopped
    mixer.music.stop()
    stopped = TRUE
# Stop button
stopBtn = Button(middleFrame, image=stopPhoto, command=lambda:stopFunction(), highlightthickness=0, bd=0)
stopBtn.grid(row = 2,column=2, padx = 10)
# ---------------------
# --- Bottom frame: volume controls -----
bottomframe = ttk.Frame(root, relief=RAISED, borderwidth=1)
bottomframe.pack(padx = 30, pady=20)
# ----------------------
# --- mute and unmute ----
muted = FALSE  # shared mute state flag, toggled by muteFunction/setVolume
def muteFunction():
    """Toggle mute: drop volume to 0, or restore it to 50% when already muted."""
    global muted
    if not muted:
        # Currently audible -> silence and switch to the muted icon.
        unmuteBtn.configure(image=mutePhoto)
        mixer.music.set_volume(0)
        scale.set(0)
        muted = TRUE
    else:
        # Currently muted -> restore half volume and the normal icon.
        unmuteBtn.configure(image=unmutePhoto)
        mixer.music.set_volume(0.5)
        muted = FALSE
        scale.set(50)
# Mute/unmute toggle button (icon swapped by muteFunction/setVolume)
unmuteBtn = Button(bottomframe, image=unmutePhoto, command=muteFunction, highlightthickness=0, bd=0)
unmuteBtn.grid(row=0, column=1)
# ----------------------------
# --- Volume Slider ---
def setVolume(val):
    """Slider callback: set mixer volume and keep the mute icon/state in sync.

    *val* arrives from ttk.Scale as a numeric string such as '37.0'.
    """
    # FIX: without this declaration the original assigned a useless local,
    # so the shared `muted` flag read by muteFunction() went stale.
    global muted
    vol = float(val) / 100
    mixer.music.set_volume(vol)
    if vol > 0:
        unmuteBtn.configure(image=unmutePhoto)
        muted = FALSE
    if vol == 0:
        unmuteBtn.configure(image=mutePhoto)
        muted = TRUE
# Volume slider, initialised to 50%; moving it fires setVolume().
scale = ttk.Scale(bottomframe, from_ = 0, to = 100, orient=HORIZONTAL, command =setVolume)
scale.set(50)
mixer.music.set_volume(0.5)
scale.grid(row = 0, column = 2,pady = 15)
listofplaylist()
# ----------
def on_closing():
    """Window-close handler: stop the music, then tear down the UI."""
    stopFunction()
    root.destroy()
# Ensure playback stops cleanly when the window is closed.
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
# def start():
#     root.protocol("WM_DELETE_WINDOW", on_closing)
#     root.mainloop()
|
zconfig.py | #!/usr/bin/python3 -OO
# Copyright 2007-2021 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.zconfig - bonjour/zeroconfig support
"""
import logging
import socket
_HOST_PORT = (None, None)
try:
from sabnzbd.utils import pybonjour
from threading import Thread
_HAVE_BONJOUR = True
except:
_HAVE_BONJOUR = False
import sabnzbd
import sabnzbd.cfg as cfg
from sabnzbd.misc import is_localhost
_BONJOUR_OBJECT = None
def _zeroconf_callback(sdRef, flags, errorCode, name, regtype, domain):
    """Callback invoked by pybonjour once the service registration settles."""
    logging.debug(
        "Full Bonjour-callback sdRef=%s, flags=%s, errorCode=%s, name=%s, regtype=%s, domain=%s",
        sdRef,
        flags,
        errorCode,
        name,
        regtype,
        domain,
    )
    # Anything other than NoError means the registration did not stick.
    if errorCode == pybonjour.kDNSServiceErr_NoError:
        logging.info('Registered in Bonjour as "%s" (%s)', name, domain)
def set_bonjour(host=None, port=None):
    """Publish host/port combo through Bonjour.

    With no arguments, re-publishes the host/port remembered from a previous
    call. Silently does nothing when pybonjour is unavailable, broadcasting
    is disabled, or the host is localhost.
    """
    global _HOST_PORT, _BONJOUR_OBJECT
    if not _HAVE_BONJOUR or not cfg.enable_broadcast():
        logging.info("No bonjour/zeroconf support installed")
        return
    if host is None and port is None:
        # Re-publish using the values remembered from the previous call.
        host, port = _HOST_PORT
    else:
        _HOST_PORT = (host, port)
    scope = pybonjour.kDNSServiceInterfaceIndexAny
    zhost = None
    domain = None
    if is_localhost(host):
        logging.info("Cannot setup bonjour/zeroconf for localhost (%s)", host)
        # All implementations fail to implement "localhost" properly
        # A false address is published even when scope==kDNSServiceInterfaceIndexLocalOnly
        return
    name = socket.gethostname()
    logging.debug('Try to publish in Bonjour as "%s" (%s:%s)', name, host, port)
    try:
        refObject = pybonjour.DNSServiceRegister(
            interfaceIndex=scope,
            name="SABnzbd on %s:%s" % (name, port),
            regtype="_http._tcp",
            domain=domain,
            host=zhost,
            port=int(port),
            txtRecord=pybonjour.TXTRecord({"path": cfg.url_base(), "https": cfg.enable_https()}),
            callBack=_zeroconf_callback,
        )
    except sabnzbd.utils.pybonjour.BonjourError as e:
        _BONJOUR_OBJECT = None
        logging.debug("Failed to start Bonjour service: %s", str(e))
    except Exception:
        # Catch-all: pybonjour can fail in platform-specific, non-Bonjour ways.
        _BONJOUR_OBJECT = None
        logging.debug("Failed to start Bonjour service due to non-pybonjour related problem", exc_info=True)
    else:
        # FIX: the original constructed the Thread but never called start(),
        # so DNSServiceProcessResult was never pumped and callbacks never
        # fired. Daemonized so it cannot block interpreter shutdown.
        Thread(target=_bonjour_server, args=(refObject,), daemon=True).start()
        _BONJOUR_OBJECT = refObject
        logging.debug("Successfully started Bonjour service")
def _bonjour_server(refObject):
    """Worker loop: pump pybonjour events for the registered service forever."""
    while True:
        pybonjour.DNSServiceProcessResult(refObject)
        logging.debug("GOT A BONJOUR CALL")
def remove_server():
    """Remove Bonjour registration"""
    global _BONJOUR_OBJECT
    if not _BONJOUR_OBJECT:
        return
    _BONJOUR_OBJECT.close()
    _BONJOUR_OBJECT = None
|
__init__.py | from typing import Dict, List, Union
import threading
import asyncio
import json
from aiocqhttp import CQHttp, MessageSegment
from Core.UMRType import UnifiedMessage, MessageEntity, ChatType, EntityType
from Core import UMRDriver
from Core import UMRLogging
from Core.UMRMessageRelation import set_ingress_message_id, set_egress_message_id
from Util.Helper import check_attribute, unparse_entities_to_markdown
from Core import UMRConfig
import re
import os
qq_emoji_list = { # created by JogleLew and jqqqqqqqqqq, optimized based on Tim's emoji support
0: '😮',
1: '😣',
2: '😍',
3: '😳',
4: '😎',
5: '😭',
6: '☺️',
7: '😷',
8: '😴',
9: '😭',
10: '😰',
11: '😡',
12: '😝',
13: '😃',
14: '🙂',
15: '🙁',
16: '🤓',
17: '[Empty]',
18: '😤',
19: '😨',
20: '😏',
21: '😊',
22: '🙄',
23: '😕',
24: '🤤',
25: '😪',
26: '😨',
27: '😓',
28: '😬',
29: '🤑',
30: '✊',
31: '😤',
32: '🤔',
33: '🤐',
34: '😵',
35: '😩',
36: '💣',
37: '💀',
38: '🔨',
39: '👋',
40: '[Empty]',
41: '😮',
42: '💑',
43: '🕺',
44: '[Empty]',
45: '[Empty]',
46: '🐷',
47: '[Empty]',
48: '[Empty]',
49: '🤷',
50: '[Empty]',
51: '[Empty]',
52: '[Empty]',
53: '🎂',
54: '⚡',
55: '💣',
56: '🔪',
57: '⚽️',
58: '[Empty]',
59: '💩',
60: '☕️',
61: '🍚',
62: '[Empty]',
63: '🌹',
64: '🥀',
65: '[Empty]',
66: '❤️',
67: '💔️',
68: '[Empty]',
69: '🎁',
70: '[Empty]',
71: '[Empty]',
72: '[Empty]',
73: '[Empty]',
74: '🌞️',
75: '🌃',
76: '👍',
77: '👎',
78: '🤝',
79: '✌️',
80: '[Empty]',
81: '[Empty]',
82: '[Empty]',
83: '[Empty]',
84: '[Empty]',
85: '🥰',
86: '[怄火]',
87: '[Empty]',
88: '[Empty]',
89: '🍉',
90: '[Empty]',
91: '[Empty]',
92: '[Empty]',
93: '[Empty]',
94: '[Empty]',
95: '[Empty]',
96: '😅',
97: '[擦汗]',
98: '[抠鼻]',
99: '👏',
100: '[糗大了]',
101: '😏',
102: '😏',
103: '😏',
104: '🥱',
105: '[鄙视]',
106: '😭',
107: '😭',
108: '[阴险]',
109: '😚',
110: '🙀',
111: '[可怜]',
112: '🔪',
113: '🍺',
114: '🏀',
115: '🏓',
116: '❤️',
117: '🐞',
118: '[抱拳]',
119: '[勾引]',
120: '✊',
121: '[差劲]',
122: '🤟',
123: '🚫',
124: '👌',
125: '[转圈]',
126: '[磕头]',
127: '[回头]',
128: '[跳绳]',
129: '👋',
130: '[激动]',
131: '[街舞]',
132: '😘',
133: '[左太极]',
134: '[右太极]',
135: '[Empty]',
136: '[双喜]',
137: '🧨',
138: '🏮',
139: '💰',
140: '[K歌]',
141: '🛍️',
142: '📧',
143: '[帅]',
144: '👏',
145: '🙏',
146: '[爆筋]',
147: '🍭',
148: '🍼',
149: '[下面]',
150: '🍌',
151: '🛩',
152: '🚗',
153: '🚅',
154: '[车厢]',
155: '[高铁右车头]',
156: '🌥',
157: '下雨',
158: '💵',
159: '🐼',
160: '💡',
161: '[风车]',
162: '⏰',
163: '🌂',
164: '[彩球]',
165: '💍',
166: '🛋',
167: '[纸巾]',
168: '💊',
169: '🔫',
170: '🐸',
171: '🍵',
172: '[眨眼睛]',
173: '😭',
174: '[无奈]',
175: '[卖萌]',
176: '[小纠结]',
177: '[喷血]',
178: '[斜眼笑]',
179: '[doge]',
180: '[惊喜]',
181: '[骚扰]',
182: '😹',
183: '[我最美]',
184: '🦀',
185: '[羊驼]',
186: '[Empty]',
187: '👻',
188: '🥚',
189: '[Empty]',
190: '🌼',
191: '[Empty]',
192: '🧧',
193: '😄',
194: '😞',
195: '[Empty]',
196: '[Empty]',
197: '[冷漠]',
198: '[呃]',
199: '👍',
200: '👋',
201: '👍',
202: '[无聊]',
203: '[托脸]',
204: '[吃]',
205: '💐',
206: '😨',
207: '[花痴]',
208: '[小样儿]',
209: '[Empty]',
210: '😭',
211: '[我不看]',
212: '[托腮]',
213: '[Empty]',
214: '😙',
215: '[糊脸]',
216: '[拍头]',
217: '[扯一扯]',
218: '[舔一舔]',
219: '[蹭一蹭]',
220: '[拽炸天]',
221: '[顶呱呱]',
222: '🤗',
223: '[暴击]',
224: '🔫',
225: '[撩一撩]',
226: '[拍桌]',
227: '👏',
228: '[恭喜]',
229: '🍻',
230: '[嘲讽]',
231: '[哼]',
232: '[佛系]',
233: '[掐一掐]',
234: '😮',
235: '[颤抖]',
236: '[啃头]',
237: '[偷看]',
238: '[扇脸]',
239: '[原谅]',
240: '[喷脸]',
241: '🎂',
242: '[Empty]',
243: '[Empty]',
244: '[Empty]',
245: '[Empty]',
246: '[Empty]',
247: '[Empty]',
248: '[Empty]',
249: '[Empty]',
250: '[Empty]',
251: '[Empty]',
252: '[Empty]',
253: '[Empty]',
254: '[Empty]',
255: '[Empty]',
}
# original text copied from Tim
qq_emoji_text_list = {
0: '[惊讶]',
1: '[撇嘴]',
2: '[色]',
3: '[发呆]',
4: '[得意]',
5: '[流泪]',
6: '[害羞]',
7: '[闭嘴]',
8: '[睡]',
9: '[大哭]',
10: '[尴尬]',
11: '[发怒]',
12: '[调皮]',
13: '[呲牙]',
14: '[微笑]',
15: '[难过]',
16: '[酷]',
17: '[Empty]',
18: '[抓狂]',
19: '[吐]',
20: '[偷笑]',
21: '[可爱]',
22: '[白眼]',
23: '[傲慢]',
24: '[饥饿]',
25: '[困]',
26: '[惊恐]',
27: '[流汗]',
28: '[憨笑]',
29: '[悠闲]',
30: '[奋斗]',
31: '[咒骂]',
32: '[疑问]',
33: '[嘘]',
34: '[晕]',
35: '[折磨]',
36: '[衰]',
37: '[骷髅]',
38: '[敲打]',
39: '[再见]',
40: '[Empty]',
41: '[发抖]',
42: '[爱情]',
43: '[跳跳]',
44: '[Empty]',
45: '[Empty]',
46: '[猪头]',
47: '[Empty]',
48: '[Empty]',
49: '[拥抱]',
50: '[Empty]',
51: '[Empty]',
52: '[Empty]',
53: '[蛋糕]',
54: '[闪电]',
55: '[炸弹]',
56: '[刀]',
57: '[足球]',
58: '[Empty]',
59: '[便便]',
60: '[咖啡]',
61: '[饭]',
62: '[Empty]',
63: '[玫瑰]',
64: '[凋谢]',
65: '[Empty]',
66: '[爱心]',
67: '[心碎]',
68: '[Empty]',
69: '[礼物]',
70: '[Empty]',
71: '[Empty]',
72: '[Empty]',
73: '[Empty]',
74: '[太阳]',
75: '[月亮]',
76: '[赞]',
77: '[踩]',
78: '[握手]',
79: '[胜利]',
80: '[Empty]',
81: '[Empty]',
82: '[Empty]',
83: '[Empty]',
84: '[Empty]',
85: '[飞吻]',
86: '[怄火]',
87: '[Empty]',
88: '[Empty]',
89: '[西瓜]',
90: '[Empty]',
91: '[Empty]',
92: '[Empty]',
93: '[Empty]',
94: '[Empty]',
95: '[Empty]',
96: '[冷汗]',
97: '[擦汗]',
98: '[抠鼻]',
99: '[鼓掌]',
100: '[糗大了]',
101: '[坏笑]',
102: '[左哼哼]',
103: '[右哼哼]',
104: '[哈欠]',
105: '[鄙视]',
106: '[委屈]',
107: '[快哭了]',
108: '[阴险]',
109: '[亲亲]',
110: '[吓]',
111: '[可怜]',
112: '[菜刀]',
113: '[啤酒]',
114: '[篮球]',
115: '[乒乓]',
116: '[示爱]',
117: '[瓢虫]',
118: '[抱拳]',
119: '[勾引]',
120: '[拳头]',
121: '[差劲]',
122: '[爱你]',
123: '[NO]',
124: '[OK]',
125: '[转圈]',
126: '[磕头]',
127: '[回头]',
128: '[跳绳]',
129: '[挥手]',
130: '[激动]',
131: '[街舞]',
132: '[献吻]',
133: '[左太极]',
134: '[右太极]',
135: '[Empty]',
136: '[双喜]',
137: '[鞭炮]',
138: '[灯笼]',
139: '[发财]',
140: '[K歌]',
141: '[购物]',
142: '[邮件]',
143: '[帅]',
144: '[喝彩]',
145: '[祈祷]',
146: '[爆筋]',
147: '[棒棒糖]',
148: '[喝奶]',
149: '[下面]',
150: '[香蕉]',
151: '[飞机]',
152: '[开车]',
153: '[高铁左车头]',
154: '[车厢]',
155: '[高铁右车头]',
156: '[多云]',
157: '[下雨]',
158: '[钞票]',
159: '[熊猫]',
160: '[灯泡]',
161: '[风车]',
162: '[闹钟]',
163: '[打伞]',
164: '[彩球]',
165: '[钻戒]',
166: '[沙发]',
167: '[纸巾]',
168: '[药]',
169: '[手枪]',
170: '[青蛙]',
171: '[茶]',
172: '[眨眼睛]',
173: '[泪奔]',
174: '[无奈]',
175: '[卖萌]',
176: '[小纠结]',
177: '[喷血]',
178: '[斜眼笑]',
179: '[doge]',
180: '[惊喜]',
181: '[骚扰]',
182: '[笑哭]',
183: '[我最美]',
184: '[河蟹]',
185: '[羊驼]',
186: '[Empty]',
187: '[幽灵]',
188: '[蛋]',
189: '[Empty]',
190: '[菊花]',
191: '[Empty]',
192: '[红包]',
193: '[大笑]',
194: '[不开心]',
195: '[Empty]',
196: '[Empty]',
197: '[冷漠]',
198: '[呃]',
199: '[好棒]',
200: '[拜托]',
201: '[点赞]',
202: '[无聊]',
203: '[托脸]',
204: '[吃]',
205: '[送花]',
206: '[害怕]',
207: '[花痴]',
208: '[小样儿]',
209: '[Empty]',
210: '[飙泪]',
211: '[我不看]',
212: '[托腮]',
213: '[Empty]',
214: '[啵啵]',
215: '[糊脸]',
216: '[拍头]',
217: '[扯一扯]',
218: '[舔一舔]',
219: '[蹭一蹭]',
220: '[拽炸天]',
221: '[顶呱呱]',
222: '[抱抱]',
223: '[暴击]',
224: '[开枪]',
225: '[撩一撩]',
226: '[拍桌]',
227: '[拍手]',
228: '[恭喜]',
229: '[干杯]',
230: '[嘲讽]',
231: '[哼]',
232: '[佛系]',
233: '[掐一掐]',
234: '[惊呆]',
235: '[颤抖]',
236: '[啃头]',
237: '[偷看]',
238: '[扇脸]',
239: '[原谅]',
240: '[喷脸]',
241: '[生日快乐]',
242: '[Empty]',
243: '[Empty]',
244: '[Empty]',
245: '[Empty]',
246: '[Empty]',
247: '[Empty]',
248: '[Empty]',
249: '[Empty]',
250: '[Empty]',
251: '[Empty]',
252: '[Empty]',
253: '[Empty]',
254: '[Empty]',
255: '[Empty]',
}
qq_sface_list = {
1: '[拜拜]',
2: '[鄙视]',
3: '[菜刀]',
4: '[沧桑]',
5: '[馋了]',
6: '[吃惊]',
7: '[微笑]',
8: '[得意]',
9: '[嘚瑟]',
10: '[瞪眼]',
11: '[震惊]',
12: '[鼓掌]',
13: '[害羞]',
14: '[好的]',
15: '[惊呆了]',
16: '[静静看]',
17: '[可爱]',
18: '[困]',
19: '[脸红]',
20: '[你懂的]',
21: '[期待]',
22: '[亲亲]',
23: '[伤心]',
24: '[生气]',
25: '[摇摆]',
26: '[帅]',
27: '[思考]',
28: '[震惊哭]',
29: '[痛心]',
30: '[偷笑]',
31: '[挖鼻孔]',
32: '[抓狂]',
33: '[笑着哭]',
34: '[无语]',
35: '[捂脸]',
36: '[喜欢]',
37: '[笑哭]',
38: '[疑惑]',
39: '[赞]',
40: '[眨眼]'
}
class QQDriver(UMRDriver.BaseDriver):
    def __init__(self, name):
        """Initialize driver instance *name*: config, CQHttp bot and handlers."""
        self.name = name
        self.logger = UMRLogging.getLogger(f'UMRDriver.{self.name}')
        self.logger.debug(f'Started initialization for {self.name}')
        # Private event loop; driven by the dedicated thread created in start().
        self.loop: asyncio.AbstractEventLoop = asyncio.new_event_loop()
        self.loop.set_exception_handler(self.handle_exception)
        self.config: Dict = UMRConfig.config['Driver'][self.name]
        # (key, optional, default) triples validated by check_attribute below.
        attributes = [
            ('Account', False, None),
            ('APIRoot', False, None),
            ('ListenIP', False, None),
            ('ListenPort', False, None),
            ('Token', False, None),
            ('Secret', False, None),
            ('NameforPrivateChat', False, None),
            ('NameforGroupChat', False, None),
        ]
        check_attribute(self.config, attributes, self.logger)
        self.bot = CQHttp(api_root=self.config.get('APIRoot'),
                          access_token=self.config.get('Token'),
                          secret=self.config.get('Secret'))

        ##### initializations #####
        # get group list
        self.group_list: Dict[int, Dict[int, Dict]] = dict()  # Dict[group_id, Dict[member_id, member_info]]
        # see https://cqhttp.cc/docs/4.13/#/API?id=响应数据23
        self.is_coolq_pro = self.config.get('IsPro', False)  # todo initialization on startup
        self.stranger_list: Dict[int, str] = dict()
        # Map between CQHttp message_type strings and the unified ChatType enum.
        self.chat_type_dict = {
            'group': ChatType.GROUP,
            'discuss': ChatType.DISCUSS,
            'private': ChatType.PRIVATE,
        }
        self.chat_type_dict_reverse = {v: k for k, v in self.chat_type_dict.items()}

        @self.bot.on_message()
        async def handle_msg(context):
            # Inbound CQHttp event: convert to UnifiedMessage(s), record the
            # ingress id mapping, then hand each message to the UMR core.
            message_type = context.get("message_type")
            chat_id = context.get(f'{message_type}_id')
            chat_type = self.chat_type_dict[message_type]
            self.logger.debug(f'Received message: {str(context)}')
            unified_message_list = await self.dissemble_message(context)
            set_ingress_message_id(src_platform=self.name, src_chat_id=chat_id, src_chat_type=chat_type,
                                   src_message_id=context.get('message_id'), user_id=context.get('user_id'))
            for message in unified_message_list:
                await UMRDriver.receive(message)
            return {}
    def start(self):
        """Launch the CQHttp/Quart server on this driver's private event loop.

        The server runs in a daemon thread (registered with UMRDriver.threads)
        so that startup never blocks the main program.
        """
        def run():
            asyncio.set_event_loop(self.loop)
            self.logger.debug(f'Starting Quart server for {self.name}')
            task = self.bot._server_app.run_task(host=self.config.get('ListenIP'),
                                                 port=self.config.get('ListenPort'))
            self.loop.create_task(task)
            self.loop.run_forever()

        t = threading.Thread(target=run)
        t.daemon = True
        UMRDriver.threads.append(t)
        t.start()
        self.logger.debug(f'Finished initialization for {self.name}')
    ##### Define send and receive #####
    async def send(self, to_chat: Union[int, str], chat_type: ChatType, messsage: UnifiedMessage):
        """Thread-safe send entry point: schedule _send() on the driver loop.

        Returns the concurrent.futures.Future from run_coroutine_threadsafe.
        NOTE(review): parameter name `messsage` is a typo, but it is part of
        the public signature (keyword callers) — confirm before renaming.
        """
        self.logger.debug('calling real send')
        return asyncio.run_coroutine_threadsafe(self._send(to_chat, chat_type, messsage), self.loop)
    async def _send(self, to_chat: int, chat_type: ChatType, message: UnifiedMessage):
        """Build the CQHttp payload for *message* and send it to *to_chat*.

        Runs on the driver's event loop. Returns the platform message id of
        the sent message, or None when chat_type is UNSPECIFIED.
        """
        self.logger.debug('begin processing message')
        context = dict()
        if chat_type == ChatType.UNSPECIFIED:
            self.logger.warning(f'Sending to undefined group or chat {to_chat}')
            return
        _chat_type = self.chat_type_dict_reverse[chat_type]
        context['message_type'] = _chat_type
        context['message'] = list()
        if message.image:
            # NOTE(review): only the basename is passed to MessageSegment.image;
            # presumably the file already sits where the client resolves it — confirm.
            image_name = os.path.basename(message.image)
            context['message'].append(MessageSegment.image(image_name))
        # Optionally prefix sender attribution ("name (reply/forward): ").
        if (_chat_type == 'private' and self.config['NameforPrivateChat']) or \
                (_chat_type in ('group', 'discuss') and self.config['NameforGroupChat']):
            # name logic
            if message.chat_attrs.name:
                context['message'].append(MessageSegment.text(message.chat_attrs.name))
            if message.chat_attrs.reply_to:
                context['message'].append(MessageSegment.text(' (➡️️' + message.chat_attrs.reply_to.name + ')'))
            if message.chat_attrs.forward_from:
                context['message'].append(MessageSegment.text(' (️️↩️' + message.chat_attrs.forward_from.name + ')'))
            if message.chat_attrs.name:
                context['message'].append(MessageSegment.text(': '))
        # at user
        if message.send_action.user_id:
            context['message'].append(MessageSegment.at(message.send_action.user_id))
            context['message'].append(MessageSegment.text(' '))
        context['message'].append(MessageSegment.text(unparse_entities_to_markdown(message, EntityType.PLAIN)))
        # Private messages are addressed by user_id; group/discuss by their own id.
        if _chat_type == 'private':
            context['user_id'] = to_chat
        else:
            context[f'{_chat_type}_id'] = to_chat
        self.logger.debug('finished processing message, ready to send')
        result = await self.bot.send(context, context['message'])
        if message.chat_attrs:
            # Record the egress id mapping so later edits/replies can be traced.
            set_egress_message_id(src_platform=message.chat_attrs.platform,
                                  src_chat_id=message.chat_attrs.chat_id,
                                  src_chat_type=message.chat_attrs.chat_type,
                                  src_message_id=message.chat_attrs.message_id,
                                  dst_platform=self.name,
                                  dst_chat_id=to_chat,
                                  dst_chat_type=chat_type,
                                  dst_message_id=result.get('message_id'),
                                  user_id=self.config['Account'])
        self.logger.debug('finished sending')
        return result.get('message_id')
async def get_username(self, user_id: int, chat_id: int, chat_type: ChatType):
if user_id == self.config['Account']:
return 'bot'
if user_id == 1000000:
return 'App message'
if chat_type == ChatType.GROUP:
user = await self.bot.get_group_member_info(group_id=chat_id, user_id=user_id)
username = user.get('card')
if not username:
username = user.get('nickname', str(user_id))
else:
user = await self.bot.get_stranger_info(user_id=user_id)
username = user.get('nickname', str(user_id))
if username == 'mpqqnickname':
username = 'TencentBot'
return username
    async def dissemble_message(self, context):
        """Convert a raw CQHttp event *context* into a list of UnifiedMessage."""
        message_type = context.get('message_type')
        # Group/discuss chats are keyed by their own id, private chats by user id.
        if message_type in ('group', 'discuss'):
            chat_id = context.get(f'{message_type}_id')
        else:
            chat_id = context.get('user_id')
        user_id = context.get('user_id')
        message_id = context.get('message_id')
        user = context.get('sender')
        # Prefer the per-group card; fall back to nickname, then the raw id.
        username = user.get('card')
        if not username:
            username = user.get('nickname', str(user_id))
        message: List[Dict] = context['message']
        # Special single-segment messages (share/music/dice/...) yield one result.
        unified_message = await self.parse_special_message(chat_id, self.chat_type_dict[message_type], username, message_id, user_id, message)
        if unified_message:
            return [unified_message]
        unified_message_list = await self.parse_message(chat_id, self.chat_type_dict[message_type], username, message_id, user_id, message)
        return unified_message_list
async def parse_special_message(self, chat_id: int, chat_type: ChatType, username: str, message_id: int, user_id: int,
message: List[Dict[str, Dict[str, str]]]):
if len(message) > 1:
return None
message = message[0]
message_type = message['type']
message = message['data']
unified_message = UnifiedMessage(platform=self.name,
chat_id=chat_id,
chat_type=chat_type,
name=username,
user_id=user_id,
message_id=message_id)
if message_type == 'share':
unified_message.message = 'Shared '
unified_message.message_entities.append(
MessageEntity(start=len(unified_message.message),
end=len(unified_message.message) + len(message['title']),
entity_type=EntityType.LINK,
link=message['url']))
unified_message.message += message['title']
elif message_type == 'rich':
if 'url' in message:
url = message['url']
if url.startswith('mqqapi'):
cq_location_regex = re.compile(r'^mqqapi:.*lat=(.*)&lon=(.*)&title=(.*)&loc=(.*)&.*$')
locations = cq_location_regex.findall(message['url']) # [('lat', 'lon', 'name', 'addr')]
unified_message.message = f'Shared a location: {locations[2]}, {locations[3]}, {locations[0]}, {locations[1]}'
else:
unified_message.message = message.get('title', message.get('text'))
unified_message.message_entities.append(
MessageEntity(start=0,
end=len(unified_message.message),
entity_type=EntityType.LINK,
link=message['url']))
elif 'title' in message:
if 'content' in message:
try:
content = json.loads(message['content'])
if 'news' in content:
unified_message.message = 'Shared '
unified_message.message_entities.append(
MessageEntity(start=len(unified_message.message),
end=len(unified_message.message) + len(message['title']),
entity_type=EntityType.LINK,
link=content.get('jumpUrl')))
unified_message.message += message['title'] + ' ' + message.get('desc')
elif 'weather' in content:
unified_message.message = message['title']
else:
self.logger.debug(f'Got miscellaneous rich text message with content: {str(message)}')
unified_message.message = message['title']
except:
self.logger.exception(f'Cannot decode json: {str(message)}')
unified_message.message = message['title']
else:
unified_message.message = message['title']
else:
self.logger.debug(f'Got miscellaneous rich text message: {str(message)}')
unified_message.message = message.get('text', str(message))
elif message_type == 'dice':
unified_message.message = 'Rolled '
unified_message.message_entities.append(
MessageEntity(start=len(unified_message.message),
end=len(unified_message.message) + len(message['type']),
entity_type=EntityType.BOLD))
unified_message.message += message['type']
elif message_type == 'rps':
unified_message.message = 'Played '
played = {'1': 'Rock',
'2': 'Scissors',
'3': 'Paper'}[message['type']]
unified_message.message_entities.append(
MessageEntity(start=len(unified_message.message),
end=len(unified_message.message) + len(played),
entity_type=EntityType.BOLD))
unified_message.message += played
elif message_type == 'shake':
unified_message.message = 'Sent you a shake'
elif message_type == 'music':
if message['type'].startswith('163'):
unified_message.message = 'Shared a music: '
music_title = 'Netease Music'
unified_message.message_entities.append(
MessageEntity(start=len(unified_message.message),
end=len(unified_message.message) + len(music_title),
entity_type=EntityType.LINK,
link=f'https://music.163.com/song?id={message["id"]}'))
unified_message += music_title
elif message['type'].startswith('qq'):
unified_message.message = 'Shared a music: '
music_title = 'Netease Music'
unified_message.message_entities.append(
MessageEntity(start=len(unified_message.message),
end=len(unified_message.message) + len(music_title),
entity_type=EntityType.LINK,
link=f'https://y.qq.com/n/yqq/song/{message["id"]}_num.html'))
unified_message += music_title
else:
self.logger.debug(f'Got unseen music share message: {str(message)}')
unified_message.message = 'Shared a music: ' + str(message)
elif message_type == 'record':
unified_message.message = 'Unsupported voice record, please view on QQ'
elif message_type == 'bface':
unified_message.message = 'Unsupported big face, please view on QQ'
elif message_type == 'sign':
unified_message.image = message['image']
sign_text = f'Sign at location: {message["location"]} with title: {message["title"]}'
unified_message.message = sign_text
else:
return
return unified_message
    async def parse_message(self, chat_id: int, chat_type: ChatType, username: str, message_id: int, user_id: int,
                            message: List[Dict[str, Dict[str, str]]]):
        """Convert an ordinary segmented message into a list of UnifiedMessage.

        A new UnifiedMessage is started whenever an image segment arrives and
        the current one already carries text or an image, because each
        UnifiedMessage holds at most one image.
        """
        message_list = list()
        unified_message = UnifiedMessage(platform=self.name,
                                         chat_id=chat_id,
                                         chat_type=chat_type,
                                         name=username,
                                         user_id=user_id,
                                         message_id=message_id)
        for m in message:
            message_type = m['type']
            m = m['data']
            if message_type == 'image':
                # message not empty or contained a image, append to list
                if unified_message.message or unified_message.image:
                    message_list.append(unified_message)
                    unified_message = UnifiedMessage(platform=self.name,
                                                     chat_id=chat_id,
                                                     chat_type=chat_type,
                                                     name=username,
                                                     user_id=user_id,
                                                     message_id=message_id)
                unified_message.image = m['url']
            elif message_type == 'text':
                unified_message.message += m['text']
            elif message_type == 'at':
                # Render @mentions as bold "@<resolved username>".
                target = await self.get_username(int(m['qq']), chat_id, chat_type)
                at_user_text = '@' + target
                unified_message.message_entities.append(
                    MessageEntity(start=len(unified_message.message),
                                  end=len(unified_message.message) + len(at_user_text),
                                  entity_type=EntityType.BOLD))
                unified_message.message += at_user_text
            elif message_type == 'sface':
                # Small faces: only the low byte of the id indexes qq_sface_list.
                qq_face = int(m['id']) & 255
                if qq_face in qq_sface_list:
                    unified_message.message += qq_sface_list[qq_face]
                else:
                    unified_message.message += '\u2753'  # ❓
            elif message_type == 'face':
                qq_face = int(m['id'])
                if qq_face in qq_emoji_list:
                    unified_message.message += qq_emoji_list[qq_face]
                else:
                    unified_message.message += '\u2753'  # ❓
            else:
                self.logger.debug(f'Unhandled message type: {str(m)} with type: {message_type}')
        message_list.append(unified_message)
        return message_list
async def is_group_admin(self, chat_id: int, chat_type: ChatType, user_id: int):
if chat_type != ChatType.GROUP:
return False
if chat_id not in self.group_list:
return False
return self.group_list[chat_id][user_id]['role'] in ('owner', 'admin')
async def is_group_owner(self, chat_id: int, chat_type: ChatType, user_id: int):
if chat_type != ChatType.GROUP:
return False
if chat_id not in self.group_list:
return False
return self.group_list[chat_id][user_id]['role'] == 'owner'
def handle_exception(self, loop, context):
# context["message"] will always be there; but context["exception"] may not
msg = context.get("exception", context["message"])
self.logger.exception('Unhandled exception: ', exc_info=msg)
# Make this driver available to the framework under the 'QQ' platform name.
UMRDriver.register_driver('QQ', QQDriver)
|
create_tfrecords.py | """
Create the tfrecord files for a dataset.
A lot of this code comes from the tensorflow inception example, so here is their license:
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
from __future__ import absolute_import
import argparse
from datetime import datetime
import hashlib
import json
import os
from queue import Queue
import random
import sys
import threading
import numpy as np
import tensorflow.compat.v1 as tf
tf.compat.v1.disable_eager_execution()
def _int64_feature(value):
    """Wrapper for inserting int64 features into Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrapper for inserting float features into Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_list_feature(value):
    """Wrapper for inserting bytes list features into Example proto."""
    encoded = [item.encode('utf8') for item in value]
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=encoded))
def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    data = value if isinstance(value, bytes) else value.encode('utf8')
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[data]))
def _validate_text(text):
"""If text is not str or unicode, then try to convert it to str."""
if isinstance(text, str):
return text
else:
return str(text)
def _convert_to_example(image_example, image_buffer, height, width, colorspace='RGB',
                        channels=3, image_format='JPEG'):
    """Build an Example proto for an example.

    Args:
      image_example: dict, an image example
      image_buffer: string, JPEG encoding of RGB image
      height: integer, image height in pixels
      width: integer, image width in pixels
      colorspace: string, color-space label stored with the image
      channels: integer, number of color channels
      image_format: string, encoding format label of image_buffer
    Returns:
      Example proto
    """
    # Required
    filename = str(image_example['filename'])
    image_id = str(image_example['id'])

    # Class label for the whole image
    image_class = image_example.get('class', {})
    class_label = image_class.get('label', 0)
    class_text = _validate_text(image_class.get('text', ''))
    class_conf = image_class.get('conf', 1.)

    # Objects
    image_objects = image_example.get('object', {})
    object_count = image_objects.get('count', 0)

    # Bounding Boxes
    image_bboxes = image_objects.get('bbox', {})
    xmin = image_bboxes.get('xmin', [])
    xmax = image_bboxes.get('xmax', [])
    ymin = image_bboxes.get('ymin', [])
    ymax = image_bboxes.get('ymax', [])
    bbox_scores = image_bboxes.get('score', [])
    bbox_labels = image_bboxes.get('label', [])
    # FIX: materialize as a list. The original kept a lazy map() iterator that
    # is consumed twice below ('image/object/bbox/text' and
    # 'image/object/class/text'); the second read of an exhausted iterator
    # silently produced an empty feature.
    bbox_text = [_validate_text(t) for t in image_bboxes.get('text', [])]
    bbox_label_confs = image_bboxes.get('conf', [])

    # Parts
    image_parts = image_objects.get('parts', {})
    parts_x = image_parts.get('x', [])
    parts_y = image_parts.get('y', [])
    parts_v = image_parts.get('v', [])
    parts_s = image_parts.get('score', [])

    # Areas
    object_areas = image_objects.get('area', [])
    # Ids (FIX: a list for the same single-use-iterator reason as bbox_text)
    object_ids = [str(i) for i in image_objects.get('id', [])]

    # Any extra data (e.g. stringified json)
    extra_info = str(image_class.get('extra', ''))

    # Additional fields for the format needed by the Object Detection repository
    key = hashlib.sha256(image_buffer).hexdigest()
    is_crowd = image_objects.get('is_crowd', [])

    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(colorspace),
        'image/channels': _int64_feature(channels),
        'image/format': _bytes_feature(image_format),
        'image/filename': _bytes_feature(filename),
        'image/id': _bytes_feature(image_id),
        'image/encoded': _bytes_feature(image_buffer),
        'image/extra': _bytes_feature(extra_info),
        # NOTE(review): class_text and image_id are plain strings, so
        # _bytes_list_feature stores them as per-character entries; original
        # behavior is preserved here — confirm whether _bytes_feature was meant.
        'image/class/label': _int64_feature(class_label),
        'image/class/text': _bytes_list_feature(class_text),
        'image/class/conf': _float_feature(class_conf),
        'image/object/bbox/xmin': _float_feature(xmin),
        'image/object/bbox/xmax': _float_feature(xmax),
        'image/object/bbox/ymin': _float_feature(ymin),
        'image/object/bbox/ymax': _float_feature(ymax),
        'image/object/bbox/label': _int64_feature(bbox_labels),
        'image/object/bbox/text': _bytes_list_feature(bbox_text),
        'image/object/bbox/conf': _float_feature(bbox_label_confs),
        'image/object/bbox/score' : _float_feature(bbox_scores),
        'image/object/parts/x' : _float_feature(parts_x),
        'image/object/parts/y' : _float_feature(parts_y),
        'image/object/parts/v' : _int64_feature(parts_v),
        'image/object/parts/score' : _float_feature(parts_s),
        'image/object/count' : _int64_feature(object_count),
        'image/object/area' : _float_feature(object_areas),
        'image/object/id' : _bytes_list_feature(object_ids),
        # Additional fields for the format needed by the Object Detection repository
        'image/source_id': _bytes_list_feature(image_id),
        'image/key/sha256': _bytes_feature(key),
        'image/object/class/label': _int64_feature(bbox_labels),
        'image/object/class/text': _bytes_list_feature(bbox_text),
        'image/object/is_crowd': _int64_feature(is_crowd)
    }))
    return example
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities.

    A single tf.Session and two small graphs are built once and reused:
    one converting PNG bytes to JPEG bytes, one decoding JPEG bytes to an
    RGB uint8 array.
    """

    def __init__(self):
        # One shared Session services every coding call made by this coder.
        self._sess = tf.Session()

        # Graph 1: PNG bytes in -> JPEG bytes out (forced to 3-channel RGB).
        self._png_data = tf.placeholder(dtype=tf.string)
        decoded_png = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(decoded_png, format='rgb', quality=100)

        # Graph 2: JPEG bytes in -> HxWx3 uint8 array out.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        """Re-encode raw PNG bytes as JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def decode_jpeg(self, image_data):
        """Decode raw JPEG bytes into an RGB image array."""
        decoded = self._sess.run(self._decode_jpeg,
                                 feed_dict={self._decode_jpeg_data: image_data})
        assert len(decoded.shape) == 3, "JPEG needs to have height x width x channels"
        assert decoded.shape[2] == 3, "JPEG needs to have 3 channels (RGB)"
        return decoded
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
_, file_extension = os.path.splitext(filename)
return file_extension.lower() == '.png'
def _process_image(filename, coder):
    """Process a single image file.

    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    # Read the image file. BUG FIX: the file handle was previously never
    # closed (FastGFile(...).read()); use a context manager instead of
    # relying on garbage collection.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()
    # Clean the dirty data: normalize PNGs to JPEG bytes.
    if _is_png(filename):
        image_data = coder.png_to_jpeg(image_data)
    # Decode the RGB JPEG.
    image = coder.decode_jpeg(image_data)
    # Check that image converted to RGB (rank 3, 3 channels).
    assert len(image.shape) == 3
    height = image.shape[0]
    width = image.shape[1]
    assert image.shape[2] == 3
    return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, output_directory,
                               dataset, num_shards, store_images, error_queue):
    """Processes and saves list of images as TFRecord in 1 thread.

    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set (e.g. `train` or `test`)
      output_directory: string, file path to store the tfrecord files.
      dataset: list, a list of image example dicts
      num_shards: integer number of shards for this data set.
      store_images: bool, should the image be stored in the tfrecord
      error_queue: Queue, a queue to place image examples that failed.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    error_counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)

        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            image_example = dataset[i]
            filename = str(image_example['filename'])
            try:
                if store_images:
                    if 'encoded' in image_example:
                        # The example already carries image bytes; reuse them.
                        image_buffer = image_example['encoded']
                        height = image_example['height']
                        width = image_example['width']
                        colorspace = image_example['colorspace']
                        image_format = image_example['format']
                        num_channels = image_example['channels']
                        example = _convert_to_example(image_example, image_buffer, height,
                                                      width, colorspace, num_channels,
                                                      image_format)
                    else:
                        # Read + (re)encode the image from disk.
                        image_buffer, height, width = _process_image(filename, coder)
                        example = _convert_to_example(image_example, image_buffer, height,
                                                      width)
                else:
                    # Metadata-only record: no pixel data is embedded.
                    image_buffer = b''
                    height = int(image_example['height'])
                    width = int(image_example['width'])
                    example = _convert_to_example(image_example, image_buffer, height,
                                                  width)
                writer.write(example.SerializeToString())
                shard_counter += 1
                counter += 1
            except Exception as e:
                # BUG FIX: a stray `raise` here previously aborted the whole
                # worker thread and made this bookkeeping unreachable. Failed
                # examples are now recorded on the error queue and processing
                # continues, as the docstring and `error_queue` arg intend.
                error_counter += 1
                error_msg = repr(e)
                image_example['error_msg'] = error_msg
                error_queue.put(image_example)

            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch, with %d errors.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread, error_counter))
                sys.stdout.flush()

        # BUG FIX: flush and close the shard file; the writer was never closed.
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s, with %d errors.' %
              (datetime.now(), thread_index, shard_counter, output_file, error_counter))
        sys.stdout.flush()
        shard_counter = 0

    print('%s [thread %d]: Wrote %d images to %d shards, with %d errors.' %
          (datetime.now(), thread_index, counter, num_files_in_thread, error_counter))
    sys.stdout.flush()
def create(dataset, dataset_name, output_directory, num_shards, num_threads, shuffle=True, store_images=True):
    """Create the tfrecord files to be used to train or test a model.

    Args:
      dataset : [{
        "filename" : <REQUIRED: path to the image file>,
        "id" : <REQUIRED: id of the image>,
        "class" : {
          "label" : <[0, num_classes)>,
          "text" : <text description of class>
        },
        "object" : {
          "bbox" : {
            "xmin" : [],
            "xmax" : [],
            "ymin" : [],
            "ymax" : [],
            "label" : []
          }
        }
      }]
      dataset_name: a name for the dataset
      output_directory: path to a directory to write the tfrecord files
      num_shards: the number of tfrecord files to create
      num_threads: the number of threads to use
      shuffle : bool, should the image examples be shuffled or not prior to creating the tfrecords.
      store_images : bool, should the image bytes be embedded in the tfrecords.
    Returns:
      list : a list of image examples that failed to process.
    """
    # Images in the tfrecords set must be shuffled properly
    if shuffle:
        random.shuffle(dataset)

    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # BUG FIX: `np.int` was removed in NumPy 1.24; use the builtin `int`.
    spacing = np.linspace(0, len(dataset), num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])

    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (num_threads, ranges))
    sys.stdout.flush()

    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()

    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()

    # A Queue to hold the image examples that fail to process.
    error_queue = Queue()

    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, dataset_name, output_directory, dataset,
                num_shards, store_images, error_queue)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)

    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(dataset)))

    # Collect the errors
    errors = []
    while not error_queue.empty():
        errors.append(error_queue.get())
    print('%d examples failed.' % (len(errors),))

    return errors
def parse_args():
    """Build the command-line interface and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='Basic statistics on tfrecord files')

    parser.add_argument('--dataset_path', dest='dataset_path', type=str, required=True,
                        help='Path to the dataset json file.')
    parser.add_argument('--prefix', dest='dataset_name', type=str, required=True,
                        help='Prefix for the tfrecords (e.g. `train`, `test`, `val`).')
    parser.add_argument('--output_dir', dest='output_dir', type=str, required=True,
                        help='Directory for the tfrecords.')
    parser.add_argument('--shards', dest='num_shards', type=int, required=True,
                        help='Number of shards to make.')
    parser.add_argument('--threads', dest='num_threads', type=int, required=True,
                        help='Number of threads to make.')
    # Boolean flags: both default to False and are enabled by presence only.
    parser.add_argument('--shuffle', dest='shuffle', required=False,
                        action='store_true', default=False,
                        help='Shuffle the records before saving them.')
    parser.add_argument('--store_images', dest='store_images', required=False,
                        action='store_true', default=False,
                        help='Store the images in the tfrecords.')

    return parser.parse_args()
def main():
    """Entry point: load the dataset json and write out the tfrecords.

    Returns the list of image examples that failed to process.
    """
    args = parse_args()
    with open(args.dataset_path) as dataset_file:
        image_examples = json.load(dataset_file)
    return create(
        dataset=image_examples,
        dataset_name=args.dataset_name,
        output_directory=args.output_dir,
        num_shards=args.num_shards,
        num_threads=args.num_threads,
        shuffle=args.shuffle,
        store_images=args.store_images
    )
# Script entry point: build the tfrecords according to the CLI arguments.
if __name__ == '__main__':
    main()
|
base_camera.py | # reference https://github.com/miguelgrinberg/flask-video-streaming/blob/master/base_camera.py
import threading
import time
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
    """
    An Event-like class that signals all active clients when a new frame is
    available.

    Each client thread gets its own ``threading.Event`` plus the timestamp of
    the last time the camera signalled it, keyed by thread identity.
    """

    def __init__(self):
        # Maps client thread ident -> [threading.Event, last-set timestamp].
        self.events = {}

    def wait(self):
        """Invoked from each client's thread to wait for the next frame."""
        ident = get_ident()
        if ident not in self.events:
            # this is a new client
            # add an entry for it in the self.events dict
            # each entry has two elements, a threading.Event() and a timestamp
            self.events[ident] = [threading.Event(), time.time()]
        return self.events[ident][0].wait()

    def set(self):
        """Invoked by the camera thread when a new frame is available."""
        now = time.time()
        stale = []
        for ident, event in self.events.items():
            # is_set() replaces the deprecated isSet() alias.
            if not event[0].is_set():
                # if this client's event is not set, then set it
                # also update the last set timestamp to now
                event[0].set()
                event[1] = now
            elif now - event[1] > 60:
                # if the client's event is already set, it means the client
                # did not process a previous frame
                # if the event stays set for more than 60 seconds, then assume
                # the client is gone and remove it
                # BUG FIX: collect *all* stale clients; the previous code kept
                # a single `remove` variable and dropped at most one per call.
                stale.append(ident)
        for ident in stale:
            del self.events[ident]

    def clear(self):
        """Invoked from each client's thread after a frame was processed."""
        self.events[get_ident()][0].clear()
class BaseCamera(object):
    """Base class for a camera served by a single shared background thread.

    All state is class-level: every instance shares one frame-producing
    thread, one current frame and one CameraEvent. Subclasses implement
    :meth:`frames` as a generator of frame payloads.
    """
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    last_access = 0  # time of last client access to the camera
    event = CameraEvent()

    def __init__(self):
        """Start the background camera thread if it isn't running yet."""
        if BaseCamera.thread is None:
            BaseCamera.last_access = time.time()

            # start background frame thread
            BaseCamera.thread = threading.Thread(target=self._thread)
            BaseCamera.thread.start()

            # wait until frames are available
            # (time.sleep(0) yields the CPU without a fixed delay)
            while self.get_frame() is None:
                time.sleep(0)

    def get_frame(self):
        """Return the current camera frame.

        Blocks until the camera thread signals that a new frame is ready;
        also refreshes ``last_access`` so the thread stays alive.
        """
        BaseCamera.last_access = time.time()

        # wait for a signal from the camera thread
        BaseCamera.event.wait()
        BaseCamera.event.clear()

        return BaseCamera.frame

    @staticmethod
    def frames():
        """Generator that returns frames from the camera."""
        raise RuntimeError("Must be implemented by subclasses.")

    @classmethod
    def _thread(cls):
        """Camera background thread: pull frames and fan them out to clients."""
        print("Starting camera thread.")
        frames_iterator = cls.frames()
        for frame in frames_iterator:
            BaseCamera.frame = frame
            BaseCamera.event.set()  # send signal to clients
            time.sleep(0)

            # if there hasn't been any clients asking for frames in
            # the last 120 seconds then stop the thread
            if time.time() - BaseCamera.last_access > 120:
                frames_iterator.close()
                print("Stopping camera thread due to inactivity.")
                break
        # Allow a later __init__ to restart the thread.
        BaseCamera.thread = None
|
core.py | import re
import struct
import time
import socket, select
import threading
import xmltodict
import json
try:
import queue as queue
except ImportError:
import Queue as queue
import netifaces
from collections import namedtuple
from . import commands
from .utils import ValueRange, format_nri_list
class ISCPMessage(object):
    """Deals with formatting and parsing data wrapped in an ISCP
    container. The docs say:

    ISCP (Integra Serial Control Protocol) consists of three
    command characters and parameter character(s) of variable
    length.

    It seems this was the original protocol used for communicating
    via a serial cable.
    """

    def __init__(self, data):
        self.data = data

    def __str__(self):
        # ! = start character
        # 1 = destination unit type, 1 means receiver
        # End character may be CR, LF or CR+LF, according to doc
        return '!1{}\r'.format(self.data)

    @classmethod
    def parse(cls, data):
        """Return the payload of a raw ISCP message.

        Strips the leading '!1' and the trailing EOF byte plus optional
        CR/LF/CR+LF terminator.
        """
        # NOTE: first parameter renamed self -> cls; this is a @classmethod.
        EOF = '\x1a'
        TERMINATORS = ['\n', '\r']
        assert data[:2] == '!1'
        eof_offset = -1
        # EOF can be followed by CR/LF/CR+LF
        if data[eof_offset] in TERMINATORS:
            eof_offset -= 1
        if data[eof_offset] in TERMINATORS:
            eof_offset -= 1
        assert data[eof_offset] == EOF
        return data[2:eof_offset]
class eISCPPacket(object):
    """For communicating over Ethernet, traditional ISCP messages are
    wrapped inside an eISCP package.
    """

    # Parsed representation of the fixed 16-byte packet header.
    header = namedtuple('header', (
        'magic, header_size, data_size, version, reserved'))

    def __init__(self, iscp_message):
        iscp_message = str(iscp_message)
        # We attach data separately, because Python's struct module does
        # not support variable length strings,
        header = struct.pack(
            '! 4s I I b 3s',
            b'ISCP',            # magic
            16,                 # header size (16 bytes)
            len(iscp_message),  # data size
            0x01,               # version
            b'\x00\x00\x00'     # reserved
        )
        self._bytes = header + iscp_message.encode('utf-8')
        # __new__, string subclass?

    def __str__(self):
        return self._bytes.decode('utf-8')

    def get_raw(self):
        """Return the full packet (header + payload) as bytes."""
        return self._bytes

    @classmethod
    def parse(cls, bytes):
        """Parse the eISCP package given by ``bytes``.
        """
        h = cls.parse_header(bytes[:16])
        data = bytes[h.header_size:h.header_size + h.data_size].decode()
        assert len(data) == h.data_size
        return data

    @classmethod
    def parse_header(cls, bytes):
        """Parse the header of an eISCP package.

        This is useful when reading data in a streaming fashion,
        because you can subsequently know the number of bytes to
        expect in the packet.
        """
        # NOTE: first parameter renamed self -> cls; this is a @classmethod.
        # (The `bytes` parameter name shadows the builtin but is kept for
        # backward compatibility with keyword callers.)
        # A header is always 16 bytes in length
        assert len(bytes) == 16
        # Parse the header
        magic, header_size, data_size, version, reserved = \
            struct.unpack('! 4s I I b 3s', bytes)
        magic = magic.decode()
        reserved = reserved.decode()
        # Strangely, the header contains a header_size field.
        assert magic == 'ISCP'
        assert header_size == 16
        # Use cls.header (not a hard-coded class name) so subclasses work.
        return cls.header(
            magic, header_size, data_size, version, reserved)
def command_to_packet(command):
    """Convert an ascii command like (PVR00) to the binary data we
    need to send to the receiver.
    """
    packet = eISCPPacket(ISCPMessage(command))
    return packet.get_raw()
def normalize_command(command):
    """Ensures that various ways to refer to a command can be used.

    Lowercases the command and maps both '_' and '-' separators to spaces.
    """
    return command.lower().replace('_', ' ').replace('-', ' ')
def command_to_iscp(command, arguments=None, zone=None):
    """Transform the given high-level command to a
    low-level ISCP message.

    Raises :class:`ValueError` if `command` is not valid.

    This exposes a system of human-readable, "pretty"
    commands, which is organized into three parts: the zone, the
    command, and arguments. For example::

        command('power', 'on')
        command('power', 'on', zone='main')
        command('volume', 66, zone='zone2')

    As you can see, if no zone is given, the main zone is assumed.

    Instead of passing three different parameters, you may put the
    whole thing in a single string, which is helpful when taking
    input from users::

        command('power on')
        command('zone2 volume 66')

    To further simplify things, for example when taking user input
    from a command line, where whitespace needs escaping, the
    following is also supported:

        command('power=on')
        command('zone2.volume=66')
    """
    default_zone = 'main'
    command_sep = r'[. ]'
    norm = lambda s: s.strip().lower()

    # If parts are not explicitly given, parse the command
    if arguments is None and zone is None:
        # Separating command and args with colon allows multiple args
        if ':' in command or '=' in command:
            base, arguments = re.split(r'[:=]', command, maxsplit=1)
            parts = [norm(c) for c in re.split(command_sep, base)]
            if len(parts) == 2:
                zone, command = parts
            else:
                zone = default_zone
                command = parts[0]
            # Split arguments by comma or space
            arguments = [norm(a) for a in re.split(r'[ ,]', arguments)]
        else:
            # Split command part by space or dot
            parts = [norm(c) for c in re.split(command_sep, command)]
            if len(parts) >= 3:
                zone, command = parts[:2]
                # BUG FIX: was parts[3:], which dropped the first argument and
                # raised IndexError further down for exactly three parts
                # (e.g. 'zone2 volume 66').
                arguments = parts[2:]
            elif len(parts) == 2:
                zone = default_zone
                command = parts[0]
                arguments = parts[1:]
            else:
                raise ValueError('Need at least command and argument')

    # Find the command in our database, resolve to internal eISCP command
    group = commands.ZONE_MAPPINGS.get(zone, zone)
    # BUG FIX: validate the *resolved* group; a zone alias from ZONE_MAPPINGS
    # is not itself a key of COMMANDS, yet all lookups below use `group`.
    if group not in commands.COMMANDS:
        raise ValueError('"{}" is not a valid zone'.format(zone))

    prefix = commands.COMMAND_MAPPINGS[group].get(command, command)
    if prefix not in commands.COMMANDS[group]:
        raise ValueError('"{}" is not a valid command in zone "{}"'.format(
            command, zone))

    # Resolve the argument to the command. This is a bit more involved,
    # because some commands support ranges (volume) or patterns
    # (setting tuning frequency). In some cases, we might imagine
    # providing the user an API with multiple arguments (TODO: not
    # currently supported).
    if type(arguments) is list:
        argument = arguments[0]
    else:
        argument = arguments

    # 1. Consider if there is a alias, e.g. level-up for UP.
    try:
        value = commands.VALUE_MAPPINGS[group][prefix][argument]
    except KeyError:
        # 2. See if we can match a range or pattern
        for possible_arg in commands.VALUE_MAPPINGS[group][prefix]:
            if type(argument) is int or (type(argument) is str and argument.lstrip("-").isdigit() is True):
                if isinstance(possible_arg, ValueRange):
                    if int(argument) in possible_arg:
                        # We need to send the format "FF", hex() gives us 0xff
                        value = hex(int(argument))[2:].zfill(2).upper()
                        # Subwoofer/center level want an explicit sign; note
                        # hex() renders negatives as '-0x5', which the slicing
                        # above turns into 'X5'.
                        if prefix == 'SWL' or prefix == 'CTL':
                            if value == '00':
                                value = '0' + value
                            elif value[0] != 'X':
                                value = '+' + value
                            elif len(value) == 2:
                                # Single negative digit: zero-pad, 'X5' -> '-05'.
                                value = '-0' + value[1:]
                            else:
                                value = '-' + value[1:]
                        break
        # TODO: patterns not yet supported
        else:
            raise ValueError('"{}" is not a valid argument for command '
                             '"{}" in zone "{}"'.format(argument, command, zone))

    return '{}{}'.format(prefix, value)
def iscp_to_command(iscp_message):
    """Translate a low-level ISCP message back into a (command, value) pair.

    Raises :class:`ValueError` when no zone knows the three-letter command.
    """
    # For now, ISCP commands are always three characters, which
    # makes this easy.
    command, args = iscp_message[:3], iscp_message[3:]
    for zone, zone_cmds in commands.COMMANDS.items():
        if command not in zone_cmds:
            continue
        cmd_info = zone_cmds[command]
        if args in cmd_info['values']:
            # Known named value: return its pretty name.
            return cmd_info['name'], cmd_info['values'][args]['name']
        if re.match('[+-]?[0-9a-f]+$', args, re.IGNORECASE):
            # Hex-looking argument: decode it to an integer.
            return cmd_info['name'], int(args, 16)
        # Unknown value: hand back the raw argument string.
        return cmd_info['name'], args
    raise ValueError(
        'Cannot convert ISCP message to command: {}'.format(iscp_message))
def filter_for_message(getter_func, msg):
    """Helper that calls ``getter_func`` until a matching message
    is found, or the timeout occurs. Matching means the same commands
    group, i.e. for sent message MVLUP we would accept MVL13
    in response."""
    deadline = time.time() + 5.0
    while True:
        candidate = getter_func(0.05)
        # It seems ISCP commands are always three characters.
        if candidate and candidate[:3] == msg[:3]:
            return candidate
        # exception for HDMI-CEC commands (CTV) since they don't provide any
        # response/confirmation: just echo the sent message back
        if "CTV" in msg[:3]:
            return msg
        # The protocol docs claim that a response should arrive
        # within *50ms or the communication has failed*. In my tests,
        # however, the interval needed to be at least 200ms before
        # I managed to see any response, and only after 300ms
        # reproducably, so use a generous timeout.
        if time.time() > deadline:
            raise ValueError('Timeout waiting for response.')
def parse_info(data):
    """Extract the device fields from a raw ECN discovery response packet.

    Returns a dict with device_category, model_name, iscp_port, area_code
    and identifier.
    """
    response = eISCPPacket.parse(data)
    # Return string looks something like this:
    # !1ECNTX-NR609/60128/DX
    match = re.match(r'''
        !
        (?P<device_category>\d)
        ECN
        (?P<model_name>[^/]*)/
        (?P<iscp_port>\d{5})/
        (?P<area_code>\w{2})/
        (?P<identifier>.{0,12})
        ''', response.strip(), re.VERBOSE)
    return match.groupdict()
class eISCP(object):
    """Implements the eISCP interface to Onkyo receivers.

    This uses a blocking interface. The remote end will regularly
    send unsolicited status updates. You need to manually call
    ``get_message`` to query those.

    You may want to look at the :meth:`Receiver` class instead, which
    uses a background thread.
    """
    # Default eISCP port used by Onkyo receivers.
    ONKYO_PORT = 60128
    # Seconds to wait when opening the TCP command socket.
    CONNECT_TIMEOUT = 5

    @classmethod
    def discover(cls, timeout=5, clazz=None):
        """Try to find ISCP devices on network.

        Waits for ``timeout`` seconds, then returns all devices found,
        in form of a list of dicts.
        """
        # '!x' targets Onkyo devices, '!p' targets Pioneer devices.
        onkyo_magic = eISCPPacket('!xECNQSTN').get_raw()
        pioneer_magic = eISCPPacket('!pECNQSTN').get_raw()
        # Since due to interface aliasing we may see the same Onkyo device
        # multiple times, we build the list as a dict keyed by the
        # unique identifier code
        found_receivers = {}
        # We do this on all network interfaces
        # which have an AF_INET address and broadcast address
        for interface in netifaces.interfaces():
            ifaddrs=netifaces.ifaddresses(interface)
            if not netifaces.AF_INET in ifaddrs:
                continue
            for ifaddr in ifaddrs[netifaces.AF_INET]:
                if not "addr" in ifaddr or not "broadcast" in ifaddr:
                    continue
                # Broadcast magic
                sock = socket.socket(
                    socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
                sock.setblocking(0)  # So we can use select()
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                sock.bind((ifaddr["addr"], 0))
                sock.sendto(onkyo_magic, (ifaddr["broadcast"], eISCP.ONKYO_PORT))
                sock.sendto(pioneer_magic, (ifaddr["broadcast"], eISCP.ONKYO_PORT))
                # Collect replies until ``timeout`` seconds pass without one.
                while True:
                    ready = select.select([sock], [], [], timeout)
                    if not ready[0]:
                        break
                    data, addr = sock.recvfrom(1024)
                    info = parse_info(data)
                    # Give the user a ready-made receiver instance. It will only
                    # connect on demand, when actually used.
                    receiver = (clazz or eISCP)(addr[0], int(info['iscp_port']))
                    receiver.info = info
                    found_receivers[info["identifier"]]=receiver
                sock.close()
        return list(found_receivers.values())

    def __init__(self, host, port=60128):
        """Remember the target address; the socket is opened lazily."""
        self.host = host
        self.port = port
        self._info = None   # cached ECN discovery info (see the `info` property)
        self._nri = None    # cached NRI (receiver information) dict
        self.command_socket = None

    @property
    def model_name(self):
        # Model name from discovery info, or a placeholder when unknown.
        if self.info and self.info.get('model_name'):
            return self.info['model_name']
        else:
            return 'unknown-model'

    @property
    def identifier(self):
        # Unique device identifier from discovery info, or a placeholder.
        if self.info and self.info.get('identifier'):
            return self.info['identifier']
        else:
            return 'no-id'

    def __repr__(self):
        if self.info and self.info.get('model_name'):
            model = self.info['model_name']
        else:
            model = 'unknown'
        string = "<{}({}) {}:{}>".format(
            self.__class__.__name__, model, self.host, self.port)
        return string

    @property
    def info(self):
        """Device discovery info, queried over UDP on first access and cached."""
        if not self._info:
            sock = socket.socket(
                socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            sock.setblocking(0)
            sock.bind(('0.0.0.0', 0))
            sock.sendto(eISCPPacket('!xECNQSTN').get_raw(), (self.host, self.port))
            # Wait at most 100ms for the reply; stays None on timeout.
            ready = select.select([sock], [], [], 0.1)
            if ready[0]:
                data = sock.recv(1024)
                self._info = parse_info(data)
            sock.close()
        return self._info

    @info.setter
    def info(self, value):
        self._info = value

    @property
    def nri(self):
        # Cached NRI document, fetched from the receiver on first access.
        if self._nri:
            return self._nri
        return self.get_nri()

    @property
    def net_services(self):
        """Network services listed in the receiver's NRI document."""
        data = self.nri.get('netservicelist').get('netservice')
        return format_nri_list(data)

    @property
    def zones(self):
        """Zones listed in the receiver's NRI document."""
        data = self.nri.get('zonelist').get('zone')
        return format_nri_list(data)

    @property
    def controls(self):
        """Controls listed in the receiver's NRI document."""
        data = self.nri.get('controllist').get('control')
        return format_nri_list(data)

    @property
    def functions(self):
        """Functions listed in the receiver's NRI document."""
        data = self.nri.get('functionlist').get('function')
        return format_nri_list(data)

    @property
    def selectors(self):
        """Input selectors from NRI, without the "Source" pseudo-entry."""
        data = self.nri.get('selectorlist').get('selector')
        info = format_nri_list(data)
        # Remove Source selector
        if info.get("Source") is not None:
            info.pop("Source")
        return info

    @property
    def presets(self):
        """Tuner presets from NRI, keyed by preset id."""
        info = {}
        data = self.nri.get('presetlist').get('preset')
        for item in data:
            if item.get("id") is not None:
                key = item.pop("id")
                info[key] = item
        return info

    @property
    def tuners(self):
        """Tuners from NRI, keyed by band."""
        info = {}
        data = self.nri.get('tuners').get('tuner')
        for item in data:
            if item.get("band") is not None:
                key = item.pop("band")
                info[key] = item
        return info

    def _ensure_socket_connected(self):
        # Lazily open the TCP command socket; once connected it is switched
        # to non-blocking so reads can be multiplexed via select().
        if self.command_socket is None:
            self.command_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.command_socket.settimeout(self.CONNECT_TIMEOUT)
            self.command_socket.connect((self.host, self.port))
            self.command_socket.setblocking(0)

    def disconnect(self):
        """Close the command socket (best-effort) and forget it."""
        try:
            self.command_socket.close()
        # NOTE(review): the bare except also hides AttributeError when never
        # connected; `except (OSError, AttributeError)` would be narrower.
        except:
            pass
        self.command_socket = None

    def __enter__(self):
        # Context-manager support: connect on entry ...
        self._ensure_socket_connected()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # ... and disconnect on exit.
        self.disconnect()

    def send(self, iscp_message):
        """Send a low-level ISCP message, like ``MVL50``.

        This does not return anything, nor does it wait for a response
        from the receiver. You can query responses via :meth:`get`,
        or use :meth:`raw` to send a message and waiting for one.
        """
        self._ensure_socket_connected()
        self.command_socket.send(command_to_packet(iscp_message))

    def get(self, timeout=0.1):
        """Return the next message sent by the receiver, or, after
        ``timeout`` has passed, return ``None``.
        """
        self._ensure_socket_connected()

        # ``timeout or 0`` maps None/False to a non-blocking poll.
        ready = select.select([self.command_socket], [], [], timeout or 0)
        if ready[0]:
            header_bytes = self.command_socket.recv(16)
            header = eISCPPacket.parse_header(header_bytes)
            # Read until the full payload announced by the header has arrived.
            body = b''
            while len(body) < header.data_size:
                ready = select.select([self.command_socket], [], [], timeout or 0)
                if not ready[0]:
                    # NOTE(review): a partially received body is dropped here.
                    return None
                body += self.command_socket.recv(header.data_size - len(body))
            return ISCPMessage.parse(body.decode())

    def raw(self, iscp_message):
        """Send a low-level ISCP message, like ``MVL50``, and wait
        for a response.

        While the protocol is designed to acknowledge each message with
        a response, there is no fool-proof way to differentiate those
        from unsolicited status updates, though we'll do our best to
        try. Generally, this won't be an issue, though in theory the
        response this function returns to you sending ``SLI05`` may be
        an ``SLI06`` update from another controller.

        It'd be preferable to design your app in a way where you are
        processing all incoming messages the same way, regardless of
        their origin.
        """
        while self.get(False):
            # Clear all incoming messages. If not yet queried,
            # they are lost. This is so that we can find the real
            # response to our sent command later.
            pass
        self.send(iscp_message)
        return filter_for_message(self.get, iscp_message)

    def command(self, command, arguments=None, zone=None):
        """Send a high-level command to the receiver, return the
        receiver's response formatted as a command.

        This is basically a helper that combines :meth:`raw`,
        :func:`command_to_iscp` and :func:`iscp_to_command`.
        """
        iscp_message = command_to_iscp(command, arguments, zone)
        response = self.raw(iscp_message)
        if response:
            return iscp_to_command(response)

    def power_on(self):
        """Turn the receiver power on."""
        return self.command('power', 'on')

    def power_off(self):
        """Turn the receiver power off."""
        return self.command('power', 'off')

    def get_nri(self):
        """Return NRI info as dict (also refreshes the cached copy)."""
        data = self.command("dock.receiver-information=query")[1]
        if data:
            data = xmltodict.parse(data, attr_prefix="")
            data = data.get("response").get("device")
            # Cast OrderedDict to dict
            data = json.loads(json.dumps(data))
            self._nri = data
        return data
class Receiver(eISCP):
    """Changes the behaviour of :class:`eISCP` to use a background
    thread for network operations. This allows receiving messages
    from the receiver via a callback::

        def message_received(message):
            print(message)

        receiver = Receiver('...')
        receiver.on_message = message_received

    The argument ``message`` is the parsed ISCP message string as
    produced by :meth:`eISCP.get`.
    """

    @classmethod
    def discover(cls, timeout=5, clazz=None):
        # Same as eISCP.discover, but instantiate Receiver objects by default.
        return eISCP.discover(timeout, clazz or Receiver)

    def _ensure_thread_running(self):
        # Lazily start the background worker thread on first use.
        if not getattr(self, '_thread', False):
            self._stop = False
            self._queue = queue.Queue()
            self._thread = threading.Thread(target=self._thread_loop)
            self._thread.start()

    def disconnect(self):
        """Ask the background thread to stop and wait for it to finish."""
        # NOTE(review): raises AttributeError if nothing was ever sent
        # (the thread, and thus self._thread, was never created).
        self._stop = True
        self._thread.join()
        self._thread = None

    def send(self, iscp_message):
        """Like :meth:`eISCP.send`, but sends asynchronously via the
        background thread.
        """
        self._ensure_thread_running()
        # Queue items are (message, event, result); no event/result means
        # fire-and-forget.
        self._queue.put((iscp_message, None, None))

    def get(self, *a, **kw):
        """Not supported by this class. Use the :attr:`on_message``
        hook to handle incoming messages.
        """
        raise NotImplementedError()

    def raw(self, iscp_message):
        """Like :meth:`eISCP.raw`.
        """
        self._ensure_thread_running()
        # Hand the message to the worker thread and block until it signals
        # completion; the response (or an exception) comes back via `result`.
        event = threading.Event()
        result = []
        self._queue.put((iscp_message, event, result))
        event.wait()
        if isinstance(result[0], Exception):
            raise result[0]
        return result[0]

    def _thread_loop(self):
        """Worker loop: pump incoming messages, send queued outgoing ones."""
        def trigger(message):
            # Deliver an incoming message to the user callback, if any.
            if self.on_message:
                self.on_message(message)

        eISCP._ensure_socket_connected(self)
        try:
            while not self._stop:
                # Clear all incoming message first.
                while True:
                    msg = eISCP.get(self, False)
                    if not msg:
                        break
                    trigger(msg)
                # Send next message
                try:
                    item = self._queue.get(timeout=0.01)
                except queue.Empty:
                    continue
                if item:
                    message, event, result = item
                    eISCP.send(self, message)
                    # Wait for a response, if the caller so desires
                    if event:
                        try:
                            # XXX We are losing messages here, since
                            # those are not triggering the callback!
                            # eISCP.raw() really has the same problem,
                            # messages being dropped without a chance
                            # to get() them. Maybe use a queue after all.
                            response = filter_for_message(
                                super(Receiver, self).get, message)
                        except ValueError as e:
                            # No response received within timeout
                            result.append(e)
                        else:
                            result.append(response)
                        # Mark as processed
                        event.set()
        finally:
            # Use the parent's disconnect: ours would try to join this thread.
            eISCP.disconnect(self)
|
bchn-rpc-gbt-checkvalidity-ignorecache.py | #!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin Cash Node developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
bchn-rpc-gbt-checkvalidity-ignorecache
Test that the -gbtcheckvalidity arg works, and that the template
args "checkvalidity" and "ignorecache" work.
"""
import contextlib
import threading
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import sync_mempools
from decimal import Decimal
class GBTCheckValidityAndIgnoreCacheTest(BitcoinTestFramework):
def skip_test_if_missing_module(self):
    # The test spends coins via sendtoaddress/getnewaddress, so it
    # requires a wallet-enabled build.
    self.skip_if_no_wallet()
def set_test_params(self):
    """Configure a clean 4-node chain; only the last node disables
    block-template validity checking (-gbtcheckvalidity=0)."""
    self.setup_clean_chain = True
    self.num_nodes = 4
    # For performance, we disable the mempool check (on by default in tests)
    common_args = ['-checkmempool=0']
    # The first num_nodes-1 nodes explicitly enable template validity checks.
    self.extra_args = [common_args + ['-gbtcheckvalidity=1']] * (self.num_nodes - 1)
    # We set it so that the last node doesn't check block template validity by default
    self.extra_args.append(common_args + ['-gbtcheckvalidity=0'])
@contextlib.contextmanager
def assert_not_in_debug_log(self, node, excluded_msgs, timeout=2):
    """ The inverse of assert_debug_log: fail if any of the excluded_msgs are encountered in the log. """
    node_num = self.nodes.index(node)
    try:
        with node.assert_debug_log(excluded_msgs, timeout):
            yield
    except AssertionError as e:
        # assert_debug_log raises when its messages are NOT found in the log;
        # for us that is exactly the desired outcome, so swallow that
        # specific failure and treat it as success.
        if 'Expected messages "{}" does not partially match log:'.format(str(excluded_msgs)) in str(e):
            return  # success
        # Some other assertion failed inside the body: propagate it.
        raise
    # failure
    # Reaching here means assert_debug_log DID find the messages, i.e. the
    # excluded text appeared in the node's debug log.
    raise AssertionError('Some excluded messages"{}" were found in the debug log for node {}'
                         .format(str(excluded_msgs), node_num))
def run_test(self):
self.log.info("Generating 101 blocks ...")
self.nodes[-1].generate(101 + self.num_nodes)
addrs = [node.getnewaddress() for node in self.nodes]
n_txs = 32
self.log.info("Filling mempool with {} txns ...".format(n_txs))
fee = Decimal('0.00001000')
amt = Decimal('50.0')
amts = [None] * self.num_nodes
for i in range(self.num_nodes):
addr = addrs[i]
amt = amt - fee
self.log.info("Sending to node {}: {}".format(i, amt))
self.nodes[-1].sendtoaddress(addr, amt)
amts[i] = amt
self.nodes[-1].generate(1)
self.sync_all()
def thrd_func(node):
n_tx = n_txs // len(self.nodes)
n = self.nodes.index(node)
node = self.nodes[n]
addr = addrs[n]
amt = amts[n]
for i in range(n_tx):
amt = amt - fee
self.log.info("Node {}: sending {} {}/{}".format(n, amt, i+1, n_tx))
node.sendtoaddress(addr, amt)
amts[n] = amt
threads = [threading.Thread(target=thrd_func, args=(node,))
for node in self.nodes]
for t in threads:
t.start()
for t in threads:
t.join()
try:
sync_mempools(self.nodes, timeout=5)
except AssertionError:
""" We desire to synch the mempools, but it's not required for test success """
self.log.info("Mempool sizes: {}"
.format([node.getmempoolinfo()["size"] for node in self.nodes]))
""" 1. Check base functionality of the -gbtcheckvalidity option """
block_verify_msgs = [
'- Sanity checks:',
'- Fork checks:',
'- Connect ',
'- Verify '
]
# Node0 has default checkvalidity, it should have the messages associated with block verification in the debug
with self.nodes[0].assert_debug_log(block_verify_msgs):
self.nodes[0].getblocktemplatelight()
# The last node has -gbtcheckvalidity=0, it should not have the messages in the debug log
with self.assert_not_in_debug_log(self.nodes[-1], block_verify_msgs):
self.nodes[-1].getblocktemplatelight()
""" 2. Check that the 'ignorecache' template_request key works: """
create_new_block_messages = [
'CreateNewBlock():',
]
# Cached, should just return same template, no CreateNewBlock() message
with self.assert_not_in_debug_log(self.nodes[0], create_new_block_messages):
self.nodes[0].getblocktemplatelight()
# Ignore cache, should create new block, has CreateNewBlock() message
with self.nodes[0].assert_debug_log(create_new_block_messages):
self.nodes[0].getblocktemplatelight({"ignorecache": True})
# Check one last time that the cache still is used if we specify nothing
with self.assert_not_in_debug_log(self.nodes[0], create_new_block_messages):
self.nodes[0].getblocktemplatelight()
""" 3. Check that the 'checkvalidity' template_request key works on a per-call basis """
# This node normally checks validity, we disable it, then enable it on a per-call basis
with self.assert_not_in_debug_log(self.nodes[0], block_verify_msgs):
self.nodes[0].getblocktemplatelight({"checkvalidity": False, "ignorecache": True})
with self.nodes[0].assert_debug_log(block_verify_msgs):
self.nodes[0].getblocktemplatelight({"checkvalidity": True, "ignorecache": True})
# This node normally doesn't check validity, we enable it, then disable it on a per-call basis
with self.nodes[-1].assert_debug_log(block_verify_msgs):
self.nodes[-1].getblocktemplatelight({"checkvalidity": True, "ignorecache": True})
with self.assert_not_in_debug_log(self.nodes[-1], block_verify_msgs):
self.nodes[-1].getblocktemplatelight({"checkvalidity": False, "ignorecache": True})
if __name__ == '__main__':
GBTCheckValidityAndIgnoreCacheTest().main()
|
runtests_log_handler.py | # -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
:copyright: Copyright 2016 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
pytestsalt.salt.log_handlers.pytest_log_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Salt External Logging Handler
'''
# Import python libs
from __future__ import absolute_import
import errno
import socket
import logging
import threading
from multiprocessing import Queue
# Import Salt libs
import salt.utils.msgpack
from salt.ext import six
from salt.utils.platform import is_darwin
import salt.log.setup
log = logging.getLogger(__name__)
__virtualname__ = 'runtests_log_handler'
def __virtual__():
    """Load this log handler only when a runtests log port is configured
    and we are running under Python 2."""
    if 'runtests_log_port' in __opts__:
        if six.PY3:
            return False, "runtests external logging handler is temporarily disabled for Python 3 tests"
        return True
    return False, "'runtests_log_port' not in options"
def setup_handlers():
    """Install a QueueHandler that forwards log records to the runtests
    log server, or return None if the server is not reachable.

    A throwaway probe socket is used first to verify the server accepts
    connections; the actual forwarding happens in a daemon thread running
    process_queue().
    """
    port = __opts__['runtests_log_port']
    # Probe connection: only checks reachability, always closed below.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        sock.connect(('localhost', port))
    except socket.error as exc:
        # Connection refused -> no log server; bail out (handler not installed).
        # NOTE(review): other socket errors are swallowed here and setup
        # proceeds anyway -- confirm this best-effort behavior is intended.
        if exc.errno == errno.ECONNREFUSED:
            log.warning('Failed to connect to log server')
            return
    finally:
        # The probe socket is closed whether or not connect() succeeded;
        # shutdown() may fail on a never-connected socket, hence the guard.
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except OSError:
            pass
        sock.close()
    if is_darwin():
        # macOS caps SysV queue sizes much lower than Linux.
        queue_size = 32767
    else:
        queue_size = 10000000
    queue = Queue(queue_size)
    handler = salt.log.setup.QueueHandler(queue)
    level = salt.log.setup.LOG_LEVELS[(__opts__.get('runtests_log_level') or 'error').lower()]
    handler.setLevel(level)
    # Daemon thread drains the queue and ships records to the server.
    process_queue_thread = threading.Thread(target=process_queue, args=(port, queue))
    process_queue_thread.daemon = True
    process_queue_thread.start()
    return handler
def process_queue(port, queue):
    """Forward log records from ``queue`` to the log server on ``port``.

    Runs in a daemon thread (started by setup_handlers).  Stops when a
    ``None`` sentinel is read from the queue, on interrupt/exit, or when
    the connection breaks.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        sock.connect(('localhost', port))
    except socket.error as exc:
        if exc.errno == errno.ECONNREFUSED:
            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
            log.warning('Failed to connect to log server')
            return
    while True:
        try:
            record = queue.get()
            if record is None:
                # A sentinel to stop processing the queue
                break
            # Just log everything, filtering will happen on the main process
            # logging handlers
            sock.sendall(salt.utils.msgpack.dumps(record.__dict__,
                                                  encoding='utf-8'))
        except (IOError, EOFError, KeyboardInterrupt, SystemExit):
            # Tear down the connection on interpreter/test shutdown;
            # ENOTCONN just means the peer already went away.
            try:
                sock.shutdown(socket.SHUT_RDWR)
                sock.close()
            except socket.error as exc:
                if exc.errno != errno.ENOTCONN:
                    raise
            break
        except socket.error as exc:
            if exc.errno == errno.EPIPE:
                # Broken pipe
                try:
                    sock.shutdown(socket.SHUT_RDWR)
                    sock.close()
                except (OSError, socket.error):
                    pass
                break
            # Non-EPIPE socket errors are logged and the loop keeps going.
            log.exception(exc)
        except Exception as exc:  # pylint: disable=broad-except
            log.warning(
                'An exception occurred in the pytest salt logging '
                'queue thread: %s',
                exc,
                exc_info_on_loglevel=logging.DEBUG
            )
|
conftest.py | import asyncio
import threading
import psycopg2
import psycopg2.errors
import pytest
@pytest.fixture
def executor(postgresql):
    """Create a thread and return an execute() function that will run SQL queries in that
    thread.

    Each call to execute() schedules a fresh psycopg2 connection + query on a
    private asyncio event loop running in a daemon thread, so queries do not
    block the test.  All connections are closed at fixture teardown.
    """
    cnx = []  # connections opened by _execute, closed during teardown
    loop = asyncio.new_event_loop()
    def execute(query: str, commit: bool = False) -> None:
        def _execute() -> None:
            conn = psycopg2.connect(**postgresql.info.dsn_parameters)
            cnx.append(conn)
            with conn.cursor() as c:
                try:
                    c.execute(query)
                except (
                    # Server shutdown / query cancellation are expected ways
                    # for these background queries to end; ignore them.
                    # NOTE(review): the name in psycopg2.errors is
                    # QueryCanceled; QueryCanceledError lives in
                    # psycopg2.extensions -- verify this attribute exists.
                    psycopg2.errors.AdminShutdown,
                    psycopg2.errors.QueryCanceledError,
                ):
                    return
                if commit:
                    conn.commit()
        # Thread-safe handoff into the loop thread.
        loop.call_soon_threadsafe(_execute)
    def run_loop() -> None:
        asyncio.set_event_loop(loop)
        loop.run_forever()
    thread = threading.Thread(target=run_loop, daemon=True)
    thread.start()
    yield execute
    # Teardown: close connections and stop the loop from inside its thread.
    for conn in cnx:
        loop.call_soon_threadsafe(conn.close)
    loop.call_soon_threadsafe(loop.stop)
    thread.join(timeout=2)
|
test_socket.py | import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
    """Check whether CAN sockets are supported on this host."""
    try:
        probe = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
    except (AttributeError, OSError):
        # Either the constants are missing (non-Linux) or the kernel refused.
        return False
    probe.close()
    return True
def _have_socket_rds():
    """Check whether RDS sockets are supported on this host."""
    try:
        probe = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
    except (AttributeError, OSError):
        # Constants absent or the kernel lacks the rds module.
        return False
    probe.close()
    return True
def _have_socket_alg():
    """Check whether AF_ALG sockets are supported on this host."""
    try:
        probe = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
    except (AttributeError, OSError):
        # AF_ALG is Linux-only and needs kernel crypto API support.
        return False
    probe.close()
    return True
# Feature probes evaluated once at import time; used by skip decorators below.
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
    """Base TestCase providing a listening TCP server socket.

    setUp() binds self.serv to an ephemeral port (saved in self.port)
    and puts it into the listening state; tearDown() closes it.
    """
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.port = support.bind_port(self.serv)
        self.serv.listen()
    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketUDPTest(unittest.TestCase):
    """Base TestCase providing a bound UDP server socket.

    setUp() binds self.serv to an ephemeral port (saved in self.port);
    UDP needs no listen() call.
    """
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = support.bind_port(self.serv)
    def tearDown(self):
        self.serv.close()
        self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
    """Subclass of unittest.TestCase with thread-safe cleanup methods.
    This subclass protects the addCleanup() and doCleanups() methods
    with a recursive lock.
    """
    # The overrides are only defined when the threading module imported
    # successfully (see the try/except around the import at file top).
    if threading:
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # RLock so a cleanup callback may itself call addCleanup().
            self._cleanup_lock = threading.RLock()
        def addCleanup(self, *args, **kwargs):
            with self._cleanup_lock:
                return super().addCleanup(*args, **kwargs)
        def doCleanups(self, *args, **kwargs):
            with self._cleanup_lock:
                return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
    """To be able to run this test, a `vcan0` CAN interface can be created with
    the following commands:
    # modprobe vcan
    # ip link add dev vcan0 type vcan
    # ifconfig vcan0 up
    """
    interface = 'vcan0'
    bufsize = 128
    """The CAN frame structure is defined in <linux/can.h>:
    struct can_frame {
    canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
    __u8 can_dlc; /* data length code: 0 .. 8 */
    __u8 data[8] __attribute__((aligned(8)));
    };
    """
    can_frame_fmt = "=IB3x8s"
    can_frame_size = struct.calcsize(can_frame_fmt)
    """The Broadcast Management Command frame structure is defined
    in <linux/can/bcm.h>:
    struct bcm_msg_head {
    __u32 opcode;
    __u32 flags;
    __u32 count;
    struct timeval ival1, ival2;
    canid_t can_id;
    __u32 nframes;
    struct can_frame frames[0];
    }
    `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native not standard types for packing.
    """
    bcm_cmd_msg_fmt = "@3I4l2I"
    # Append pad bytes so the head's total size is a multiple of 8
    # (for this format calcsize % 8 is 0 or 4, so size + pad is 8-aligned).
    bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
    def setUp(self):
        self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        self.addCleanup(self.s.close)
        try:
            # bind() raising here means the vcan0 interface is not configured.
            self.s.bind((self.interface,))
        except OSError:
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
class SocketRDSTest(unittest.TestCase):
    """To be able to run this test, the `rds` kernel module must be loaded:
    # modprobe rds
    """
    bufsize = 8192
    def setUp(self):
        self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(self.serv.close)
        try:
            # bind() fails if the rds kernel module is missing.
            self.port = support.bind_port(self.serv)
        except OSError:
            self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class
    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:
    class NewClass (OldClass, ThreadableTest):
    pass
    This class defines two new fixture functions with obvious
    purposes for overriding:
    clientSetUp ()
    clientTearDown ()
    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:
    def testFoo(self):
    # Server portion
    def _testFoo(self):
    # Client portion
    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.
    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """
    def __init__(self):
        # Swap the true setup function
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown
    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()
    def _setUp(self):
        # Events coordinating the server (main) and client threads.
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        # Single-slot queue used to transfer a client-side exception
        # to the main thread (re-raised in _tearDown).
        self.queue = queue.Queue(1)
        self.server_crashed = False
        # Do some munging to start the client test.
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))
        try:
            self.__setUp()
        except:
            # Flag the crash so the client thread aborts cleanly.
            self.server_crashed = True
            raise
        finally:
            # Release the client thread even if server setup failed.
            self.server_ready.set()
        self.client_ready.wait()
    def _tearDown(self):
        self.__tearDown()
        self.done.wait()
        # Re-raise any exception captured in the client thread.
        if self.queue.qsize():
            exc = self.queue.get()
            raise exc
    def clientRun(self, test_func):
        # Body of the client thread: wait for the server, set up, run
        # the paired _testXxx method, and always tear down.
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            self.client_ready.set()
        if self.server_crashed:
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            self.queue.put(e)
        finally:
            self.clientTearDown()
    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")
    def clientTearDown(self):
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    """TCP server fixture plus a client socket created in a client thread."""
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        # ThreadableTest.__init__ swaps setUp/tearDown; must run after the
        # TestCase constructor.
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    """UDP server fixture plus a client socket created in a client thread."""
    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    """CAN server fixture plus a client CAN socket in a client thread."""
    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    """RDS server fixture plus a bound client RDS socket in a client thread."""
    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.
    self.cli_conn is a client socket connected to the server. The
    setUp() method guarantees that it is connected to the server.
    """
    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)
    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn
    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)
    def clientSetUp(self):
        ThreadedTCPSocketTest.clientSetUp(self)
        # Runs in the client thread; connects to the server accept() above.
        self.cli.connect((HOST, self.port))
        self.serv_conn = self.cli
    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    """Fixture built on socketpair(): self.serv and self.cli are the two
    already-connected ends; no bind/listen/accept needed."""
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def setUp(self):
        self.serv, self.cli = socket.socketpair()
    def tearDown(self):
        self.serv.close()
        self.serv = None
    def clientSetUp(self):
        pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.
    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.
    Creates a socket self.serv and sets self.serv_addr to its address.
    """
    def setUp(self):
        # newSocket() is supplied by a sibling base class (address family
        # mixin); see TCPTestBase, UDPTestBase, etc.
        self.serv = self.newSocket()
        self.bindServer()
    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()
    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketListeningTestMixin(SocketTestBase):
    """Mixin to listen on the server socket."""
    def setUp(self):
        super().setUp()
        # Stream sockets need listen(); datagram bases skip this mixin.
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
                              ThreadableTest):
    """Mixin to add client socket and allow client/server tests.
    Client socket is self.cli and its address is self.cli_addr. See
    ThreadableTest for usage information.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # ThreadableTest is not in the super() chain's cooperative __init__;
        # it must be invoked explicitly to swap setUp/tearDown.
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = self.newClientSocket()
        self.bindClient()
    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()
    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
                               ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.
    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn. (Based on
    SocketConnectedTest.)
    """
    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn
    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()
    def clientSetUp(self):
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli
    def clientTearDown(self):
        # serv_conn may be missing if clientSetUp failed before connect().
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""
    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.
    def setUp(self):
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()
    def bindSock(self, sock):
        # mktemp is safe here: the parent directory is private (mkdtemp).
        path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, path)
        self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""
    def newSocket(self):
        return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""
    host = HOST
    def setUp(self):
        super().setUp()
        # serv_addr is (host, port) for AF_INET; keep the port handy.
        self.port = self.serv_addr[1]
    def bindSock(self, sock):
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""
    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""
    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                             socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""
    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""
    def newSocket(self):
        return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.

    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not.  This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def _noop(*args, **kwargs):
        pass

    def _skipping(obj):
        # Wrap with unittest.skip; functions additionally get a
        # client_skip that discards the paired client method.
        decorated = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            decorated.client_skip = lambda f: _noop
        return decorated

    def _passthrough(obj):
        # Not skipped: client_skip is the identity (unless already set).
        if not isinstance(obj, type) and not hasattr(obj, "client_skip"):
            obj.client_skip = lambda f: f
        return obj

    if condition:
        return _skipping
    return _passthrough
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.

    Sets client_skip attribute as skipWithClientIf() does.
    """
    absent = [attr for attr in attributes if not hasattr(obj, attr)]
    reason = "don't have " + ", ".join(absent)
    return skipWithClientIf(absent, reason)
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.

    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist.  Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    # String args name socket-module constants; collect any that don't exist.
    unknown = [a for a in args if isinstance(a, str) and not hasattr(socket, a)]
    if unknown:
        err = "don't have " + ", ".join(name for name in unknown)
    else:
        resolved = [getattr(socket, a) if isinstance(a, str) else a
                    for a in args]
        try:
            probe = socket.socket(*resolved)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            probe.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
    """getservbyname()/getservbyport() round-trip consistently for a
    well-known service from the system services database."""
    eq = self.assertEqual
    # Find one service that exists, then check all the related interfaces.
    # I've ordered this by protocols that have both a tcp and udp
    # protocol, at least for modern Linuxes.
    if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
        or sys.platform in ('linux', 'darwin')):
        # avoid the 'echo' service on this platform, as there is an
        # assumption breaking non-standard port/protocol entry
        services = ('daytime', 'qotd', 'domain')
    else:
        services = ('echo', 'daytime', 'domain')
    for service in services:
        try:
            port = socket.getservbyname(service, 'tcp')
            break
        except OSError:
            pass
    else:
        # None of the candidate services resolved; nothing to test against.
        raise OSError
    # Try same call with optional protocol omitted
    # Issue #26936: Android getservbyname() was broken before API 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        port2 = socket.getservbyname(service)
        eq(port, port2)
    # Try udp, but don't barf if it doesn't exist
    try:
        udpport = socket.getservbyname(service, 'udp')
    except OSError:
        udpport = None
    else:
        eq(udpport, port)
    # Now make sure the lookup by port returns the same service name
    # Issue #26936: Android getservbyport() is broken.
    if not support.is_android:
        eq(socket.getservbyport(port2), service)
    eq(socket.getservbyport(port, 'tcp'), service)
    if udpport is not None:
        eq(socket.getservbyport(udpport, 'udp'), service)
    # Make sure getservbyport does not accept out of range ports.
    self.assertRaises(OverflowError, socket.getservbyport, -1)
    self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv6toString(self):
    """inet_pton(AF_INET6, ...) packs valid IPv6 text (including the
    embedded-IPv4 forms) and rejects malformed text."""
    try:
        from socket import inet_pton, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_pton(AF_INET6, '::')
        except OSError as e:
            # WSAEINVAL from a trivial conversion means no IPv6 stack.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_pton(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )

    # Canonical compressed and uncompressed forms.
    self.assertEqual(b'\x00' * 16, f('::'))
    self.assertEqual(b'\x00' * 16, f('0::0'))
    self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
    self.assertEqual(
        b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
        f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
    )
    self.assertEqual(
        b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
        f('ad42:abc::127:0:254:2')
    )
    self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
    # Malformed: bad digits, multiple '::', wrong group counts.
    assertInvalid('0x20::')
    assertInvalid(':::')
    assertInvalid('::0::')
    assertInvalid('1::abc::')
    assertInvalid('1::abc::def')
    assertInvalid('1:2:3:4:5:6')
    assertInvalid('1:2:3:4:5:6:7:8:0')
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not sys.platform.startswith('aix'):
        assertInvalid('1:2:3:4:5:6:')
        assertInvalid('1:2:3:4:5:6:7:8:')

    # IPv4-mapped/embedded forms.
    self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
        f('::254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
        f('42::a29b:254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
        f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
    )
    # Malformed embedded-IPv4 variants.
    assertInvalid('255.254.253.252')
    assertInvalid('1::260.2.3.0')
    assertInvalid('1::0.be.e.0')
    assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
    assertInvalid('::1.2.3.4:0')
    assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv6(self):
    """inet_ntop(AF_INET6, ...) renders packed IPv6 addresses and
    rejects buffers that are not exactly 16 bytes."""
    try:
        from socket import inet_ntop, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_ntop(AF_INET6, b'\x00' * 16)
        except OSError as e:
            # WSAEINVAL from a trivial conversion means no IPv6 stack.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_ntop(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )

    self.assertEqual('::', f(b'\x00' * 16))
    self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
    self.assertEqual(
        'aef:b01:506:1001:ffff:9997:55:170',
        f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
    )
    # bytearray input is accepted as well as bytes.
    self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))

    # Packed input must be exactly 16 bytes long.
    assertInvalid(b'\x12' * 15)
    assertInvalid(b'\x12' * 17)
    assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
    """getsockname() reflects the address and port a socket was bound to."""
    # Testing getsockname()
    port = support.find_unused_port()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.bind(("0.0.0.0", port))
    name = sock.getsockname()
    # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
    # it reasonable to get the host's addr in addition to 0.0.0.0.
    # At least for eCos. This is required for the S/390 to pass.
    try:
        my_ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
    self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
    """setsockopt() actually turns SO_REUSEADDR on, as read back via getsockopt()."""
    # Testing setsockopt()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
    """close() on a socket whose fd was already closed raises EBADF/ENOTSOCK."""
    sock = socket.socket()
    # Wrap the same fd in a second socket object and close that one, so the
    # original object is now holding a dead descriptor.
    socket.socket(fileno=sock.fileno()).close()
    try:
        sock.close()
    except OSError as err:
        # Winsock apparently raises ENOTSOCK
        self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
    else:
        self.fail("close() should raise EBADF/ENOTSOCK")

def testNewAttributes(self):
    """socket objects expose family, type and proto attributes."""
    # testing .family, .type and .protocol
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.assertEqual(sock.family, socket.AF_INET)
    if hasattr(socket, 'SOCK_CLOEXEC'):
        # Platforms with SOCK_CLOEXEC may fold it into the reported type.
        self.assertIn(sock.type,
                      (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                       socket.SOCK_STREAM))
    else:
        self.assertEqual(sock.type, socket.SOCK_STREAM)
    self.assertEqual(sock.proto, 0)
    sock.close()
def test_getsockaddrarg(self):
    """bind() rejects ports outside [0, 65535] with OverflowError, then
    binds successfully to a free port (retrying on EADDRINUSE races)."""
    sock = socket.socket()
    self.addCleanup(sock.close)
    port = support.find_unused_port()
    big_port = port + 65536
    neg_port = port - 65536
    self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
    self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = support.find_unused_port()
        try:
            sock.bind((HOST, port))
        except OSError as e:
            # Give up after a handful of collisions; anything else is fatal.
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
    """Windows only: ioctl() and the SIO_*/RCVALL_* constants exist, bad
    commands raise ValueError, and SIO_KEEPALIVE_VALS is accepted."""
    self.assertTrue(hasattr(socket.socket, 'ioctl'))
    self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
    self.assertTrue(hasattr(socket, 'RCVALL_ON'))
    self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
    self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
    s = socket.socket()
    self.addCleanup(s.close)
    self.assertRaises(ValueError, s.ioctl, -1, None)
    # (onoff, keepalive time ms, keepalive interval ms)
    s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                     'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
    """Windows only: ioctl(SIO_LOOPBACK_FAST_PATH) is accepted with a
    boolean argument (or the test is skipped where the OS lacks support)
    and rejects non-boolean arguments with TypeError."""
    s = socket.socket()
    self.addCleanup(s.close)
    try:
        s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except OSError as exc:
        WSAEOPNOTSUPP = 10045
        if exc.winerror == WSAEOPNOTSUPP:
            # Constant exists but the running Windows build lacks the
            # feature; skip rather than fail. (Message grammar fixed:
            # previously read "doesn't implemented".)
            self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                          "isn't implemented in this Windows version")
        raise
    # Only a bool-convertible flag is valid for this ioctl.
    self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
    """Exercise getaddrinfo() argument handling: host/port forms,
    family/socktype/proto/flags filters, and keyword arguments."""
    try:
        socket.getaddrinfo('localhost', 80)
    except socket.gaierror as err:
        if err.errno == socket.EAI_SERVICE:
            # see http://bugs.python.org/issue1282647
            self.skipTest("buggy libc version")
        raise
    # len of every sequence is supposed to be == 5
    for info in socket.getaddrinfo(HOST, None):
        self.assertEqual(len(info), 5)
    # host can be a domain name, a string representation of an
    # IPv4/v6 address or None
    socket.getaddrinfo('localhost', 80)
    socket.getaddrinfo('127.0.0.1', 80)
    socket.getaddrinfo(None, 80)
    if support.IPV6_ENABLED:
        socket.getaddrinfo('::1', 80)
    # port can be a string service name such as "http", a numeric
    # port number or None
    # Issue #26936: Android getaddrinfo() was broken before API level 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        socket.getaddrinfo(HOST, "http")
    socket.getaddrinfo(HOST, 80)
    socket.getaddrinfo(HOST, None)
    # test family and socktype filters
    infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
    for family, type, _, _, _ in infos:
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(str(family), 'AddressFamily.AF_INET')
        self.assertEqual(type, socket.SOCK_STREAM)
        self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
    infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    for _, socktype, _, _, _ in infos:
        self.assertEqual(socktype, socket.SOCK_STREAM)
    # test proto and flags arguments
    socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    # a server willing to support both IPv4 and IPv6 will
    # usually do this
    socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                       socket.AI_PASSIVE)
    # test keyword arguments: each positional form must match its
    # keyword-argument equivalent exactly.
    a = socket.getaddrinfo(HOST, None)
    b = socket.getaddrinfo(host=HOST, port=None)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, socket.AF_INET)
    b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
    b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                           type=socket.SOCK_STREAM, proto=0,
                           flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    # Issue #6697.
    self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
    # Issue 17269: test workaround for OS X platform bug segfault
    if hasattr(socket, 'AI_NUMERICSERV'):
        try:
            # The arguments here are undefined and the call may succeed
            # or fail. All we care here is that it doesn't segfault.
            socket.getaddrinfo("localhost", None, 0, 0, 0,
                               socket.AI_NUMERICSERV)
        except socket.gaierror:
            pass
def test_getnameinfo(self):
    """getnameinfo() only accepts numeric addresses, not host names."""
    # only IP addresses are allowed
    self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)

@unittest.skipUnless(support.is_resource_enabled('network'),
                     'network is not enabled')
def test_idna(self):
    """Non-ASCII (IDNA) host names resolve through the name-lookup functions."""
    # Check for internet access before running test
    # (issue #12804, issue #25138).
    with support.transient_internet('python.org'):
        socket.gethostbyname('python.org')
    # these should all be successful
    domain = 'испытание.pythontest.net'
    socket.gethostbyname(domain)
    socket.gethostbyname_ex(domain)
    socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
    # this may not work if the forward lookup choses the IPv6 address, as that doesn't
    # have a reverse entry yet
    # socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
    """Common driver: sendall() interrupted by SIGALRM must propagate the
    handler's exception (no timeout), or raise socket.timeout when a
    timeout is set and the handler returns normally."""
    # socketpair() is not strictly required, but it makes things easier.
    if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
        self.skipTest("signal.alarm and socket.socketpair required for this test")
    # Our signal handlers clobber the C errno by calling a math function
    # with an invalid domain value.
    def ok_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
    def raising_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
        1 // 0
    c, s = socket.socketpair()
    old_alarm = signal.signal(signal.SIGALRM, raising_handler)
    try:
        if with_timeout:
            # Just above the one second minimum for signal.alarm
            c.settimeout(1.5)
        # The handler's ZeroDivisionError must surface out of sendall().
        with self.assertRaises(ZeroDivisionError):
            signal.alarm(1)
            c.sendall(b"x" * support.SOCK_MAX_SIZE)
        if with_timeout:
            # With a benign handler, the pending timeout should fire instead.
            signal.signal(signal.SIGALRM, ok_handler)
            signal.alarm(1)
            self.assertRaises(socket.timeout, c.sendall,
                              b"x" * support.SOCK_MAX_SIZE)
    finally:
        # Always cancel the alarm and restore the previous handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_alarm)
        c.close()
        s.close()

def test_sendall_interrupted(self):
    """sendall() without a timeout re-raises the signal handler's exception."""
    self.check_sendall_interrupted(False)

def test_sendall_interrupted_with_timeout(self):
    """sendall() with a timeout raises socket.timeout when interrupted."""
    self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
    """Dropping the last reference to an open socket emits a ResourceWarning
    naming that socket; a makefile() wrapper warns on its own deallocation."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    r = repr(sock)
    with self.assertWarns(ResourceWarning) as cm:
        sock = None
        support.gc_collect()
    # The warning message should identify the leaked socket.
    self.assertIn(r, str(cm.warning.args[0]))
    # An open socket file object gets dereferenced after the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    f = sock.makefile('rb')
    r = repr(sock)
    sock = None
    support.gc_collect()
    with self.assertWarns(ResourceWarning):
        f = None
        support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_makefile_mode(self):
    """makefile() accepts r/w/rw with an optional b and echoes the mode back."""
    for mode in 'r', 'rb', 'rw', 'w', 'wb':
        with self.subTest(mode=mode):
            with socket.socket() as sock:
                with sock.makefile(mode) as fp:
                    self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
    """listen() accepts zero/negative backlogs and may be called with no argument."""
    for backlog in 0, -1:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            srv.listen(backlog)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        srv.listen()

@support.cpython_only
def test_listen_backlog_overflow(self):
    """listen() rejects a backlog that does not fit in a C int."""
    # Issue 15989
    import _testcapi
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((HOST, 0))
    self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
    srv.close()

@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
    """IPv6 flow info values outside the valid range raise OverflowError."""
    self.assertRaises(OverflowError, socket.getnameinfo,
                      (support.HOSTv6, 0, 0xffffffff), 0)
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
    """socket.family/.type stringify in enum "Class.MEMBER" form."""
    # Make sure that the AF_* and SOCK_* constants have enum-like string
    # reprs.
    # NOTE(review): str() of IntEnum members changed in Python 3.11 to the
    # plain integer form — confirm this expectation matches the targeted
    # interpreter version.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
        self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_uknown_socket_family_repr(self):
    """socket.family/.type fall back to the raw number for unknown constants."""
    # Test that when created with a family that's not one of the known
    # AF_*/SOCK_* constants, socket.family just returns the number.
    #
    # To do this we fool socket.socket into believing it already has an
    # open fd because on this path it doesn't actually verify the family and
    # type and populates the socket object.
    #
    # On Windows this trick won't work, so the test is skipped.
    fd, path = tempfile.mkstemp()
    self.addCleanup(os.unlink, path)
    with socket.socket(family=42424, type=13331, fileno=fd) as s:
        self.assertEqual(s.family, 42424)
        self.assertEqual(s.type, 13331)
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
    """_sendfile_use_sendfile() fails cleanly on bad file descriptors
    rather than crashing: giveup on a stale fd, OverflowError/TypeError
    on out-of-range or non-integer fds."""
    class File:
        # Minimal stand-in exposing only fileno(), which is all that
        # _sendfile_use_sendfile() needs from its file argument.
        def __init__(self, fd):
            self.fd = fd
        def fileno(self):
            return self.fd
    with socket.socket() as sock:
        # Open and immediately close a real fd so the number is stale.
        fd = os.open(os.curdir, os.O_RDONLY)
        os.close(fd)
        with self.assertRaises(socket._GiveupOnSendfile):
            sock._sendfile_use_sendfile(File(fd))
        with self.assertRaises(OverflowError):
            sock._sendfile_use_sendfile(File(2**1000))
        with self.assertRaises(TypeError):
            sock._sendfile_use_sendfile(File(None))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
    """Smoke tests for the AF_CAN family: constants, socket creation,
    bind semantics and socket options."""

    def testCrucialConstants(self):
        # Merely referencing the constants verifies they are exported.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_RAW

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCMConstants(self):
        socket.CAN_BCM
        # opcodes
        socket.CAN_BCM_TX_SETUP     # create (cyclic) transmission task
        socket.CAN_BCM_TX_DELETE    # remove (cyclic) transmission task
        socket.CAN_BCM_TX_READ      # read properties of (cyclic) transmission task
        socket.CAN_BCM_TX_SEND      # send one CAN frame
        socket.CAN_BCM_RX_SETUP     # create RX content filter subscription
        socket.CAN_BCM_RX_DELETE    # remove RX content filter subscription
        socket.CAN_BCM_RX_READ      # read properties of RX content filter subscription
        socket.CAN_BCM_TX_STATUS    # reply to TX_READ request
        socket.CAN_BCM_TX_EXPIRED   # notification on performed transmissions (count=0)
        socket.CAN_BCM_RX_STATUS    # reply to RX_READ request
        socket.CAN_BCM_RX_TIMEOUT   # cyclic message is absent
        socket.CAN_BCM_RX_CHANGED   # updated CAN frame (detected content change)

    def testCreateSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass

    def testBindAny(self):
        # An empty interface name binds to all CAN interfaces.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.bind(('', ))

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        # CAN_RAW_LOOPBACK must round-trip both off (0) and on (1).
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        # A (can_id, can_mask) filter round-trips through get/setsockopt,
        # and bytearray input is accepted too.
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
    """Client/server CAN tests; each test*/_test* pair runs on the server
    and client thread respectively (framework convention of this module)."""

    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)

    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame."""
        can_dlc = len(data)
        # The frame's payload field is a fixed 8 bytes; pad with NULs.
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)

    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame."""
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        # Trim the padded payload back to its declared length (DLC).
        return (can_id, can_dlc, data[:can_dlc])

    def testSendFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        self.assertEqual(addr[0], self.interface)
        self.assertEqual(addr[1], socket.AF_CAN)

    def _testSendFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)

    def testSendMaxFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)

    def _testSendMaxFrame(self):
        # A full 8-byte payload — the maximum for a classic CAN frame.
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)

    def testSendMultiFrames(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)

    def _testSendMultiFrames(self):
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)
        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        # Client side: receive the frame the server's BCM socket emitted.
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        # Server side: send a single frame via a broadcast-manager socket.
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        assert len(self.cf) == 16
        # BCM messages are a bcm_msg_head header followed by the frame(s).
        header = struct.pack(self.bcm_cmd_msg_fmt,
                             opcode,
                             flags,
                             count,
                             ival1_seconds,
                             ival1_usec,
                             ival2_seconds,
                             ival2_usec,
                             bcm_can_id,
                             nframes,
                             )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Smoke tests for the AF_RDS family: constants, creation, buffer sizing."""

    def testCrucialConstants(self):
        # Merely referencing the constants verifies they are exported.
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            pass

    def testSocketBufferSize(self):
        # Setting send/receive buffer sizes must be accepted without error.
        bufsize = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
    """Client/server RDS datagram tests; each test*/_test* pair runs on the
    server and client thread respectively (framework convention)."""

    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        super().setUp()
        # Used by testCongestion to wait until the sender thread is done.
        self.evt = threading.Event()

    def testSendAndRecv(self):
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        self.assertEqual(self.cli_addr, addr)

    def _testSendAndRecv(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    def testPeek(self):
        # MSG_PEEK must leave the datagram readable a second time.
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testPeek(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)

    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))

    def testSendAndRecvMulti(self):
        # Two datagrams must arrive separately and in order.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)

    def _testSendAndRecvMulti(self):
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))
        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))

    def testSelect(self):
        # A pending datagram must make the socket readable for select().
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testSelect(self):
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    def testCongestion(self):
        # wait until the sender is done
        self.evt.wait()

    def _testCongestion(self):
        # test the behavior in case of congestion
        self.data = b'fill'
        self.cli.setblocking(False)
        try:
            # try to lower the receiver's socket buffer size
            self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
        except OSError:
            pass
        with self.assertRaises(OSError) as cm:
            try:
                # fill the receiver's socket buffer
                while True:
                    self.cli.sendto(self.data, 0, (HOST, self.port))
            finally:
                # signal the receiver we're done
                self.evt.set()
        # sendto() should have failed with ENOBUFS
        self.assertEqual(cm.exception.errno, errno.ENOBUFS)
        # and we should have received a congestion notification through poll
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
    """Connected-TCP tests; each test*/_test* pair runs on the server and
    client thread respectively (framework convention of this module)."""

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        msg = b''
        while 1:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, b'f' * 2048)

    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd()
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testFromFd(self):
        self.serv_conn.send(MSG)

    def testDup(self):
        # Testing dup()
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDup(self):
        self.serv_conn.send(MSG)

    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()

    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)

    # Reuse testShutdown's client half for the overflow variant below.
    testShutdown_overflow = support.cpython_only(testShutdown)

    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)

    def testDetach(self):
        # Testing detach()
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDetach(self):
        self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
    """Basic datagram-socket tests: sendto()/recv()/recvfrom() round trips
    between the threaded client and server halves."""

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Testing sendto() and Recv() over UDP
        msg = self.serv.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Testing recvfrom() over UDP
        msg, addr = self.serv.recvfrom(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        # Client still sends so the server side has a datagram pending
        # regardless of when the negative-length call happens.
        self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    """Base class for sendmsg()/recvmsg() tests.

    Supplies the shared send/receive helpers (sendToServer,
    sendmsgToServer, doRecvmsg) and the msg_flags checking machinery
    that the generic sendmsg()/recvmsg[_into]() test mixins rely on.
    """

    # Time in seconds to wait before considering a test failed, or
    # None for no timeout.  Not all tests actually set a timeout.
    fail_timeout = 3.0

    def setUp(self):
        # Event used to synchronize miscellaneous conditions (e.g.
        # timeouts) between the client and server threads.
        self.misc_event = threading.Event()
        super().setUp()

    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)

    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()

    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))

    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result.  Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result

    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into().  Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)

    # Flags that are normally unset in msg_flags
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        msg_flags_common_unset |= getattr(socket, name, 0)

    # Flags that are normally set
    msg_flags_common_set = 0

    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0

    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0

    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.
        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset
        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator
        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset
        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))
        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    """Mixin that reroutes doRecvmsg() through recvmsg_into().

    It receives into a pre-allocated buffer and then adapts the result
    to the (data, ancdata, flags, addr) shape recvmsg() returns, so the
    generic tests run unchanged against recvmsg_into().
    """

    def doRecvmsg(self, sock, bufsize, *args):
        scratch = bytearray(bufsize)
        outcome = sock.recvmsg_into([scratch], *args)
        self.registerRecvmsgResult(outcome)
        # First item is the byte count; it must fit within the buffer.
        nbytes = outcome[0]
        self.assertGreaterEqual(nbytes, 0)
        self.assertLessEqual(nbytes, bufsize)
        return (bytes(scratch[:nbytes]),) + outcome[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    """Flag expectations for datagram sockets."""

    @property
    def msg_flags_non_eor_indicator(self):
        # A truncated datagram is marked with MSG_TRUNC on top of
        # whatever the superclass already expects.
        return socket.MSG_TRUNC | super().msg_flags_non_eor_indicator
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    """Flag expectations for SCTP sockets."""

    @property
    def msg_flags_eor_indicator(self):
        # SCTP signals a complete record with MSG_EOR in addition to
        # whatever the superclass already expects.
        return socket.MSG_EOR | super().msg_flags_eor_indicator
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    """Adapter for connectionless-mode sockets.

    Subclasses supply sockets on attributes .cli and .serv; this class
    maps them onto the cli_sock/serv_sock names the generic tests use
    and fills in the server address when sending, since an unconnected
    socket has no implicit destination.
    """

    @property
    def cli_sock(self):
        return self.cli

    @property
    def serv_sock(self):
        return self.serv

    @property
    def sendmsg_to_server_defaults(self):
        # sendmsg() defaults: no buffers, no ancillary data, zero
        # flags, destination = server address.
        return ([], [], 0, self.serv_addr)

    def sendToServer(self, msg):
        dest = self.serv_addr
        return self.cli_sock.sendto(msg, dest)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
    """Adapter for connected sockets.

    Subclasses supply serv_conn and cli_conn (the connections *to* the
    server and the client respectively); these are exposed as cli_sock
    and serv_sock for the generic tests.
    """

    @property
    def cli_sock(self):
        # The connection leading to the server is the client's endpoint.
        return self.serv_conn

    @property
    def serv_sock(self):
        # The connection leading to the client is the server's endpoint.
        return self.cli_conn

    def checkRecvmsgAddress(self, addr1, addr2):
        # A connected socket currently reports an "unspecified" peer
        # address, so there is nothing meaningful to compare.
        pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    """Mixin that arms the server-side socket with the failure timeout."""

    def setUp(self):
        super().setUp()
        # Guard against hangs: give the server socket a deadline.
        self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
    """Tests for sendmsg() which can use any socket type and do not
    involve recvmsg() or recvmsg_into().

    Each test*/_test* pair runs in server/client threads respectively;
    the server half asserts what arrives on the wire.
    """

    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))

    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))

    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))

    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))

    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))

    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadArgs(self):
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        # Tell the server side we are done raising.
        self.sendToServer(b"done")

    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")

    @requireAttrs(socket, "CMSG_SPACE")
    def testSendmsgBadMultiCmsg(self):
        # Check that invalid ancillary data items are rejected when
        # more than one item is present.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    @testSendmsgBadMultiCmsg.client_skip
    def _testSendmsgBadMultiCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [0, 0, b""])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b""), object()])
        self.sendToServer(b"done")

    def testSendmsgExcessCmsgReject(self):
        # Check that sendmsg() rejects excess ancillary data items
        # when the number that can be sent is limited.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgExcessCmsgReject(self):
        if not hasattr(socket, "CMSG_SPACE"):
            # Can only send one item
            with self.assertRaises(OSError) as cm:
                self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
            self.assertIsNone(cm.exception.errno)
        self.sendToServer(b"done")

    def testSendmsgAfterClose(self):
        # Check that sendmsg() fails on a closed socket.
        pass

    def _testSendmsgAfterClose(self):
        self.cli_sock.close()
        self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
    """Tests for sendmsg() which require a stream socket and do not
    involve recvmsg() or recvmsg_into()."""

    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))

    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            with self.assertRaises(socket.timeout):
                # Keep sending until the send buffer fills and the
                # timeout fires.
                while True:
                    self.sendmsgToServer([b"a"*512])
        finally:
            # Unblock the server half even if the assertion failed.
            self.misc_event.set()

    # XXX: would be nice to have more tests for sendmsg flags argument.

    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving.  Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
                      "MSG_DONTWAIT not known to work on this platform when "
                      "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                while True:
                    self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
            # Non-blocking send on a full buffer fails with EAGAIN/EWOULDBLOCK.
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK))
        finally:
            self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
    """Tests for sendmsg() which require a connectionless-mode
    (e.g. datagram) socket, and do not involve recvmsg() or
    recvmsg_into()."""

    def testSendmsgNoDestAddr(self):
        # Check that sendmsg() fails when no destination address is
        # given for unconnected socket.
        pass

    def _testSendmsgNoDestAddr(self):
        # Both an omitted address and an explicit None must fail.
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG])
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
    """Tests for recvmsg() which can also be emulated using
    recvmsg_into(), and can use any socket type.

    All receives go through self.doRecvmsg() so that RecvmsgIntoMixin
    can reroute them through recvmsg_into().
    """

    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsg(self):
        self.sendToServer(MSG)

    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)

    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)

    # FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
    # datagram is received (issue #13001).
    @support.requires_freebsd_version(8)
    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

    @support.requires_freebsd_version(8)
    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)

    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)

    def _testRecvmsgAfterClose(self):
        pass

    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(socket.timeout,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            # Let the client half proceed even on failure.
            self.misc_event.set()

    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.

        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets).  Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))

        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    """Tests which require a stream socket and can use either recvmsg()
    or recvmsg_into()."""

    def testRecvmsgEOF(self):
        # Receive end-of-stream indicator (b"", peer socket closed).
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(msg, b"")
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=None) # Might not have end-of-record marker

    def _testRecvmsgEOF(self):
        self.cli_sock.close()

    def testRecvmsgOverflow(self):
        # Receive a message in more than one chunk.
        seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                    len(MSG) - 3)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

        seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # The two segments reassemble to the original message.
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    """Tests for recvmsg() which can use any socket type."""

    def testRecvmsgBadArgs(self):
        # Check that recvmsg() rejects invalid arguments.
        self.assertRaises(TypeError, self.serv_sock.recvmsg)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          -1, 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          len(MSG), -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          [bytearray(10)], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          object(), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), 0, object())

        # After all the bad calls, a correct call still works.
        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    """Tests for recvmsg_into() which can use any socket type."""

    def testRecvmsgIntoBadArgs(self):
        # Check that recvmsg_into() rejects invalid arguments.
        buf = bytearray(len(MSG))
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          len(MSG), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          buf, 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [object()], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [b"I'm not writable"], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf, object()], 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
                          [buf], -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], 0, object())

        # After all the bad calls, a correct call still works.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoGenerator(self):
        # Receive into buffer obtained from a generator (not a sequence).
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            (o for o in [buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoArray(self):
        # Receive into an array rather than the usual bytearray.
        buf = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter write).
        b1 = bytearray(b"----")
        b2 = bytearray(b"0123456789")
        b3 = bytearray(b"--------------")
        # The memoryview slice shows data lands in the right offsets.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [b1, memoryview(b2)[2:9], b3])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(b1, bytearray(b"Mary"))
        self.assertEqual(b2, bytearray(b"01 had a 9"))
        self.assertEqual(b3, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Test the functions CMSG_LEN() and CMSG_SPACE().

    Tests assumptions used by sendmsg() and recvmsg[_into](), which
    share code with these functions.
    """

    # Match the definition in socketmodule.c
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        # Test CMSG_LEN() with various valid and invalid values,
        # checking the assumptions used by recvmsg() and sendmsg().
        toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
        # Probe small values and values just below the overflow limit.
        values = list(range(257)) + list(range(toobig - 257, toobig))

        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
        for n in values:
            ret = socket.CMSG_LEN(n)
            # This is how recvmsg() calculates the data size
            self.assertEqual(ret - socket.CMSG_LEN(0), n)
            self.assertLessEqual(ret, self.socklen_t_limit)

        self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
        self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        # Test CMSG_SPACE() with various valid and invalid values,
        # checking the assumptions used by sendmsg().
        toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))

        last = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(last, array.array("i").itemsize * 2)
        for n in values:
            ret = socket.CMSG_SPACE(n)
            # CMSG_SPACE() must be monotonic and at least CMSG_LEN().
            self.assertGreaterEqual(ret, last)
            self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
            self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
            self.assertLessEqual(ret, self.socklen_t_limit)
            last = ret

        self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
        self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
    def newFDs(self, n):
        # Return a list of n file descriptors for newly-created files
        # containing their list indices as ASCII numbers.  The files
        # and descriptors are cleaned up automatically at test end.
        fds = []
        for i in range(n):
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.unlink, path)
            self.addCleanup(os.close, fd)
            os.write(fd, str(i).encode())
            fds.append(fd)
        return fds
    def checkFDs(self, fds):
        # Check that the file descriptors in the given list contain
        # their correct list indices as ASCII numbers (i.e. that FD
        # passing delivered descriptors referring to the right files).
        for n, fd in enumerate(fds):
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(os.read(fd, 1024), str(n).encode())
    def registerRecvmsgResult(self, result):
        # Ensure any descriptors delivered in ancillary data get closed.
        self.addCleanup(self.closeRecvmsgFDs, result)

    def closeRecvmsgFDs(self, recvmsg_result):
        # Close all file descriptors specified in the ancillary data
        # of the given return value from recvmsg() or recvmsg_into().
        for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                fds = array.array("i")
                # Drop any trailing partial int before decoding.
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)
    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))
    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            # Only whole integers may appear in the payload.
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)

        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)
    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testFDPassSimple(self):
        # Client half: serialize the FD array to bytes explicitly.
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))

    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testMultipleFDPass(self):
        self.createAndSendFDs(4)
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))

    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        self.createAndSendFDs(4)

    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassCMSG_LEN(self):
        self.createAndSendFDs(1)
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays.  Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)

    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)

    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))
    def sendAncillaryIfPossible(self, msg, ancdata):
        # Try to send msg and ancdata to server, but if the system
        # call fails, just send msg with no ancillary data.
        try:
            nbytes = self.sendmsgToServer([msg], ancdata)
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer([msg])
        self.assertEqual(nbytes, len(msg))

    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array.  Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassEmpty(self):
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])
def testFDPassPartialInt(self):
    # Try to pass a truncated FD array.
    # Server side: any ancillary item that does arrive must be an
    # SCM_RIGHTS payload shorter than one complete int.
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240)
    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
    self.assertLessEqual(len(ancdata), 1)
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        self.assertLess(len(cmsg_data), SIZEOF_INT)

def _testFDPassPartialInt(self):
    # Client side: serialize a bad FD and chop off the last byte so
    # the array holds a partial int.
    self.sendAncillaryIfPossible(
        MSG,
        [(socket.SOL_SOCKET,
          socket.SCM_RIGHTS,
          array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
    # Try to pass two FD arrays, the first of which is truncated.
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240)
    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
    self.assertLessEqual(len(ancdata), 2)
    fds = array.array("i")
    # Arrays may have been combined in a single control message
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        # Keep only whole ints; any trailing partial int is dropped.
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
    self.assertLessEqual(len(fds), 2)
    self.checkFDs(fds)

@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
    # Client side: first item is a good FD plus a truncated bad FD
    # (last byte removed); second item is a complete good FD.
    fd0, fd1 = self.newFDs(2)
    self.sendAncillaryIfPossible(
        MSG,
        [(socket.SOL_SOCKET,
          socket.SCM_RIGHTS,
          array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
         (socket.SOL_SOCKET,
          socket.SCM_RIGHTS,
          array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
    """Assert that a recvmsg() result carries no ancillary data.

    Used when the ancillary buffer was truncated inside the cmsghdr
    structure itself: the payload must still be MSG, the sender
    address must match, the ancillary list must be empty, and
    MSG_CTRUNC must be set (modulo ignoreflags).
    """
    data, anc, recv_flags, sender = result
    self.assertEqual(data, MSG)
    self.checkRecvmsgAddress(sender, self.cli_addr)
    self.assertEqual(anc, [])
    self.checkFlags(recv_flags, eor=True, checkset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
    # Check that no ancillary data is received when no buffer size
    # is specified.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                              # BSD seems to set MSG_CTRUNC only
                              # if an item has been partially
                              # received.
                              ignoreflags=socket.MSG_CTRUNC)

def _testCmsgTruncNoBufSize(self):
    # Client side: send one FD so the server has ancillary data to drop.
    self.createAndSendFDs(1)

def testCmsgTrunc0(self):
    # Check that no ancillary data is received when buffer size is 0.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                              ignoreflags=socket.MSG_CTRUNC)

def _testCmsgTrunc0(self):
    # Client side: send one FD so the server has ancillary data to drop.
    self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.

def testCmsgTrunc1(self):
    # One byte is smaller than any cmsghdr.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))

def _testCmsgTrunc1(self):
    self.createAndSendFDs(1)

def testCmsgTrunc2Int(self):
    # The cmsghdr structure has at least three members, two of
    # which are ints, so we still shouldn't see any ancillary
    # data.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                             SIZEOF_INT * 2))

def _testCmsgTrunc2Int(self):
    self.createAndSendFDs(1)

def testCmsgTruncLen0Minus1(self):
    # One byte short of a complete (empty-payload) control message.
    self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                             socket.CMSG_LEN(0) - 1))

def _testCmsgTruncLen0Minus1(self):
    self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.

def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
    # Check that file descriptor data is truncated to between
    # mindata and maxdata bytes when received with buffer size
    # ancbuf, and that any complete file descriptor numbers are
    # valid.
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), ancbuf)
    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

    # The kernel may drop the item entirely when nothing fits.
    if mindata == 0 and ancdata == []:
        return
    self.assertEqual(len(ancdata), 1)
    cmsg_level, cmsg_type, cmsg_data = ancdata[0]
    self.assertEqual(cmsg_level, socket.SOL_SOCKET)
    self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
    self.assertGreaterEqual(len(cmsg_data), mindata)
    self.assertLessEqual(len(cmsg_data), maxdata)
    # Keep only whole ints; a trailing partial int is discarded
    # before validating the received descriptors.
    fds = array.array("i")
    fds.frombytes(cmsg_data[:
            len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
    self.checkFDs(fds)
def testCmsgTruncLen0(self):
    # Exactly room for a header with an empty payload.
    self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)

def _testCmsgTruncLen0(self):
    self.createAndSendFDs(1)

def testCmsgTruncLen0Plus1(self):
    # Room for the header plus a single byte of FD data.
    self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)

def _testCmsgTruncLen0Plus1(self):
    self.createAndSendFDs(2)

def testCmsgTruncLen1(self):
    # Room for exactly one complete FD out of the two sent.
    self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                             maxdata=SIZEOF_INT)

def _testCmsgTruncLen1(self):
    self.createAndSendFDs(2)

def testCmsgTruncLen2Minus1(self):
    # One byte short of both FDs: second int arrives truncated.
    self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                             maxdata=(2 * SIZEOF_INT) - 1)

def _testCmsgTruncLen2Minus1(self):
    self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
    """Test sendmsg() and recvmsg[_into]() using the ancillary data
    features of the RFC 3542 Advanced Sockets API for IPv6.

    Currently we can only handle certain data items (e.g. traffic
    class, hop limit, MTU discovery and fragmentation settings)
    without resorting to unportable means such as the struct module,
    but the tests here are aimed at testing the ancillary data
    handling in sendmsg() and recvmsg() rather than the IPv6 API
    itself.

    Fix: the misspelled test pair "testSecomdCmsgTruncInData" /
    "_testSecomdCmsgTruncInData" has been renamed to
    "testSecondCmsgTruncInData" / "_testSecondCmsgTruncInData"
    (both halves renamed together so the server/client pairing by
    name is preserved).
    """

    # Test value to use when setting hop limit of packet
    hop_limit = 2

    # Test value to use when setting traffic class of packet.
    # -1 means "use kernel default".
    traffic_class = -1

    def ancillaryMapping(self, ancdata):
        # Given ancillary data list ancdata, return a mapping from
        # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
        # Check that no (level, type) pair appears more than once.
        d = {}
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertNotIn((cmsg_level, cmsg_type), d)
            d[(cmsg_level, cmsg_type)] = cmsg_data
        return d

    def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space.  Check that data is MSG, ancillary data is not
        # truncated (but ignore any flags in ignoreflags), and hop
        # limit is between 0 and maxhop inclusive.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertEqual(len(ancdata), 1)
        self.assertIsInstance(ancdata[0], tuple)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
        self.assertIsInstance(cmsg_data, bytes)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimit(self):
        # Test receiving the packet hop limit as ancillary data.
        self.checkHopLimit(ancbufsize=10240)

    @testRecvHopLimit.client_skip
    def _testRecvHopLimit(self):
        # Need to wait until server has asked to receive ancillary
        # data, as implementations are not required to buffer it
        # otherwise.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimitCMSG_SPACE(self):
        # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
        self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))

    @testRecvHopLimitCMSG_SPACE.client_skip
    def _testRecvHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Could test receiving into buffer sized using CMSG_LEN, but RFC
    # 3542 says portable applications must provide space for trailing
    # padding.  Implementations may set MSG_CTRUNC if there isn't
    # enough space for the padding.

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSetHopLimit(self):
        # Test setting hop limit on outgoing packet and receiving it
        # at the other end.
        self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)

    @testSetHopLimit.client_skip
    def _testSetHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                     ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space.  Check that data is MSG, ancillary
        # data is not truncated (but ignore any flags in ignoreflags),
        # and traffic class and hop limit are in range (hop limit no
        # more than maxhop).
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 2)
        ancmap = self.ancillaryMapping(ancdata)

        tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
        self.assertEqual(len(tcdata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(tcdata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

        hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
        self.assertEqual(len(hldata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(hldata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimit(self):
        # Test receiving traffic class and hop limit as ancillary data.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240)

    @testRecvTrafficClassAndHopLimit.client_skip
    def _testRecvTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        # Test receiving traffic class and hop limit, using
        # CMSG_SPACE() to calculate buffer size.
        self.checkTrafficClassAndHopLimit(
            ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)

    @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
    def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSetTrafficClassAndHopLimit(self):
        # Test setting traffic class and hop limit on outgoing packet,
        # and receiving them at the other end.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testSetTrafficClassAndHopLimit.client_skip
    def _testSetTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                                   array.array("i", [self.traffic_class])),
                                  (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testOddCmsgSize(self):
        # Try to send ancillary data with first item one byte too
        # long.  Fall back to sending with correct size if this fails,
        # and check that second item was handled correctly.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testOddCmsgSize.client_skip
    def _testOddCmsgSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        try:
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        except OSError as e:
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class])),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        self.assertEqual(nbytes, len(MSG))

    # Tests for proper handling of truncated ancillary data

    def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space, which should be too small to contain the ancillary
        # data header (if ancbufsize is None, pass no second argument
        # to recvmsg()).  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and no ancillary data is
        # returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        args = () if ancbufsize is None else (ancbufsize,)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), *args)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no ancillary
        # buffer size is provided.
        self.checkHopLimitTruncatedHeader(ancbufsize=None,
                                          # BSD seems to set
                                          # MSG_CTRUNC only if an item
                                          # has been partially
                                          # received.
                                          ignoreflags=socket.MSG_CTRUNC)

    @testCmsgTruncNoBufSize.client_skip
    def _testCmsgTruncNoBufSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc0(self):
        # Check that no ancillary data is received when ancillary
        # buffer size is zero.
        self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                          ignoreflags=socket.MSG_CTRUNC)

    @testSingleCmsgTrunc0.client_skip
    def _testSingleCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc1(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=1)

    @testSingleCmsgTrunc1.client_skip
    def _testSingleCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc2Int(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)

    @testSingleCmsgTrunc2Int.client_skip
    def _testSingleCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncLen0Minus1(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)

    @testSingleCmsgTruncLen0Minus1.client_skip
    def _testSingleCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncInData(self):
        # Test truncation of a control message inside its associated
        # data.  The message may be returned with its data truncated,
        # or not returned at all.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        self.assertLessEqual(len(ancdata), 1)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    @testSingleCmsgTruncInData.client_skip
    def _testSingleCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space, which should be large enough to
        # contain the first item, but too small to contain the header
        # of the second.  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and only one ancillary
        # data item is returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

    # Try the above test with various buffer sizes.

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc0(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                        ignoreflags=socket.MSG_CTRUNC)

    @testSecondCmsgTrunc0.client_skip
    def _testSecondCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc1(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)

    @testSecondCmsgTrunc1.client_skip
    def _testSecondCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc2Int(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        2 * SIZEOF_INT)

    @testSecondCmsgTrunc2Int.client_skip
    def _testSecondCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncLen0Minus1(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(0) - 1)

    @testSecondCmsgTruncLen0Minus1.client_skip
    def _testSecondCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
        # Renamed from "testSecomdCmsgTruncInData" (typo fix).
        # Test truncation of the second of two control messages inside
        # its associated data.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG),
            socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}

        # First item should be complete.
        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        cmsg_types.remove(cmsg_type)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

        # Second item, if present at all, must be truncated.
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            cmsg_types.remove(cmsg_type)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

        self.assertEqual(ancdata, [])

    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Derive concrete test classes for different socket types.

class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    """Base class for sendmsg()/recvmsg() tests over IPv4 UDP."""
    pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    """Connectionless sendmsg() tests over IPv4 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    """recvmsg() tests over IPv4 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    """recvmsg_into() tests over IPv4 UDP."""
    pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):
    """Base class for sendmsg()/recvmsg() tests over IPv6 UDP."""

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer, ignoring scope ID
        self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    """Connectionless sendmsg() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    """recvmsg() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    """recvmsg_into() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    """RFC 3542 ancillary-data tests with recvmsg() over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    """RFC 3542 ancillary-data tests with recvmsg_into() over IPv6 UDP."""
    pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    """Base class for sendmsg()/recvmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    """Stream sendmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    """recvmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    """recvmsg_into() tests over TCP."""
    pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    """Base class for sendmsg()/recvmsg() tests over SCTP streams."""
    pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    """Stream sendmsg() tests over SCTP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):
    """recvmsg() tests over SCTP streams."""

    def testRecvmsgEOF(self):
        # Run the generic EOF test, but tolerate the sporadic
        # ENOTCONN some kernels raise here.
        try:
            super().testRecvmsgEOF()
        except OSError as err:
            if err.errno == errno.ENOTCONN:
                self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
            raise
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):
    """recvmsg_into() tests over SCTP streams."""

    def testRecvmsgEOF(self):
        # Run the generic EOF test, but tolerate the sporadic
        # ENOTCONN some kernels raise here.
        try:
            super().testRecvmsgEOF()
        except OSError as err:
            if err.errno == errno.ENOTCONN:
                self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
            raise
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    """Base class for sendmsg()/recvmsg() tests over Unix-domain streams."""
    pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    """Stream sendmsg() tests over Unix-domain sockets."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    """recvmsg() tests over Unix-domain sockets."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    """recvmsg_into() tests over Unix-domain sockets."""
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    """SCM_RIGHTS FD-passing tests using recvmsg()."""
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    """SCM_RIGHTS FD-passing tests using recvmsg_into()."""
    pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set.  These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.

class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a
    # SIGALRM handler that raises ZeroDivisionError (via 1/0) so the
    # interrupted call is detectable, and removes it on teardown,
    # along with any scheduled alarms.

    def setUp(self):
        super().setUp()
        # Handler deliberately raises; tests assert ZeroDivisionError.
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
        self.addCleanup(self.setAlarm, 0)

    # Timeout for socket operations
    timeout = 4.0

    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use.  Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05

        def setAlarm(self, seconds):
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2

        def setAlarm(self, seconds):
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    # Test interrupting the recv*() methods with signals when a
    # timeout is set.

    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)

    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (propagated from the SIGALRM handler installed by
        # InterruptedTimeoutBase) when interrupted by a signal.
        self.setAlarm(self.alarm_time)
        with self.assertRaises(ZeroDivisionError) as cm:
            func(*args, **kwargs)

    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)

    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))

    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)

    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))

    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)

    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    # Test interrupting the interruptible send*() methods with signals
    # when a timeout is set.

    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)

    def doConnect(self):
        self.serv_conn.connect(self.serv_addr)

    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (propagated from the SIGALRM handler
        # installed by InterruptedTimeoutBase) when interrupted by a
        # signal.  The loop keeps sending until the alarm fires.
        with self.assertRaises(ZeroDivisionError) as cm:
            while True:
                self.setAlarm(self.alarm_time)
                func(*args, **kwargs)

    # Issue #12958: The following tests have problems on OS X prior to 10.7

    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)

    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)

    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
    """Check that closing the server side of a TCP connection is seen
    as EOF by the client, and that repeated close() calls are safe."""

    def testClose(self):
        conn, addr = self.serv.accept()
        conn.close()

        # The closed connection should become readable and yield EOF.
        sd = self.cli
        read, write, err = select.select([sd], [], [], 1.0)
        self.assertEqual(read, [sd])
        self.assertEqual(sd.recv(1), b'')

        # Calling close() many times should be safe.
        conn.close()
        conn.close()

    def _testClose(self):
        # Client side: connect, then linger so the server can close first.
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
    """Sanity checks for socket pairs: default attributes and simple
    data transfer in both directions."""

    def __init__(self, methodName='runTest'):
        super().__init__(methodName=methodName)

    def _check_defaults(self, sock):
        # Pairs are AF_UNIX stream sockets where available, AF_INET
        # otherwise; the protocol is always 0.
        self.assertIsInstance(sock, socket.socket)
        expected_family = (socket.AF_UNIX if hasattr(socket, 'AF_UNIX')
                           else socket.AF_INET)
        self.assertEqual(sock.family, expected_family)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)

    def _testDefaults(self):
        self._check_defaults(self.cli)

    def testDefaults(self):
        self._check_defaults(self.serv)

    def testRecv(self):
        received = self.serv.recv(1024)
        self.assertEqual(received, MSG)

    def _testRecv(self):
        self.cli.send(MSG)

    def testSend(self):
        self.serv.send(MSG)

    def _testSend(self):
        received = self.cli.recv(1024)
        self.assertEqual(received, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    """Tests for non-blocking TCP sockets.

    Covers setblocking()/gettimeout() interaction, creating sockets
    with SOCK_NONBLOCK, the blocking mode inherited by accept()ed
    connections, and non-blocking accept()/connect()/recv().
    """
    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)
    def testSetBlocking(self):
        # Testing whether set blocking works
        self.serv.setblocking(True)
        self.assertIsNone(self.serv.gettimeout())
        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)
        start = time.time()
        try:
            # Non-blocking accept() with no pending connection should
            # fail immediately rather than block.
            self.serv.accept()
        except OSError:
            pass
        end = time.time()
        self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
    def _testSetBlocking(self):
        pass
    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989: values larger than UINT_MAX must not be truncated
        # by the C-level argument conversion; any nonzero value means
        # "blocking".
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')
        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)
        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())
    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # reinit server socket: create it non-blocking from the start
        # via the SOCK_NONBLOCK type flag (Linux >= 2.6.28).
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
                                                  socket.SOCK_NONBLOCK)
        self.port = support.bind_port(self.serv)
        self.serv.listen()
        # actual testing
        start = time.time()
        try:
            self.serv.accept()
        except OSError:
            pass
        end = time.time()
        self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
    def _testInitNonBlocking(self):
        pass
    def testInheritFlags(self):
        # Issue #7995: when calling accept() on a listening socket with a
        # timeout, the resulting socket should not be non-blocking.
        self.serv.settimeout(10)
        # BUGFIX: 'conn' was previously unbound if accept() raised
        # (e.g. timed out), so the finally clause raised NameError and
        # masked the real failure.  Initialize it and guard the close.
        conn = None
        try:
            conn, addr = self.serv.accept()
            message = conn.recv(len(MSG))
        finally:
            if conn is not None:
                conn.close()
            self.serv.settimeout(None)
    def _testInheritFlags(self):
        time.sleep(0.1)
        self.cli.connect((HOST, self.port))
        time.sleep(0.5)
        self.cli.send(MSG)
    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)
        try:
            conn, addr = self.serv.accept()
        except OSError:
            pass
        else:
            self.fail("Error trying to do non-blocking accept.")
        # Wait until the client's connect() makes the socket readable.
        read, write, err = select.select([self.serv], [], [])
        if self.serv in read:
            conn, addr = self.serv.accept()
            # The accepted socket must be blocking even though the
            # listening socket is non-blocking (see testInheritFlags).
            self.assertIsNone(conn.gettimeout())
            conn.close()
        else:
            self.fail("Error trying to do accept after select.")
    def _testAccept(self):
        time.sleep(0.1)
        self.cli.connect((HOST, self.port))
    def testConnect(self):
        # Testing non-blocking connect
        conn, addr = self.serv.accept()
        conn.close()
    def _testConnect(self):
        self.cli.settimeout(10)
        self.cli.connect((HOST, self.port))
    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        conn.setblocking(0)
        try:
            # No data sent yet, so this should fail rather than block.
            msg = conn.recv(len(MSG))
        except OSError:
            pass
        else:
            self.fail("Error trying to do non-blocking recv.")
        read, write, err = select.select([conn], [], [])
        if conn in read:
            msg = conn.recv(len(MSG))
            conn.close()
            self.assertEqual(msg, MSG)
        else:
            self.fail("Error during select call to non-blocking socket.")
    def _testRecv(self):
        self.cli.connect((HOST, self.port))
        time.sleep(0.1)
        self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()

    self.read_file is the io object returned by makefile() on
    the client connection. You can read from this file to
    get output from the server.

    self.write_file is the io object returned by makefile() on the
    server connection. You can write to this file to send output
    to the client.
    """
    # Class attributes below parameterize the makefile() call; the
    # subclasses override them to cover buffered/unbuffered and
    # text/binary combinations.
    bufsize = -1 # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)
    def setUp(self):
        # Four events coordinate the two threads; individual tests use
        # evt1/evt2 for fine-grained handshakes and *_finished to hold
        # each side open until the other is done.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def tearDown(self):
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)
    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)
    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))
    def _testReadAfterTimeout(self):
        # Send only 3 bytes so the reader's second read() must time out.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()
    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)
    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testFullRead(self):
        # close() after write produces the EOF the reader waits for.
        self.write_file.write(self.write_msg)
        self.write_file.close()
    def testUnbufferedRead(self):
        # Performing unbuffered file read test
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)
    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)
    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileAfterMakefileClose(self):
        # Closing the file object must not close the underlying socket.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)
    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)
    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)
    def testAttributes(self):
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())
    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())
    def testRealClose(self):
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)
    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):

    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer.  Note that http.client relies on this
    when reading multiple requests from the same socket."""

    bufsize = 0 # Use unbuffered mode

    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline() # first line
        self.assertEqual(line, b"A. " + self.write_msg) # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline() # second line
        self.assertEqual(line, b"B. " + self.write_msg) # second line
    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()
    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileCloseSocketDestroy(self):
        # Closing the file object must drop its reference to the socket.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)
    def _testMakefileCloseSocketDestroy(self):
        pass

    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).

    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # With no data available, non-blocking reads return None.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # Everything has been consumed; further reads return None again.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)
    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)
    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)
    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    # Re-run the FileObjectClassTestCase suite with bufsize=1.
    bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    # Re-run the FileObjectClassTestCase suite with a tiny buffer so the
    # buffering code paths are exercised across reads/writes.
    bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Text-mode reader, binary-mode writer.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
    newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Binary-mode reader, text-mode writer.
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Text mode on both ends.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class NetworkConnectionTest(object):
    """Prove network connection."""
    # Mixin: replaces the plain connect() in clientSetUp with
    # socket.create_connection() so existing TCP tests exercise it.
    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP functionality.
    """
    # No body: inherits all tests from BasicTCPTest, but the client is
    # created via socket.create_connection() (NetworkConnectionTest).
class NetworkConnectionNoServer(unittest.TestCase):
    # Error behaviour of connect()/create_connection() when no server
    # is listening on the target port.

    class MockSocket(socket.socket):
        # Socket whose connect() always times out, used to verify that
        # create_connection() does not recast timeouts.
        def connect(self, *args):
            raise socket.timeout('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        old_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            # Always restore the real socket class, even on failure.
            socket.socket = old_socket

    def test_connect(self):
        port = support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as cm:
            cli.connect((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)

    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as cm:
            socket.create_connection((HOST, port))

        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'.  This may result in an IPV6 addr being returned
        # as well as an IPV4 one:
        #   >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        #   >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
        #        (26, 2, 0, '', ('::1', 41230, 0, 0))]
        #
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully bind to any of them, it propagates
        # the last exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance instead
        # of ECONNREFUSED.  So, if that errno exists, add it to our list of
        # expected errnos.
        expected_errnos = [ errno.ECONNREFUSED, ]
        if hasattr(errno, 'ENETUNREACH'):
            expected_errnos.append(errno.ENETUNREACH)

        self.assertIn(cm.exception.errno, expected_errnos)

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            with self.assertRaises(socket.timeout):
                socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    """Attribute tests for sockets returned by socket.create_connection():
    family, source_address binding, and timeout handling (default,
    None, positional, and keyword)."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.source_port = support.find_unused_port()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def _justAccept(self):
        # Server side for every test below: accept the one connection
        # the client makes and discard it.
        conn, addr = self.serv.accept()
        conn.close()

    testFamily = _justAccept
    def _testFamily(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.family, 2)

    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.

    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)

    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)

    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        # CONSISTENCY FIX: register the close like every sibling test
        # does; previously this was the only variant that leaked the
        # client socket if the assertion failed.
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)

    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    # Behaviour of create_connection() sockets: a recv with no timeout
    # waits out a slow server, while a short timeout raises.
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
    def testInsideTimeout(self):
        # Server: delay 3 seconds before answering.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout
    def _testInsideTimeout(self):
        # No timeout: recv() blocks past the server's delay and succeeds.
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")
    def _testOutsideTimeout(self):
        # 1-second timeout expires before the server's 3-second delay.
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    # Timeout semantics of a listening TCP socket: settimeout(t) raises
    # socket.timeout, settimeout(0.0) means non-blocking (plain OSError),
    # and timeouts survive signal interruption.
    def testTCPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                              "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            # Expected: zero timeout is non-blocking mode, which fails
            # immediately with EWOULDBLOCK-style OSError, not timeout.
            ok = True
        except:
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm
        class Alarm(Exception):
            pass
        def alarm_handler(signal, frame):
            raise Alarm
        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
            try:
                foo = self.serv.accept()
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except:
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    # UDP counterpart of TCPTimeoutTest: recv() under settimeout(t)
    # raises socket.timeout; settimeout(0.0) means non-blocking OSError.
    def testUDPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, raise_timeout,
                              "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            # Expected: non-blocking recv with nothing queued.
            ok = True
        except:
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Sanity checks for the socket exception hierarchy and for
    operations on a socket whose file descriptor is no longer valid."""

    def testExceptionTree(self):
        # Since Python 3.3, the socket exceptions are rooted at OSError.
        self.assertTrue(issubclass(OSError, Exception))
        for exc_type in (socket.herror, socket.gaierror, socket.timeout):
            self.assertTrue(issubclass(exc_type, OSError))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        donor = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        shared = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, donor.fileno())
        # Closing the donor invalidates the fd that 'shared' wraps.
        donor.close()
        self.addCleanup(shared.detach)
        with self.assertRaises(OSError):
            shared.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
    # AF_UNIX addresses starting with a NUL byte live in the Linux
    # "abstract namespace" (no filesystem entry).  Check bind/connect,
    # the maximum name length, and the accepted address types.

    UNIX_PATH_MAX = 108

    def testLinuxAbstractNamespace(self):
        address = b"\x00python-test-hello\x00\xff"
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
            s1.bind(address)
            s1.listen()
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
                s2.connect(s1.getsockname())
                with s1.accept()[0] as s3:
                    self.assertEqual(s1.getsockname(), address)
                    self.assertEqual(s2.getpeername(), address)

    def testMaxName(self):
        # Longest abstract name: leading NUL plus UNIX_PATH_MAX-1 bytes.
        address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(address)
            self.assertEqual(s.getsockname(), address)

    def testNameOverflow(self):
        address = "\x00" + "h" * self.UNIX_PATH_MAX
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            self.assertRaises(OSError, s.bind, address)

    def testStrName(self):
        # Check that an abstract name can be passed as a string.
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.bind("\x00python\x00test\x00")
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
        finally:
            s.close()

    def testBytearrayName(self):
        # Check that an abstract name can be passed as a bytearray.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(bytearray(b"\x00python\x00test\x00"))
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    # Binding AF_UNIX sockets to filesystem pathnames, including
    # non-ASCII and non-encodable names.

    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))

    def bind(self, sock, path):
        # Bind the socket
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            if str(e) == "AF_UNIX path too long":
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise

    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))

    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send().
    """
    # Covers recv_into()/recvfrom_into() with array, bytearray and
    # memoryview targets, plus error cases for bad buffer sizes.
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvFromIntoMemoryview = _testRecvFromIntoArray

    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246: requesting more bytes than the buffer can
        # hold must raise ValueError, not overflow the buffer.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)

    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)

    def testRecvFromIntoEmptyBuffer(self):
        # Zero-length buffers are a valid degenerate case.
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)

    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# TIPC name-sequence parameters shared by the TIPC tests below:
# service type and the lower/upper bounds of the published name range.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Return True if TIPC sockets can be used on this system.

    Requires both socket.AF_TIPC and a loaded kernel ``tipc`` module;
    the module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    if not hasattr(socket, "AF_TIPC"):
        return False
    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    with modules:
        return any(entry.startswith("tipc ") for entry in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    # Connectionless TIPC: publish a name sequence on the server socket
    # and send a datagram to an address inside that range.
    def testRDM(self):
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)

        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)

        # Target the midpoint of the published name range.
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)

        msg, recvaddr = srv.recvfrom(1024)

        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    # Connection-oriented TIPC (SOCK_STREAM): accept a connection from
    # the client thread and exchange MSG.
    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
    # Context-manager protocol of socket objects: leaving the `with`
    # block closes the socket, and an explicit close() or an exception
    # inside the block is handled correctly.

    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionBase(self):
        # Server: echo back whatever the client sends.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionBase(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionClose(self):
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionClose(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    # fd inheritance semantics (PEP 446): sockets are non-inheritable by
    # default, SOCK_CLOEXEC and set_inheritable() control the flag, and
    # get_inheritable() reflects out-of-band FD_CLOEXEC changes.
    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
            self.assertTrue(s.type & socket.SOCK_CLOEXEC)
            self.assertFalse(s.get_inheritable())

    def test_default_inheritable(self):
        sock = socket.socket()
        with sock:
            self.assertEqual(sock.get_inheritable(), False)

    def test_dup(self):
        # dup() must also produce a non-inheritable socket.
        sock = socket.socket()
        with sock:
            newsock = sock.dup()
            sock.close()
            with newsock:
                self.assertEqual(newsock.get_inheritable(), False)

    def test_set_inheritable(self):
        sock = socket.socket()
        with sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)

            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)

            # clear FD_CLOEXEC flag
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags &= ~fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)

            self.assertEqual(sock.get_inheritable(), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)

            sock.set_inheritable(True)
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)

    @unittest.skipUnless(hasattr(socket, "socketpair"),
                         "need socket.socketpair()")
    def test_socketpair(self):
        s1, s2 = socket.socketpair()
        self.addCleanup(s1.close)
        self.addCleanup(s2.close)
        self.assertEqual(s1.get_inheritable(), False)
        self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
                     "SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
    """Interaction of the SOCK_NONBLOCK type flag with setblocking(),
    settimeout() and the process-wide default timeout."""

    def checkNonblock(self, s, nonblock=True, timeout=0.0):
        # Helper: assert both the SOCK_NONBLOCK bit in s.type and the
        # matching gettimeout() value (0.0 means non-blocking, None blocking).
        if nonblock:
            self.assertTrue(s.type & socket.SOCK_NONBLOCK)
            self.assertEqual(s.gettimeout(), timeout)
        else:
            self.assertFalse(s.type & socket.SOCK_NONBLOCK)
            self.assertEqual(s.gettimeout(), None)

    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_NONBLOCK(self):
        # a lot of it seems silly and redundant, but I wanted to test that
        # changing back and forth worked ok
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
            self.checkNonblock(s)
            s.setblocking(1)
            self.checkNonblock(s, False)
            s.setblocking(0)
            self.checkNonblock(s)
            s.settimeout(None)
            self.checkNonblock(s, False)
            s.settimeout(2.0)
            self.checkNonblock(s, timeout=2.0)
            s.setblocking(1)
            self.checkNonblock(s, False)
        # defaulttimeout: a 0.0 default makes new sockets non-blocking, a
        # positive default becomes their timeout, None means blocking.
        t = socket.getdefaulttimeout()
        socket.setdefaulttimeout(0.0)
        with socket.socket() as s:
            self.checkNonblock(s)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(2.0)
        with socket.socket() as s:
            self.checkNonblock(s, timeout=2.0)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(t)  # restore the previous global default
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    """Windows-only socket.share()/socket.fromshare() duplication API."""

    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.
    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()
        s = socket.fromshare(sdata)
        s2, c = s.accept()
        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()

    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.
        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()

        # Get the shared socket data
        data = self.serv.share(p.pid)

        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)

        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)

        # Connect
        s = socket.create_connection(addr)
        # listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()

    def testShareLength(self):
        # fromshare() must reject truncated or padded share blobs.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data + b"foo")

    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())

        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)

    def testShareLocal(self):
        # Sharing with our own pid and re-importing must preserve the
        # observable socket attributes.
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()

    def testTypes(self):
        # Exercise share/fromshare across the supported family/type combos.
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue  # This combination is not supported
                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendfileUsingSendTest(ThreadedTCPSocketTest):
    """
    Test the send() implementation of socket.sendfile().

    Tests come in pairs: ``testX`` runs on the server side (accepts the
    connection and checks what arrived) while ``_testX`` runs on the client
    side (opens the file and sends it) — the pairing convention comes from
    the threaded test base class.
    """
    FILESIZE = (10 * 1024 * 1024)  # 10MB
    BUFSIZE = 8192
    FILEDATA = b""
    TIMEOUT = 2

    @classmethod
    def setUpClass(cls):
        # Create the FILESIZE-byte test file once for the whole class and
        # keep its exact contents in cls.FILEDATA for later comparison.
        def chunks(total, step):
            assert total >= step
            while total > step:
                yield step
                total -= step
            if total:
                yield total

        chunk = b"".join([random.choice(string.ascii_letters).encode()
                          for i in range(cls.BUFSIZE)])
        with open(support.TESTFN, 'wb') as f:
            for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
                f.write(chunk)
        with open(support.TESTFN, 'rb') as f:
            cls.FILEDATA = f.read()
            assert len(cls.FILEDATA) == cls.FILESIZE

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)

    def accept_conn(self):
        # Accept the client connection with a timeout so a hung test fails
        # instead of deadlocking the suite.
        self.serv.settimeout(self.TIMEOUT)
        conn, addr = self.serv.accept()
        conn.settimeout(self.TIMEOUT)
        self.addCleanup(conn.close)
        return conn

    def recv_data(self, conn):
        # Drain the connection until EOF and return everything received.
        received = []
        while True:
            chunk = conn.recv(self.BUFSIZE)
            if not chunk:
                break
            received.append(chunk)
        return b''.join(received)

    def meth_from_sock(self, sock):
        # Depending on the mixin class being run return either send()
        # or sendfile() method implementation.
        return getattr(sock, "_sendfile_use_send")

    # regular file
    def _testRegularFile(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # non regular file
    def _testNonRegularFile(self):
        address = self.serv.getsockname()
        file = io.BytesIO(self.FILEDATA)
        with socket.create_connection(address) as sock, file as file:
            sent = sock.sendfile(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
            # The raw sendfile implementation must refuse a file object
            # without a real fd by raising the internal give-up marker.
            self.assertRaises(socket._GiveupOnSendfile,
                              sock._sendfile_use_sendfile, file)

    def testNonRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # empty file
    def _testEmptyFileSend(self):
        address = self.serv.getsockname()
        filename = support.TESTFN + "2"
        with open(filename, 'wb'):
            self.addCleanup(support.unlink, filename)
        file = open(filename, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, 0)
            self.assertEqual(file.tell(), 0)

    def testEmptyFileSend(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(data, b"")

    # offset
    def _testOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=5000)
            self.assertEqual(sent, self.FILESIZE - 5000)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testOffset(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE - 5000)
        self.assertEqual(data, self.FILEDATA[5000:])

    # count
    def _testCount(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 5000007
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCount(self):
        count = 5000007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count small
    def _testCountSmall(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 1
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCountSmall(self):
        count = 1
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count + offset
    def _testCountWithOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 100007
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=2007, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count + 2007)

    def testCountWithOffset(self):
        count = 100007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[2007:count + 2007])

    # non blocking sockets are not supposed to work
    def _testNonBlocking(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            sock.setblocking(False)
            meth = self.meth_from_sock(sock)
            self.assertRaises(ValueError, meth, file)
            self.assertRaises(ValueError, sock.sendfile, file)

    def testNonBlocking(self):
        conn = self.accept_conn()
        if conn.recv(8192):
            self.fail('was not supposed to receive any data')

    # timeout (non-triggered)
    def _testWithTimeout(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)

    def testWithTimeout(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # timeout (triggered)
    def _testWithTimeoutTriggeredSend(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=0.01) as sock, \
                file as file:
            meth = self.meth_from_sock(sock)
            self.assertRaises(socket.timeout, meth, file)

    def testWithTimeoutTriggeredSend(self):
        conn = self.accept_conn()
        # NOTE(review): 88192 looks like a typo for 8192 — harmless here,
        # the server just drains whatever arrived before the client timed out.
        conn.recv(88192)

    # errors
    def _test_errors(self):
        pass

    def test_errors(self):
        # Argument validation: wrong socket type, text-mode file, bad count.
        with open(support.TESTFN, 'rb') as file:
            with socket.socket(type=socket.SOCK_DGRAM) as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "SOCK_STREAM", meth, file)
        with open(support.TESTFN, 'rt') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "binary mode", meth, file)
        with open(support.TESTFN, 'rb') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count='2')
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count=0.1)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=0)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=-1)
@unittest.skipUnless(thread, 'Threading required for this test.')
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Run the same suite as SendfileUsingSendTest, but routed through the
    os.sendfile()-backed implementation of socket.sendfile().
    """

    def meth_from_sock(self, sock):
        # Select the zero-copy implementation instead of the send() fallback.
        return sock._sendfile_use_sendfile
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
    """Tests for the Linux kernel crypto API via AF_ALG sockets.

    Fix: in test_aes_cbc the long-message check previously used
    ``self.assertTrue(enc[:msglen], ciphertext)`` — two-arg assertTrue treats
    the second argument as the failure *message*, so nothing was compared;
    it is now a real assertEqual.
    """

    def create_alg(self, typ, name):
        """Bind an AF_ALG algorithm socket, or skip when the kernel does not
        provide the requested algorithm."""
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        try:
            sock.bind((typ, name))
        except FileNotFoundError as e:
            # type / algorithm is not available
            sock.close()
            raise unittest.SkipTest(str(e), typ, name)
        else:
            return sock

    def test_sha256(self):
        # Well-known SHA-256 digest of b"abc".
        expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
                                 "177a9cb410ff61f20015ad")
        with self.create_alg('hash', 'sha256') as algo:
            op, _ = algo.accept()
            with op:
                op.sendall(b"abc")
                self.assertEqual(op.recv(512), expected)

            op, _ = algo.accept()
            with op:
                # MSG_MORE feeds the hash piecewise; the empty send finalizes.
                op.send(b'a', socket.MSG_MORE)
                op.send(b'b', socket.MSG_MORE)
                op.send(b'c', socket.MSG_MORE)
                op.send(b'')
                self.assertEqual(op.recv(512), expected)

    def test_hmac_sha1(self):
        # RFC 2202 HMAC-SHA1 test vector (key "Jefe").
        expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        with self.create_alg('hash', 'hmac(sha1)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
            op, _ = algo.accept()
            with op:
                op.sendall(b"what do ya want for nothing?")
                self.assertEqual(op.recv(512), expected)

    # Although it should work with 3.19 and newer the test blocks on
    # Ubuntu 15.10 with Kernel 4.2.0-19.
    @support.requires_linux_version(4, 3)
    def test_aes_cbc(self):
        key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
        iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
        msg = b"Single block msg"
        ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
        msglen = len(msg)
        with self.create_alg('skcipher', 'cbc(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            op, _ = algo.accept()
            with op:
                # Encrypt: IV goes in ancillary data, payload via sendall.
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 flags=socket.MSG_MORE)
                op.sendall(msg)
                self.assertEqual(op.recv(msglen), ciphertext)

            op, _ = algo.accept()
            with op:
                # Decrypt round-trips back to the plaintext.
                op.sendmsg_afalg([ciphertext],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                self.assertEqual(op.recv(msglen), msg)

            # long message
            multiplier = 1024
            longmsg = [msg] * multiplier
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(longmsg,
                                 op=socket.ALG_OP_ENCRYPT, iv=iv)
                enc = op.recv(msglen * multiplier)
            self.assertEqual(len(enc), msglen * multiplier)
            # Fixed: was assertTrue(enc[:msglen], ciphertext), which never
            # compared the values.
            self.assertEqual(enc[:msglen], ciphertext)

            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([enc],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                dec = op.recv(msglen * multiplier)
            self.assertEqual(len(dec), msglen * multiplier)
            self.assertEqual(dec, msg * multiplier)

    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')

        taglen = len(expected_tag)
        assoclen = len(assoc)

        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)

            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # create anc data manually
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                     )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])

    @support.requires_linux_version(4, 3)  # see test_aes_cbc
    def test_drbg_pr_sha256(self):
        # deterministic random bit generator, prediction resistance, sha256
        with self.create_alg('rng', 'drbg_pr_sha256') as algo:
            extra_seed = os.urandom(32)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
            op, _ = algo.accept()
            with op:
                rn = op.recv(32)
                self.assertEqual(len(rn), 32)

    def test_sendmsg_afalg_args(self):
        # sendmsg_afalg() argument validation on an unbound ALG socket.
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        with sock:
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg()

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(1)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_main():
    """Run every test case class of this module, in the original order,
    wrapped in the threading bookkeeping that test.support requires."""
    tests = [
        GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
        TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
        UDPTimeoutTest,
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest,
        BasicSocketPairTest,
        TestUnixDomain,
        TestLinuxAbstractNamespace,
        TIPCTest, TIPCThreadableTest,
        BasicCANTest, CANTest,
        BasicRDSTest, RDSTest,
        LinuxKernelCryptoAPI,
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
    ]

    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    support.threading_cleanup(*thread_info)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_main()
|
test_interrupt.py | import os
import signal
import tempfile
import time
from threading import Thread
import pytest
from dagster import (
DagsterEventType,
Failure,
Field,
ModeDefinition,
RetryPolicy,
String,
execute_pipeline,
execute_pipeline_iterator,
job,
op,
pipeline,
reconstructable,
resource,
seven,
solid,
)
from dagster.core.errors import DagsterExecutionInterruptedError, raise_execution_interrupts
from dagster.core.test_utils import default_mode_def_for_test, instance_for_test
from dagster.utils import safe_tempfile_path, send_interrupt
from dagster.utils.interrupts import capture_interrupts, check_captured_interrupt
def _send_kbd_int(temp_files):
    """Block until every file in *temp_files* exists, then deliver an
    interrupt to the main thread via send_interrupt()."""
    while not all(os.path.exists(path) for path in temp_files):
        time.sleep(0.1)
    send_interrupt()
@solid(config_schema={"tempfile": Field(String)})
def write_a_file(context):
    """Write a marker file so the interrupting thread knows we started,
    then spin for up to 30s waiting to be interrupted; fail if we never are."""
    with open(context.solid_config["tempfile"], "w") as marker:
        marker.write("yup")

    deadline = time.time() + 30
    while time.time() < deadline:
        time.sleep(0.1)

    raise Exception("Timed out")
@solid
def should_not_start(_context):
    # Sentinel solid: fails unconditionally if it ever executes.
    assert False
@pipeline(mode_defs=[default_mode_def_for_test])
def write_files_pipeline():
    """Four independent spinning writer solids, plus three sentinel solids
    that are expected never to run in these tests."""
    write_a_file.alias("write_1")()
    write_a_file.alias("write_2")()
    write_a_file.alias("write_3")()
    write_a_file.alias("write_4")()

    should_not_start.alias("x_should_not_start")()
    should_not_start.alias("y_should_not_start")()
    should_not_start.alias("z_should_not_start")()
def test_single_proc_interrupt():
    """An interrupt during in-process execution must surface as a step
    failure plus a pipeline failure, carrying the 'interrupted
    unexpectedly' message."""

    @pipeline
    def write_a_file_pipeline():
        write_a_file()

    with safe_tempfile_path() as success_tempfile:
        # launch a thread that waits until the file is written to launch an interrupt
        Thread(target=_send_kbd_int, args=([success_tempfile],)).start()

        result_types = []
        result_messages = []

        # next time the launched thread wakes up it will send a keyboard
        # interrupt
        for result in execute_pipeline_iterator(
            write_a_file_pipeline,
            run_config={"solids": {"write_a_file": {"config": {"tempfile": success_tempfile}}}},
        ):
            result_types.append(result.event_type)
            result_messages.append(result.message)

        assert DagsterEventType.STEP_FAILURE in result_types
        assert DagsterEventType.PIPELINE_FAILURE in result_types

        assert any(
            [
                "Execution was interrupted unexpectedly. "
                "No user initiated termination request was found, treating as failure." in message
                for message in result_messages
            ]
        )
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_multiproc():
    """An interrupt during multiprocess execution must fail all four
    in-flight steps and then the pipeline itself."""
    with tempfile.TemporaryDirectory() as tempdir:
        with instance_for_test(temp_dir=tempdir) as instance:
            file_1 = os.path.join(tempdir, "file_1")
            file_2 = os.path.join(tempdir, "file_2")
            file_3 = os.path.join(tempdir, "file_3")
            file_4 = os.path.join(tempdir, "file_4")

            # launch a thread that waits until all four files are written,
            # then sends the interrupt
            Thread(target=_send_kbd_int, args=([file_1, file_2, file_3, file_4],)).start()

            results = []

            # launch a pipeline that writes a file and loops infinitely
            # next time the launched thread wakes up it will send a keyboard
            # interrupt
            for result in execute_pipeline_iterator(
                reconstructable(write_files_pipeline),
                run_config={
                    "solids": {
                        "write_1": {"config": {"tempfile": file_1}},
                        "write_2": {"config": {"tempfile": file_2}},
                        "write_3": {"config": {"tempfile": file_3}},
                        "write_4": {"config": {"tempfile": file_4}},
                    },
                    "execution": {"multiprocess": {"config": {"max_concurrent": 4}}},
                },
                instance=instance,
            ):
                results.append(result)

            assert [result.event_type for result in results].count(
                DagsterEventType.STEP_FAILURE
            ) == 4
            assert DagsterEventType.PIPELINE_FAILURE in [result.event_type for result in results]
def test_interrupt_resource_teardown():
    """Resource generators must still be torn down (their finally blocks
    run) when execution is interrupted mid-solid."""
    called = []
    cleaned = []

    @resource
    def resource_a(_):
        try:
            called.append("A")
            yield "A"
        finally:
            # Must run even when the pipeline is interrupted.
            cleaned.append("A")

    @solid(config_schema={"tempfile": Field(String)}, required_resource_keys={"a"})
    def write_a_file_resource_solid(context):
        # Signal readiness via the marker file, then spin forever until
        # interrupted.
        with open(context.solid_config["tempfile"], "w") as ff:
            ff.write("yup")
        while True:
            time.sleep(0.1)

    @pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
    def write_a_file_pipeline():
        write_a_file_resource_solid()

    with safe_tempfile_path() as success_tempfile:
        # launch a thread that waits until the file is written to launch an interrupt
        Thread(target=_send_kbd_int, args=([success_tempfile],)).start()

        results = []
        # launch a pipeline that writes a file and loops infinitely
        # next time the launched thread wakes up it will send an interrupt
        for result in execute_pipeline_iterator(
            write_a_file_pipeline,
            run_config={
                "solids": {
                    "write_a_file_resource_solid": {"config": {"tempfile": success_tempfile}}
                }
            },
        ):
            results.append(result.event_type)

        assert DagsterEventType.STEP_FAILURE in results
        assert DagsterEventType.PIPELINE_FAILURE in results

        assert "A" in cleaned
def _send_interrupt_to_self():
    """Deliver SIGINT to this process, then wait (max ~15s) until the
    capture machinery reports that the signal was received."""
    os.kill(os.getpid(), signal.SIGINT)
    deadline = time.time() + 15
    while not check_captured_interrupt():
        time.sleep(1)
        if time.time() > deadline:
            raise Exception("Timed out waiting for interrupt to be received")
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_capture_interrupt():
    """capture_interrupts() must swallow SIGINT inside the block and
    restore normal KeyboardInterrupt behavior afterwards."""
    outer_interrupt = False
    inner_interrupt = False

    with capture_interrupts():
        try:
            _send_interrupt_to_self()
        except:
            inner_interrupt = True

    # The signal was captured, not raised, inside the block.
    assert not inner_interrupt

    # Verify standard interrupt handler is restored
    standard_interrupt = False

    try:
        _send_interrupt_to_self()
    except KeyboardInterrupt:
        standard_interrupt = True

    assert standard_interrupt

    outer_interrupt = False
    inner_interrupt = False

    # No exception if no signal thrown
    try:
        with capture_interrupts():
            try:
                time.sleep(5)
            except:
                inner_interrupt = True
    except:
        outer_interrupt = True

    assert not outer_interrupt
    assert not inner_interrupt
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_raise_execution_interrupts():
    """Inside raise_execution_interrupts(), SIGINT must surface as
    DagsterExecutionInterruptedError rather than KeyboardInterrupt."""
    # Fix: previously this name was only bound inside the except clause, so
    # if the interrupt never fired the final assert died with NameError
    # instead of a clean assertion failure.
    standard_interrupt = False

    with raise_execution_interrupts():
        try:
            _send_interrupt_to_self()
        except DagsterExecutionInterruptedError:
            standard_interrupt = True

    assert standard_interrupt
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_interrupt_inside_nested_delay_and_raise():
    """A raise_execution_interrupts() block nested inside
    capture_interrupts() must raise DagsterExecutionInterruptedError
    immediately, not defer the signal to the outer block."""
    interrupt_inside_nested_raise = False
    interrupt_after_delay = False

    try:
        with capture_interrupts():
            with raise_execution_interrupts():
                try:
                    _send_interrupt_to_self()
                except DagsterExecutionInterruptedError:
                    interrupt_inside_nested_raise = True
    except:
        interrupt_after_delay = True

    assert interrupt_inside_nested_raise
    assert not interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_no_interrupt_after_nested_delay_and_raise():
    """A SIGINT sent after the nested raise block has exited must be
    captured (delayed) by the outer capture_interrupts(), raising nowhere."""
    interrupt_inside_nested_raise = False
    interrupt_after_delay = False

    try:
        with capture_interrupts():
            with raise_execution_interrupts():
                try:
                    time.sleep(5)
                except:
                    interrupt_inside_nested_raise = True

            _send_interrupt_to_self()
    except:
        interrupt_after_delay = True

    assert not interrupt_inside_nested_raise
    assert not interrupt_after_delay
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_calling_raise_execution_interrupts_also_raises_any_captured_interrupts():
    """An interrupt captured while only capture_interrupts() is active must
    be re-raised as DagsterExecutionInterruptedError upon entering a
    raise_execution_interrupts() block."""
    interrupt_from_raise_execution_interrupts = False
    interrupt_after_delay = False

    try:
        with capture_interrupts():
            _send_interrupt_to_self()
            try:
                with raise_execution_interrupts():
                    pass
            except DagsterExecutionInterruptedError:
                interrupt_from_raise_execution_interrupts = True
    except:
        interrupt_after_delay = True

    assert interrupt_from_raise_execution_interrupts
    assert not interrupt_after_delay
@op(config_schema={"path": str})
def write_and_spin_if_missing(context):
    """First run: write our pid to *path* and spin ~3s; if never interrupted,
    delete the file and fail. If *path* already exists, return immediately."""
    path = context.op_config["path"]
    if os.path.exists(path):
        return

    with open(path, "w") as handle:
        handle.write(str(os.getpid()))

    deadline = time.time() + 3
    while time.time() < deadline:
        time.sleep(0.1)

    os.remove(path)
    raise Failure("Timed out, file removed")
@job(op_retry_policy=RetryPolicy(max_retries=1))
def policy_job():
    # Single op with one retry; see test_retry_policy below for how the
    # first attempt is interrupted and the retry succeeds.
    write_and_spin_if_missing()
@pytest.mark.skipif(seven.IS_WINDOWS, reason="Interrupts handled differently on windows")
def test_retry_policy():
    """
    Start a thread which will interrupt the subprocess after it writes the file.
    On the retry the run will succeed since the op returns if the file already exists.
    """

    def _send_int(path):
        # Poll until the op has written its pid into *path*, then SIGINT
        # that process.
        pid = None
        while True:
            if os.path.exists(path):
                with open(path) as f:
                    pid_str = f.read()
                    if pid_str:
                        pid = int(pid_str)
                        break
            time.sleep(0.05)
        os.kill(pid, signal.SIGINT)

    with tempfile.TemporaryDirectory() as tempdir:
        path = os.path.join(tempdir, "target.tmp")
        Thread(target=_send_int, args=(path,)).start()

        with instance_for_test(temp_dir=tempdir) as instance:
            result = execute_pipeline(
                reconstructable(policy_job),
                run_config={"ops": {"write_and_spin_if_missing": {"config": {"path": path}}}},
                instance=instance,
            )
        assert result.success
|
server_4.py | import socket
from threading import Thread
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.bind(("127.0.0.1", 14900))  # the server occupies this address
conn.listen(10)
clientsocket, address = conn.accept()  # the server accepts a client (blocks until one connects)
def send():
    """Forward every line typed on stdin to the connected client, forever."""
    while True:
        outgoing = input()
        clientsocket.send(outgoing.encode("utf-8"))
def accept():
    """Print every message received from the connected client, forever."""
    while True:
        incoming = clientsocket.recv(16384)
        print(incoming.decode("utf-8"))
# Run sending and receiving in parallel so console input doesn't block reads.
Thread(target=send).start()
Thread(target=accept).start()
pscan_thread.py | import socket
import threading
from Queue import Queue
print_lock = threading.Lock()  # serializes print() calls across scanner threads

target = 'Pythonprogramming.net'
# NOTE(review): resolved but never used below — port_scan connects by hostname.
server_ip = socket.gethostbyname(target)
def port_scan(port):
    """Attempt a TCP connection to ``target`` on *port* and report it if open.

    Fixes: the socket is now always closed (the original leaked one fd per
    scanned port), only network errors are swallowed instead of every
    exception, and the deprecated ``threading.currentThread`` alias is
    replaced by ``threading.current_thread``.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((target, port))
    except OSError:
        pass  # closed/filtered port, timeout, unresolvable host, ...
    else:
        with print_lock:
            print('port', port, 'is open', threading.current_thread())
    finally:
        s.close()
def threader():
    """Worker loop: pull port numbers off the shared queue and scan them,
    marking each task done, until the process exits."""
    while True:
        port = q.get()
        port_scan(port)
        q.task_done()
q = Queue()

# Ten daemon worker threads service the queue; daemon=True means they will
# not keep the process alive once the main thread finishes.
for x in range(10):
    t = threading.Thread(target = threader)
    t.daemon = True
    t.start()

# Enqueue ports 1-99 for scanning, then block until the queue drains.
for worker in range(1, 100):
    q.put(worker)

q.join()
|
redis_lock.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
使用redis实现分布式锁
实现思想:
1. 获取锁的时候,使用setnx加锁,并用expire加一个超时时间,超过时间则自动释放锁,
锁的value值为随机生成的uuid
2. 获取锁的时候设置一个获取的超时时间,超过时间则放弃获取锁
3. 释放锁的时候,通过uuid判断是不是该锁,是的话则delete进行锁释放
https://www.cnblogs.com/angelyan/p/11523846.html
"""
import time
import uuid
from threading import Thread
import redis
class DistributedLock(object):
    """A simple Redis-backed distributed lock.

    The lock value is a random UUID so only the client that acquired the
    lock can release it.

    Fixes over the original:
    - acquire uses one atomic ``SET key value NX EX timeout`` instead of
      ``setnx`` + ``expire`` (a crash between those two calls left a lock
      that never expired — exactly what the expiry is meant to prevent);
    - the "lock without expiry" repair check handles both redis-py 2.x
      (``ttl`` -> None) and 3.x (``ttl`` -> -1);
    - release_lock returns False explicitly when the lock belongs to
      another holder (it previously fell off the end and returned None).
    """

    def __init__(self):
        self.redis_client = redis.Redis(host="localhost", port=6379, db=0)

    def acquire_lock(self, lock_name, acquire_time=10, time_out=10):
        """Try to acquire the lock named *lock_name*.

        :param lock_name: name of the lock
        :param acquire_time: how long (seconds) the client keeps trying
        :param time_out: expiry (seconds) placed on the lock key
        :return: the identifier string on success, False on timeout
        """
        identifier = str(uuid.uuid4())
        end_time = time.time() + acquire_time
        lock = "string:lock:" + lock_name
        while time.time() < end_time:
            # Atomic create-with-expiry: no crash window between creating
            # the key and arming its timeout.
            if self.redis_client.set(lock, identifier, nx=True, ex=time_out):
                return identifier
            # Repair a lock that somehow exists without an expiry so other
            # clients are not blocked forever.
            if self.redis_client.ttl(lock) in (None, -1):
                self.redis_client.expire(lock, time_out)
            time.sleep(0.001)
        return False

    def release_lock(self, lock_name, identifier):
        """Release the lock iff *identifier* matches the stored value.

        :param lock_name: name of the lock
        :param identifier: identifier returned by acquire_lock
        :return: True when the lock is gone (released by us or already
            expired), False otherwise
        """
        lock = "string:lock:" + lock_name
        pipe = self.redis_client.pipeline(True)
        try:
            pipe.watch(lock)
            lock_value = self.redis_client.get(lock)
            if not lock_value:
                return True  # already expired or released
            if lock_value.decode() == identifier:
                pipe.multi()
                pipe.delete(lock)
                pipe.execute()
                return True
            pipe.unwatch()
            return False  # held by someone else
        except Exception as e:
            print("redis execute error: {}".format(e))
            return False
# Module-level singletons shared by the demo threads below.
distributed_lock = DistributedLock()

count = 10  # tickets remaining; decremented while holding the distributed lock
def sec_kill(i):
    """Simulate one buyer in the flash sale (50 threads compete for 10 tickets).

    Fixes over the original:
    - the lock is now released on *every* exit path (the sold-out early
      return previously left the lock held until it expired);
    - a failed acquisition (acquire_lock returned False) no longer
      proceeds as if the lock were held.
    """
    global count
    identifier = distributed_lock.acquire_lock("resource")
    if not identifier:
        print("threading {} failed to acquire the lock".format(i))
        return
    try:
        print("threading {} get the lock".format(i))
        time.sleep(1)
        if count < 1:
            print("threading {} did not get the ticket".format(i))
            return
        count -= 1
        print("threading {} get the ticket, {} tickets left".format(i, count))
    finally:
        distributed_lock.release_lock("resource", identifier)
def test_demo():
    """Launch 50 competing buyer threads.

    Uses 50 threads to simulate a flash sale of 10 tickets; ordered output
    indicates the locking works.
    """
    for idx in range(50):
        Thread(target=sec_kill, args=(idx,)).start()
# Run the flash-sale simulation when executed directly.
if __name__ == '__main__':
    test_demo()
|
anti.py | import requests
import random
from fake_headers import Headers
import names
import threading
count = 1  # shared request counter; incremented by every worker thread without a lock
def hehe():
    """Endlessly POST randomly generated credentials to the target login
    endpoint, bumping the shared attempt counter after each request."""
    global count
    while True:
        username = names.get_first_name() + '@ad.unc.edu'
        password = ''.join(random.sample('1234567890qwertyuiopasdfghjklzxcvbnm!@#$%^&*()', 10))
        header = Headers(headers=False)
        payload = {
            'UserName': username,
            'Password': password,
            'AuthMethod': 'FormsAuthentication'
        }
        with requests.post('https://fexerj.org.br/1/federate.ad.unc.edu/login.php', payload, headers=header.generate()) as response:
            pass
        print(count)
        count += 1
if __name__ == '__main__':
    # Start ten flooder threads. They are non-daemon and loop forever, so
    # "finish" below only marks that startup completed, not that work is done.
    for i in range(10):
        t = threading.Thread(target=hehe)
        t.start()
    print("finish")
benchmark.py | """
**benchmark** module handles all the main logic:
- load specified framework and benchmark.
- extract the tasks and configure them.
- create jobs for each task.
- run the jobs.
- collect and save results.
"""
from copy import copy
from enum import Enum
from importlib import import_module, invalidate_caches
import logging
import math
import os
import re
import signal
import sys
from .job import Job, JobError, SimpleJobRunner, MultiThreadingJobRunner
from .datasets import DataLoader, DataSourceType
from .data import DatasetType
from .resources import get as rget, config as rconfig, output_dirs as routput_dirs
from .results import ErrorResult, Scoreboard, TaskResult
from .utils import Namespace as ns, OSMonitoring, as_list, datetime_iso, flatten, json_dump, lazy_property, profile, repr_def, \
run_cmd, run_script, signal_handler, str2bool, str_sanitize, system_cores, system_memory_mb, system_volume_mb, touch
log = logging.getLogger(__name__)
__installed_file__ = '.installed'
__setup_env_file__ = '.setup_env'
class SetupMode(Enum):
    # Policy controlling whether framework setup runs before a benchmark.
    auto = 0    # set up only if not already marked as done (see Benchmark.setup)
    skip = 1    # never run setup
    force = 2   # presumably always re-run setup — TODO confirm against caller
    only = 3    # presumably run setup and stop — TODO confirm against caller
    script = 4  # presumably setup handled by an external script — TODO confirm
class Benchmark:
    """Benchmark.
    Structure containing the generic information needed to run a benchmark:
     - the datasets
     - the automl framework
    we need to support:
     - openml tasks
     - openml datasets
     - openml studies (=benchmark suites)
     - user-defined (list of) datasets
    """

    # Class-level DataLoader shared by all tasks; assigned in setup().
    data_loader = None

    def __init__(self, framework_name: str, benchmark_name: str, constraint_name: str):
        """Resolve framework/benchmark/constraint definitions and build the session id.

        :param framework_name: framework name, optionally suffixed with ':tag'.
        :param benchmark_name: name of the benchmark definition to run.
        :param constraint_name: name of the resource constraint definition.
        """
        self.job_runner = None
        # In 'script' run mode nothing is resolved: all definitions stay None.
        if rconfig().run_mode == 'script':
            self.framework_def, self.framework_name, self.framework_module = None, None, None
            self.benchmark_def, self.benchmark_name, self.benchmark_path = None, None, None
            self.constraint_def, self.constraint_name = None, None
            self.parallel_jobs = 1
            self.sid = None
            return
        # Keep the raw constructor arguments (presumably for forwarding to
        # other run modes — TODO confirm against subclasses).
        self._forward_params = locals()
        # Split an optional ':tag' suffix off the framework name.
        fsplits = framework_name.split(':', 1)
        framework_name = fsplits[0]
        tag = fsplits[1] if len(fsplits) > 1 else None
        self.framework_def, self.framework_name = rget().framework_definition(framework_name, tag)
        log.debug("Using framework definition: %s.", self.framework_def)
        self.constraint_def, self.constraint_name = rget().constraint_definition(constraint_name)
        log.debug("Using constraint definition: %s.", self.constraint_def)
        self.benchmark_def, self.benchmark_name, self.benchmark_path = rget().benchmark_definition(benchmark_name, self.constraint_def)
        log.debug("Using benchmark definition: %s.", self.benchmark_def)
        self.parallel_jobs = rconfig().job_scheduler.parallel_jobs
        # Session id: forced through config, or generated from run parameters
        # plus a microsecond timestamp so each run is unique.
        self.sid = (rconfig().sid if rconfig().sid is not None
                    else rconfig().token_separator.join([
                        str_sanitize(framework_name),
                        str_sanitize(benchmark_name),
                        constraint_name,
                        rconfig().run_mode,
                        datetime_iso(micros=True, no_sep=True)
                    ]).lower())
        self._validate()
        self.framework_module = import_module(self.framework_def.module)

    def _validate(self):
        # Local mode cannot parallelize: silently fall back to one job.
        if self.parallel_jobs > 1:
            log.warning("Parallelization is not supported in local mode: ignoring `parallel_jobs=%s` parameter.", self.parallel_jobs)
            self.parallel_jobs = 1

    def setup(self, mode: SetupMode):
        """
        ensure all dependencies needed by framework are available
        and possibly download them if necessary.
        Delegates specific setup to the framework module
        """
        Benchmark.data_loader = DataLoader(rconfig())
        # Short-circuit: skip, or auto with setup already done
        # ('and' binds tighter than 'or' here).
        if mode == SetupMode.skip or mode == SetupMode.auto and self._is_setup_done():
            return
        log.info("Setting up framework {}.".format(self.framework_name))
        # Write the setup env file into the framework module's directory.
        self._write_setup_env(self.framework_module.__path__[0], **dict(self.framework_def.setup_env))
        self._mark_setup_start()
        # Three optional setup mechanisms, run in order: module hook,
        # setup script, then raw shell commands.
        if hasattr(self.framework_module, 'setup'):
            self.framework_module.setup(*self.framework_def.setup_args,
                                        _live_output_=rconfig().setup.live_output,
                                        _activity_timeout_=rconfig().setup.activity_timeout)
        if self.framework_def.setup_script is not None:
            run_script(self.framework_def.setup_script,
                       _live_output_=rconfig().setup.live_output,
                       _activity_timeout_=rconfig().setup.activity_timeout)
        if self.framework_def.setup_cmd is not None:
            def resolve_venv(cmd):
                # Substitute {py}/{pip} placeholders with the framework's (or
                # root) virtualenv binaries when such a venv directory exists.
                venvs = [
                    *[os.path.join(p, "venv") for p in self.framework_module.__path__],
                    os.path.join(rconfig().root_dir, "venv"),
                ]
                venv = next((ve for ve in venvs if os.path.isdir(ve)), None)
                py = os.path.join(venv, "bin", "python") if venv else "python"
                pip = os.path.join(venv, "bin", "pip") if venv else "pip"
                return cmd.format(py=py, pip=pip)

            setup_cmd = [resolve_venv(cmd) for cmd in self.framework_def.setup_cmd]
            run_cmd('\n'.join(setup_cmd),
                    _executable_="/bin/bash",
                    _live_output_=rconfig().setup.live_output,
                    _activity_timeout_=rconfig().setup.activity_timeout)
        # Setup may have installed new modules: refresh the import system.
        invalidate_caches()
        log.info("Setup of framework {} completed successfully.".format(self.framework_name))
        self._mark_setup_done()

    def _write_setup_env(self, dest_dir, **kwargs):
        # Persist KEY=VALUE pairs (defaults + framework-specific overrides)
        # for setup scripts to source.
        setup_env = dict(
            AMLB_ROOT=rconfig().root_dir,
            PY_EXEC_PATH=sys.executable
        )
        setup_env.update(**kwargs)
        with open(os.path.join(dest_dir, __setup_env_file__), 'w') as f:
            f.write('\n'.join([f"{k}={v}" for k, v in setup_env.items()]+[""]))

    def _installed_file(self):
        # Path of the marker file recording installed framework versions.
        return os.path.join(self._framework_dir, __installed_file__)

    def _installed_version(self):
        # Return the non-empty lines of the marker file (installed versions),
        # or [] if setup never completed.
        installed = self._installed_file()
        versions = []
        if os.path.isfile(installed):
            with open(installed, 'r') as f:
                versions = list(filter(None, map(str.strip, f.readlines())))
        return versions

    def _is_setup_done(self):
        # Setup counts as done only for the exact version in the definition.
        return self.framework_def.version in self._installed_version()

    def _mark_setup_start(self):
        # Remove the marker first so an interrupted setup is not mistaken
        # for a completed one.
        installed = self._installed_file()
        if os.path.isfile(installed):
            os.remove(installed)

    def _mark_setup_done(self):
        # Record the module-reported version (if any) plus the definition
        # version in the marker file.
        installed = self._installed_file()
        versions = []
        if hasattr(self.framework_module, 'version'):
            versions.append(self.framework_module.version())
        versions.extend([self.framework_def.version, ""])
        with open(installed, 'a') as f:
            f.write('\n'.join(versions))

    def cleanup(self):
        # anything to do?
        pass

    def run(self, task_name=None, fold=None):
        """
        :param task_name: a single task name [str] or a list of task names to run. If None, then the whole benchmark will be used.
        :param fold: a fold [str] or a list of folds to run. If None, then the all folds from each task definition will be used.
        :return: the scoreboard as a data frame, or None if no scores were produced.
        """
        task_defs = self._get_task_defs(task_name)
        jobs = flatten([self._task_jobs(task_def, fold) for task_def in task_defs])
        try:
            results = self._run_jobs(jobs)
            log.info(f"Processing results for {self.sid}")
            log.debug(results)
            if task_name is None:
                scoreboard = self._process_results(results)
            else:
                # Per-task scoreboards when specific tasks were requested.
                for task_def in task_defs:
                    task_results = filter(lambda res: res.result is not None and res.result.task == task_def.name, results)
                    scoreboard = self._process_results(task_results, task_name=task_def.name)
            return scoreboard
        finally:
            self.cleanup()

    def _create_job_runner(self, jobs):
        # Sequential runner for a single job slot, threaded runner otherwise.
        if self.parallel_jobs == 1:
            return SimpleJobRunner(jobs)
        else:
            # return ThreadPoolExecutorJobRunner(jobs, self.parallel_jobs)
            return MultiThreadingJobRunner(jobs, self.parallel_jobs,
                                           delay_secs=rconfig().job_scheduler.delay_between_jobs,
                                           done_async=True)

    def _run_jobs(self, jobs):
        """Run all jobs under OS monitoring, handling Ctrl-C gracefully."""
        self.job_runner = self._create_job_runner(jobs)

        def on_interrupt(*_):
            log.warning("**** SESSION CANCELLED BY USER ****")
            self.job_runner.stop()
            self.cleanup()
            # threading.Thread(target=self.job_runner.stop)
            # threading.Thread(target=self.cleanup)

        try:
            with signal_handler(signal.SIGINT, on_interrupt):
                with OSMonitoring(name=jobs[0].name if len(jobs) == 1 else None,
                                  frequency_seconds=rconfig().monitoring.frequency_seconds,
                                  check_on_exit=True,
                                  statistics=rconfig().monitoring.statistics,
                                  verbosity=rconfig().monitoring.verbosity):
                    self.job_runner.start()
        except (KeyboardInterrupt, InterruptedError):
            pass
        finally:
            results = self.job_runner.results
        # Backfill missing durations with the job-level measurement.
        for res in results:
            if res.result is not None and math.isnan(res.result.duration):
                res.result.duration = res.duration
        return results

    def _benchmark_tasks(self):
        # All tasks of the benchmark definition that are not disabled.
        return [task_def for task_def in self.benchmark_def if Benchmark._is_task_enabled(task_def)]

    def _get_task_defs(self, task_name):
        # Accept None (all tasks), a single name, or a list of names.
        task_defs = (self._benchmark_tasks() if task_name is None
                     else [self._get_task_def(name) for name in task_name] if isinstance(task_name, list)
                     else [self._get_task_def(task_name)])
        if len(task_defs) == 0:
            raise ValueError("No task available.")
        return task_defs

    def _get_task_def(self, task_name, include_disabled=False, fail_on_missing=True):
        """Look up a task definition by (case-insensitive, sanitized) name.

        :raises ValueError: when missing (if fail_on_missing) or disabled.
        """
        try:
            task_def = next(task for task in self.benchmark_def if task.name.lower() == str_sanitize(task_name.lower()))
        except StopIteration:
            if fail_on_missing:
                raise ValueError("Incorrect task name: {}.".format(task_name))
            return None
        if not include_disabled and not Benchmark._is_task_enabled(task_def):
            raise ValueError(f"Task {task_def.name} is disabled, please enable it first.")
        return task_def

    def _task_jobs(self, task_def, folds=None):
        # Normalize folds to a list of ints: None means all folds.
        folds = (range(task_def.folds) if folds is None
                 else folds if isinstance(folds, list) and all(isinstance(f, int) for f in folds)
                 else [folds] if isinstance(folds, int)
                 else None)
        if folds is None:
            raise ValueError("Fold value should be None, an int, or a list of ints.")
        # _make_job returns None for out-of-range folds; filter those out.
        return list(filter(None, [self._make_job(task_def, f) for f in folds]))

    def _make_job(self, task_def, fold: int):
        """
        runs the framework against a given fold
        :param task_def: the task to run
        :param fold: the specific fold to use on this task
        :return: a Job, or None when the fold index is out of range.
        """
        if fold < 0 or fold >= task_def.folds:
            # raise ValueError(f"Fold value {fold} is out of range for task {task_def.name}.")
            log.warning(f"Fold value {fold} is out of range for task {task_def.name}, skipping it.")
            return
        return BenchmarkTask(self, task_def, fold).as_job()

    def _process_results(self, results, task_name=None):
        """Aggregate job results into a Scoreboard; returns its data frame or None."""
        scores = list(filter(None, flatten([res.result for res in results])))
        if len(scores) == 0:
            return None
        # Task-scoped board when a task name is given, benchmark-wide otherwise.
        board = (Scoreboard(scores,
                            framework_name=self.framework_name,
                            task_name=task_name,
                            scores_dir=self.output_dirs.scores) if task_name
                 else Scoreboard(scores,
                                 framework_name=self.framework_name,
                                 benchmark_name=self.benchmark_name,
                                 scores_dir=self.output_dirs.scores))
        if rconfig().results.save:
            self._save(board)
        log.info("Summing up scores for current run:\n%s", board.as_printable_data_frame().dropna(how='all', axis='columns').to_string())
        return board.as_data_frame()

    def _save(self, board):
        # Save the run's board, then merge it into the global boards.
        board.save(append=True)
        self._append(board)

    def _append(self, board):
        # Append to both the default and the configured output directory boards.
        Scoreboard.all().append(board).save()
        Scoreboard.all(rconfig().output_dir).append(board).save()

    @lazy_property
    def output_dirs(self):
        # Session-scoped output directories, created on first access.
        return routput_dirs(rconfig().output_dir, session=self.sid, subdirs=['predictions', 'scores', 'logs'])

    @property
    def _framework_dir(self):
        # Directory of the imported framework module.
        return os.path.dirname(self.framework_module.__file__)

    @staticmethod
    def _is_task_enabled(task_def):
        # A task is enabled unless it explicitly defines enabled=false.
        return not hasattr(task_def, 'enabled') or str2bool(str(task_def.enabled))
class TaskConfig:
    """Mutable bag of per-task runtime parameters handed to the framework.

    __setattr__ keeps two derived attributes in sync:
     - metric: the primary (first) metric of `metrics`
     - job_timeout_seconds: derived from `max_runtime_seconds`
    """

    def __init__(self, name, fold, metrics, seed,
                 max_runtime_seconds, cores, max_mem_size_mb, min_vol_size_mb,
                 input_dir, output_dir):
        # Filled in later by the benchmark runner (see BenchmarkTask.run).
        self.framework = None
        self.framework_params = None
        self.framework_version = None
        self.type = None
        self.name = name
        self.fold = fold
        # Always store metrics as a list (a single metric name may be passed).
        self.metrics = [metrics] if isinstance(metrics, str) else metrics
        self.seed = seed
        self.max_runtime_seconds = max_runtime_seconds
        self.cores = cores
        self.max_mem_size_mb = max_mem_size_mb
        self.min_vol_size_mb = min_vol_size_mb
        self.input_dir = input_dir
        self.output_dir = output_dir
        self.output_predictions_file = os.path.join(output_dir, "predictions.csv")
        self.ext = ns()  # used if frameworks require extra config points

    def __setattr__(self, name, value):
        # Maintain derived attributes whenever their source attribute is set.
        if name == 'metrics':
            # 'metric' mirrors the primary metric.
            self.metric = value[0] if isinstance(value, list) else value
        elif name == 'max_runtime_seconds':
            # Job timeout = runtime + configured overhead, capped at 2x runtime.
            self.job_timeout_seconds = min(value * 2,
                                           value + rconfig().benchmarks.overhead_time_seconds)
        super().__setattr__(name, value)

    def __json__(self):
        # Serialization hook: expose the raw attribute dict.
        return self.__dict__

    def estimate_system_params(self):
        """Clamp cores/memory/volume requirements to what the system offers.

        Unfulfilled constraints either log a warning or raise a JobError,
        depending on the 'on_unfulfilled_constraint' config policy.
        """
        on_unfulfilled = rconfig().benchmarks.on_unfulfilled_constraint
        # First word of run_mode (e.g. 'local' from a composite mode string).
        mode = re.split(r"\W+", rconfig().run_mode, maxsplit=1)[0]

        def handle_unfulfilled(message, on_auto='warn'):
            # Resolve the 'auto' policy to the per-call default, then act.
            action = on_auto if on_unfulfilled == 'auto' else on_unfulfilled
            if action == 'warn':
                log.warning("WARNING: %s", message)
            elif action == 'fail':
                raise JobError(message)

        sys_cores = system_cores()
        if self.cores > sys_cores:
            handle_unfulfilled(f"System with {sys_cores} cores does not meet requirements ({self.cores} cores)!.",
                               on_auto='warn' if mode == 'local' else 'fail')
        # cores <= 0 means "use all available cores".
        self.cores = min(self.cores, sys_cores) if self.cores > 0 else sys_cores
        log.info("Assigning %s cores (total=%s) for new task %s.", self.cores, sys_cores, self.name)

        sys_mem = system_memory_mb()
        os_recommended_mem = ns.get(rconfig(), f"{mode}.os_mem_size_mb", rconfig().benchmarks.os_mem_size_mb)
        left_for_app_mem = int(sys_mem.available - os_recommended_mem)
        # max_mem_size_mb <= 0 means "use what remains after the OS buffer".
        assigned_mem = round(self.max_mem_size_mb if self.max_mem_size_mb > 0
                             else left_for_app_mem if left_for_app_mem > 0
                             else sys_mem.available)
        log.info("Assigning %.f MB (total=%.f MB) for new %s task.", assigned_mem, sys_mem.total, self.name)
        self.max_mem_size_mb = assigned_mem
        # Warn/fail with increasing leniency: over total, over available,
        # or merely eating into the recommended OS buffer.
        if assigned_mem > sys_mem.total:
            handle_unfulfilled(f"Total system memory {sys_mem.total} MB does not meet requirements ({assigned_mem} MB)!.",
                               on_auto='fail')
        elif assigned_mem > sys_mem.available:
            handle_unfulfilled(f"Assigned memory ({assigned_mem} MB) exceeds system available memory ({sys_mem.available} MB / total={sys_mem.total} MB)!")
        elif assigned_mem > sys_mem.total - os_recommended_mem:
            handle_unfulfilled(f"Assigned memory ({assigned_mem} MB) is within {sys_mem.available} MB of system total memory {sys_mem.total} MB): "
                               f"We recommend a {os_recommended_mem} MB buffer, otherwise OS memory usage might interfere with the benchmark task.")

        # Volume check only applies when a minimum size was requested.
        if self.min_vol_size_mb > 0:
            sys_vol = system_volume_mb()
            os_recommended_vol = rconfig().benchmarks.os_vol_size_mb
            if self.min_vol_size_mb > sys_vol.free:
                handle_unfulfilled(f"Available storage ({sys_vol.free} MB / total={sys_vol.total} MB) does not meet requirements ({self.min_vol_size_mb+os_recommended_vol} MB)!")
class BenchmarkTask:
    """One (task, fold) unit of work: loads the data and runs the framework on it."""

    def __init__(self, benchmark: Benchmark, task_def, fold):
        """
        :param benchmark: the owning Benchmark instance.
        :param task_def: the task definition to run.
        :param fold: the fold index to use on this task.
        """
        self.benchmark = benchmark
        self._task_def = task_def
        self.fold = fold
        self.task_config = TaskConfig(
            name=task_def.name,
            fold=fold,
            metrics=task_def.metric,
            seed=rget().seed(fold),
            max_runtime_seconds=task_def.max_runtime_seconds,
            cores=task_def.cores,
            max_mem_size_mb=task_def.max_mem_size_mb,
            min_vol_size_mb=task_def.min_vol_size_mb,
            input_dir=rconfig().input_dir,
            output_dir=benchmark.output_dirs.session,
        )
        # allowing to override some task parameters through command line, e.g.: -Xt.max_runtime_seconds=60
        if rconfig()['t'] is not None:
            for c in dir(self.task_config):
                if rconfig().t[c] is not None:
                    setattr(self.task_config, c, rconfig().t[c])
        self._dataset = None

    @profile(logger=log)
    def load_data(self):
        """
        Loads the training dataset for the current given task
        :return: path to the dataset file
        """
        # Three supported data sources, checked in priority order.
        if hasattr(self._task_def, 'openml_task_id'):
            self._dataset = Benchmark.data_loader.load(DataSourceType.openml_task, task_id=self._task_def.openml_task_id, fold=self.fold)
            log.debug("Loaded OpenML dataset for task_id %s.", self._task_def.openml_task_id)
        elif hasattr(self._task_def, 'openml_dataset_id'):
            # TODO
            raise NotImplementedError("OpenML datasets without task_id are not supported yet.")
        elif hasattr(self._task_def, 'dataset'):
            self._dataset = Benchmark.data_loader.load(DataSourceType.file, dataset=self._task_def.dataset, fold=self.fold)
        else:
            raise ValueError("Tasks should have one property among [openml_task_id, openml_dataset_id, dataset].")

    def as_job(self):
        """Wrap this task into a Job with setup/run hooks and a hard timeout."""
        job = Job(name=rconfig().token_separator.join([
                'local',
                self.benchmark.benchmark_name,
                self.benchmark.constraint_name,
                self.task_config.name,
                str(self.fold),
                self.benchmark.framework_name
            ]),
            # specifying a job timeout to handle edge cases where framework never completes or hangs
            # (adding 5min safety to let the potential subprocess handle the interruption first).
            timeout_secs=self.task_config.job_timeout_seconds+5*60,
            raise_on_failure=rconfig().job_scheduler.exit_on_job_failure,
        )
        job._setup = self.setup
        job._run = self.run
        return job

    def setup(self):
        # Validate resources and fetch the data before the run starts.
        self.task_config.estimate_system_params()
        self.load_data()

    @profile(logger=log)
    def run(self):
        """Run the framework on the loaded dataset and score the predictions."""
        results = TaskResult(task_def=self._task_def, fold=self.fold,
                             constraint=self.benchmark.constraint_name,
                             predictions_dir=self.benchmark.output_dirs.predictions)
        framework_def = self.benchmark.framework_def
        # Work on a copy so per-run mutations don't leak into the template config.
        task_config = copy(self.task_config)
        task_config.type = 'regression' if self._dataset.type == DatasetType.regression else 'classification'
        task_config.type_ = self._dataset.type.name
        task_config.framework = self.benchmark.framework_name
        task_config.framework_params = framework_def.params
        task_config.framework_version = self.benchmark._installed_version()[0]
        # allowing to pass framework parameters through command line, e.g.: -Xf.verbose=True -Xf.n_estimators=3000
        if rconfig()['f'] is not None:
            task_config.framework_params = ns.dict(ns(framework_def.params) + rconfig().f)
        task_config.output_predictions_file = results._predictions_file
        task_config.output_metadata_file = results._metadata_file
        touch(os.path.dirname(task_config.output_predictions_file), as_dir=True)
        # Fall back to the default metrics for the dataset type if unspecified.
        if task_config.metrics is None:
            task_config.metrics = as_list(rconfig().benchmarks.metrics[self._dataset.type.name])
            task_config.metric = task_config.metrics[0]
        result = meta_result = None
        try:
            log.info("Running task %s on framework %s with config:\n%s", task_config.name, self.benchmark.framework_name, repr_def(task_config))
            json_dump(task_config, task_config.output_metadata_file, style='pretty')
            meta_result = self.benchmark.framework_module.run(self._dataset, task_config)
        except Exception as e:
            # Record the failure as a result unless configured to abort the session.
            if rconfig().job_scheduler.exit_on_job_failure:
                raise
            log.exception(e)
            result = ErrorResult(e)
        finally:
            self._dataset.release()
        return results.compute_score(result=result, meta_result=meta_result)
|
proc.py | # Subprocess containers
"""
A mechanism to run subprocesses asynchronously and with non-blocking read.
"""
import os
import Queue
import shlex
import subprocess
import threading
class Group:
    """
    Runs a subprocess in parallel, capturing it's output and providing non-blocking reads (well, at
    least for the caller they appear non-blocking).

    NOTE(review): this module targets Python 2 ('Queue' module, implicit str handling).
    """
    def __init__(self):
        # Lines read from all children, as (handle, line) tuples; a
        # (handle, None) sentinel is queued when a child's output ends.
        self.output = Queue.Queue()
        # Popen handles of all managed processes.
        self.handles = []
        # Number of reader threads still producing output (mutated from the
        # reader threads without a lock — presumably relying on the GIL;
        # TODO confirm this is acceptable for the callers).
        self.waiting = 0

    def run( self, cmd, shell = False ):
        """
        Adds a new process to this object. This process is run and the output collected.
        @param cmd: the command to execute. This may be an array as passed to Popen,
            or a string, which will be parsed by 'shlex.split'
        @return: the handle to the process return from Popen
        """
        cmd = _expand_cmd(cmd)
        handle = subprocess.Popen( cmd,
            shell = shell,
            bufsize = 1,  # line-buffered
            stdout = subprocess.PIPE,
            stderr = open(os.devnull, 'wb'),
            stdin = subprocess.PIPE, # needed to detach from calling terminal (other wacky things can happen)
            preexec_fn=os.setsid,  # new session, so signals to us don't hit the child
            )
        # Set True once the sentinel for this process has been consumed.
        handle.group_output_done = False
        self.handles.append( handle )
        # a thread is created to do blocking-read
        self.waiting += 1
        def block_read():
            try:
                for line in iter( handle.stdout.readline, '' ):
                    self.output.put( ( handle, line ) )
            except:
                # reader errors are deliberately swallowed; the sentinel below
                # still signals end-of-output
                pass
            # To force return of any waiting read (and indicate this process is done
            self.output.put( ( handle, None ) )
            handle.stdout.close()
            self.waiting -= 1
        block_thread = threading.Thread( target = block_read )
        block_thread.daemon = True
        # NOTE(review): redundant with the '.daemon = True' assignment above.
        block_thread.setDaemon(True)
        block_thread.start()
        return handle

    def readlines( self, max_lines = 1000, timeout = 2.0 ):
        """
        Reads available lines from any of the running processes. If no lines are available now
        it will wait until 'timeout' to read a line. If nothing is running the timeout is not waited
        and the function simply returns.
        When a process has been completed and all output has been read from it, a
        variable 'group_ouput_done' will be set to True on the process handle.
        @param timeout: how long to wait if there is nothing available now
        @param max_lines: maximum number of lines to get at once
        @return: An array of tuples of the form:
            ( handle, line )
            There 'handle' was returned by 'run' and 'line' is the line which is read.
            If no line is available an empty list is returned.
        """
        lines = []
        try:
            # Drain whatever is immediately available, up to max_lines.
            while len(lines) < max_lines:
                handle, line = self.output.get_nowait()
                # interrupt waiting if nothing more is expected
                if line == None:
                    handle.group_output_done = True
                    if self.waiting == 0:
                        break
                else:
                    lines.append( ( handle, line ) )
            return lines
        except Queue.Empty:
            # if nothing yet, then wait for something
            if len(lines) > 0 or self.waiting == 0:
                return lines
            item = self.readline( timeout = timeout )
            if item != None:
                lines.append( item )
            return lines

    def readline( self, timeout = 2.0 ):
        """
        Read a single line from any running process.
        Note that this will end up blocking for timeout once all processes have completed.
        'readlines' however can properly handle that situation and stop reading once
        everything is complete.
        @return: Tuple of ( handle, line ) or None if no output generated.
        """
        try:
            handle, line = self.output.get( timeout = timeout )
            # None is the end-of-output sentinel, not a real line.
            if line == None:
                handle.group_output_done = True
                return None
            return (handle, line)
        except Queue.Empty:
            return None

    def is_pending( self ):
        """
        Determine if calling readlines would actually yield any output. This returns true
        if there is a process running or there is data in the queue.
        """
        if self.waiting > 0:
            return True
        return not self.output.empty()

    def count_running( self ):
        """
        Return the number of processes still running. Note that although a process may
        be finished there could still be output from it in the queue. You should use 'is_pending'
        to determine if you should still be reading.
        """
        count = 0
        for handle in self.handles:
            if handle.poll() == None:  # poll() is None while still running
                count += 1
        return count

    def get_exit_codes( self ):
        """
        Return a list of all processes and their exit code.
        @return: A list of tuples:
            ( handle, exit_code )
            'handle' as returned from 'run'
            'exit_code' of the process or None if it has not yet finished
        """
        codes = []
        for handle in self.handles:
            codes.append( ( handle, handle.poll() ) )
        return codes

    def clear_finished( self ):
        """
        Remove all finished processes from the managed list.
        """
        # Keep a process while it is running OR its output is not fully drained.
        nhandles = []
        for handle in self.handles:
            if not handle.group_output_done or handle.poll() == None:
                nhandles.append( handle )
        self.handles = nhandles
class BadExitCode(Exception):
    """Raised by call() when the subprocess exits with a non-zero code."""
    def __init__(self, exit_code, output):
        Exception.__init__( self, 'subprocess-bad-exit-code' )
        # The child's exit status.
        self.exit_code = exit_code
        # Output captured before exit (decoded per call()'s encoding argument).
        self.output = output
class Timeout(Exception):
    """Raised by call() when the subprocess outlives the given timeout."""
    def __init__(self, output):
        Exception.__init__( self, 'subprocess-timeout' )
        # Partial output captured before the process was terminated.
        self.output = output
def call( cmd, encoding = 'utf-8', shell = False, check_exit_code = True, timeout = None ):
    """
    Calls a subprocess and returns the output and optionally exit code.
    @param cmd: command as a list, or a string split via shlex (see _expand_cmd)
    @param encoding: convert output to unicode objects with this encoding, set to None to
        get the raw output
    @param shell: passed through to subprocess.Popen
    @param check_exit_code: set to False to ignore the exit code, otherwise any non-zero
        result will throw BadExitCode.
    @param timeout: If specified only this amount of time (seconds) will be waited for
        the subprocess to return
    @return: If check_exit_code is False: list( output, exit_code ), else just the output
    @raise BadExitCode: non-zero exit status while check_exit_code is True
    @raise Timeout: the timeout elapsed (carries partial output)
    """
    cmd = _expand_cmd(cmd)
    proc = subprocess.Popen( cmd, stdout = subprocess.PIPE, stderr = open(os.devnull, 'wb'),
        stdin = subprocess.PIPE, shell = shell, preexec_fn=os.setsid )

    def decode(out):
        # Fix: the encoding=None branch previously returned the closure
        # variable 'raw_out' instead of the 'out' argument. Harmless at the
        # current call sites (always called as decode(raw_out)) but wrong in
        # general and confusing to read.
        if encoding is not None:
            return unicode( out, encoding )
        return out

    if timeout is None:
        raw_out, ignore_err = proc.communicate()
    else:
        # Read from subprocess in a thread so the main one can check for the timeout
        outq = Queue.Queue()
        def block_read():
            # collect as lines so if timeout we still have partial output
            out = proc.stdout.read()
            outq.put( out )
        block_thread = threading.Thread( target = block_read )
        block_thread.daemon = True
        block_thread.start()
        try:
            raw_out = outq.get(True,timeout)
        except Queue.Empty:
            proc.terminate()
            # wait again for partial output (process is terminated, so reading should end)
            raw_out = outq.get()
            raise Timeout( decode(raw_out) )

    out = decode(raw_out)
    exit_code = proc.poll()
    if check_exit_code:
        if exit_code != 0:
            raise BadExitCode( exit_code, out )
        return out
    return ( out, proc.poll() )
def _expand_cmd(cmd):
    """Return cmd as an argument list, shlex-splitting it when given a string.

    (Python 2: 'basestring' covers both str and unicode.)
    """
    if isinstance(cmd, basestring):
        cmd = shlex.split(cmd)
    return cmd
|
main.py | import multiprocessing as mp
import sys
import yaml
from kivy.config import Config
import constants
from shared import SettingLoader
class Settings:
    """Holds the game configuration values, loadable from a YAML-derived dict."""

    def __init__(self):
        # Number of divisions of the sea in every axis
        self.space_subdivisions = None
        # Frame rate of the game in seconds per frame
        self.frames_per_second = None
        # Window size is immutable and equal to self.window_scale * (800, 600)
        self.window_scale = None
        # Time threshold
        self.time_threshold = None

    def load_from_dict(self, dictionary):
        """
        Load parameters into settings object from dictionary.
        :param dictionary:
        :return:
        """
        # Each setting falls back to its default when the key is absent.
        defaults = (
            ("frames_per_second", 20),
            ("window_scale", 1.0),
            ("time_threshold", 5e-1),
        )
        for key, fallback in defaults:
            setattr(self, key, dictionary.get(key, fallback))
class Application(SettingLoader):
    """Wires the game process and the player process together.

    The two sides talk over two mp.Pipe pairs created in create_pipes().
    """

    def __init__(self):
        SettingLoader.__init__(self)
        # Declaration of class objects
        self.game_controller = None
        self.player_controller = None
        self.settings = None
        # Pipe endpoints: game sends -> player receives, and vice versa.
        self.game_pipe_send = None
        self.game_pipe_receive = None
        self.player_pipe_receive = None
        self.player_pipe_send = None
        # mp.Process running the player controller's loop.
        self.player_loop = None

    def start(self):
        """
        Start game and player processes
        :return:
        """
        # Initialize game process
        self.game_controller = self.get_app()
        self.game_controller.load_settings(self.settings)
        self.game_controller.set_receive_send_pipes(
            self.game_pipe_receive, self.game_pipe_send)
        # Initialize player process
        self.player_controller = self.get_player_controller()
        self.player_controller.load_settings(self.settings)
        self.player_controller.set_receive_send_pipes(
            self.player_pipe_receive, self.player_pipe_send)
        # Set player loop to use
        self.select_and_launch_player_loop()
        self.start_game()

    def select_and_launch_player_loop(self):
        # Run the player loop in a daemon subprocess so it dies with the app.
        # Create process
        self.player_loop = mp.Process(
            target=self.player_controller.player_loop, daemon=True)
        # Start process
        self.player_loop.start()

    @staticmethod
    def get_app():
        # Imported lazily — presumably so kivy's Config (set in __main__)
        # takes effect before the app module loads; TODO confirm.
        from app import FishingDerbyHMMApp
        return FishingDerbyHMMApp()

    def start_game(self):
        """
        Starting the game and the parallel processes: player and game.
        :return:
        """
        self.game_controller.set_player_loop(self.player_loop)
        # Start interface
        self.game_controller.run()
        # After closing window wait until the player loop finishes
        # self.player_loop.join()
        sys.exit(0)

    def create_pipes(self):
        """
        Create pipes to allow exchange of data between player and game processes
        :return:
        """
        self.game_pipe_send, self.player_pipe_receive = mp.Pipe()
        self.player_pipe_send, self.game_pipe_receive = mp.Pipe()

    @staticmethod
    def get_player_controller():
        # Imported lazily; alternative implementations kept as comments.
        # from player_controller_hmm import PlayerControllerHMM
        from hmm_assignments.fishing_derby.hmm_fd_deliverable import PlayerControllerHMM
        # from player import PlayerControllerHMM
        pc = PlayerControllerHMM()
        return pc
if __name__ == '__main__':
    # Load the settings from the yaml file
    settings = Settings()
    settings_dictionary = yaml.safe_load(open('settings.yml', 'r'))
    # The step time threshold comes from the game constants, overriding the file.
    settings_dictionary['time_threshold'] = constants.STEP_TIME_THRESHOLD
    settings.load_from_dict(settings_dictionary)
    # Set window dimensions (base size 800x600 scaled by window_scale).
    Config.set('graphics', 'resizable', False)
    Config.set('graphics', 'width', str(int(settings.window_scale * 800)))
    Config.set('graphics', 'height', str(int(settings.window_scale * 600)))
    # Start application
    app = Application()
    app.load_settings(settings)
    app.create_pipes()
    app.start()
|
ninjaMQTTBridge.py | #!/usr/bin/python
#
# used to interface the NinjaCape via MQTT
# - reads data from serial port and publishes on MQTT client
# - writes data to serial port from MQTT subscriptions
#
# - uses the Python MQTT client from the Mosquitto project http://mosquitto.org (now in Paho)
#
# https://github.com/perrin7/ninjacape-mqtt-bridge
# perrin7
import serial
import paho.mqtt.client as mqtt
import os
import json
import threading
import time
### Settings
serialdev = '/dev/ttyS1' # for BBB
# serialdev = '/dev/ttyAMA0' # for RPi
broker = "127.0.0.1" # mqtt broker
port = 1883 # mqtt broker port
debug = False ## set this to True for lots of prints
# buffer of data to output to the serial port
outputData = []
#### MQTT callbacks
def on_connect(client, userdata, flags, rc):
    """MQTT connect callback: check the result code and subscribe to outputs."""
    if rc == 0:
        #rc 0 successful connect
        print "Connected"
    else:
        # Any non-zero result code aborts the bridge.
        raise Exception
    #subscribe to the output MQTT messages
    output_mid = client.subscribe("ninjaCape/output/#")
def on_publish(client, userdata, mid):
    """MQTT publish callback: trace the message id when debugging."""
    if(debug):
        print "Published. mid:", mid
def on_subscribe(client, userdata, mid, granted_qos):
    """MQTT subscribe callback: trace the message id when debugging."""
    if(debug):
        print "Subscribed. mid:", mid
def on_message_output(client, userdata, msg):
    """Callback for 'ninjaCape/output/#' messages: queue them for the serial writer."""
    if(debug):
        print "Output Data: ", msg.topic, "data:", msg.payload
    #add to outputData list (consumed by the main loop and written to serial)
    outputData.append(msg)
def on_message(client, userdata, message):
if(debug):
print "Unhandled Message Received: ", message.topic, message.paylod
#called on exit
#close serial, disconnect MQTT
def cleanup():
    """Close the serial port and disconnect the MQTT client (module globals)."""
    print "Ending and cleaning up"
    ser.close()
    mqttc.disconnect()
def mqtt_to_JSON_output(mqtt_message):
    """Convert an MQTT message into the JSON line format for the NinjaCape.

    The topic is expected to look like 'ninjaCape/output/<device-id>'; the
    third segment becomes the "D" field and the payload the "DA" field.
    """
    topics = mqtt_message.topic.split('/')
    ## JSON message in ninjaCape form
    # Fix: the original template ended with '}]})' — the stray ')' made the
    # emitted string invalid JSON. Also dropped a stray trailing semicolon.
    json_data = '{"DEVICE": [{"G":"0","V":0,"D":' + topics[2] + ',"DA":"' + mqtt_message.payload + '"}]}'
    return json_data
#thread for reading serial data and publishing to MQTT client
def serial_read_and_publish(ser, mqttc):
    """Read JSON lines from the serial port forever and republish them on MQTT.

    Runs in a daemon thread; each line is expected to be a NinjaCape JSON
    packet of the form {"DEVICE": [{"D": ..., "DA": ...}, ...]}.
    """
    # Discard anything buffered before we started.
    ser.flushInput()
    while True:
        line = ser.readline() # this is blocking
        if(debug):
            print "line to decode:",line
        # split the JSON packet up here and publish on MQTT
        json_data = json.loads(line)
        if(debug):
            print "json decoded:",json_data
        try:
            device = str( json_data['DEVICE'][0]['D'] )
            data = str( json_data['DEVICE'][0]['DA'] )
            # Topic mirrors the device id: ninjaCape/input/<device>.
            mqttc.publish("ninjaCape/input/"+device, data)
        except(KeyError):
            # TODO should probably do something here if the data is malformed
            pass
############ MAIN PROGRAM START
try:
print "Connecting... ", serialdev
#connect to serial port
ser = serial.Serial(serialdev, 9600, timeout=None) #timeout 0 for non-blocking. Set to None for blocking.
except:
print "Failed to connect serial"
#unable to continue with no serial input
raise SystemExit
try:
#create an mqtt client
mqttc = mqtt.Client("ninjaCape")
#attach MQTT callbacks
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.on_message = on_message
mqttc.message_callback_add("ninjaCape/output/#", on_message_output)
#connect to broker
mqttc.connect(broker, port, 60)
# start the mqttc client thread
mqttc.loop_start()
serial_thread = threading.Thread(target=serial_read_and_publish, args=(ser, mqttc))
serial_thread.daemon = True
serial_thread.start()
while True: # main thread
#writing to serial port if there is data available
if( len(outputData) > 0 ):
#print "***data to OUTPUT:",mqtt_to_JSON_output(outputData[0])
ser.write(mqtt_to_JSON_output(outputData.pop()))
time.sleep(0.5)
# handle app closure
except (KeyboardInterrupt):
print "Interrupt received"
cleanup()
except (RuntimeError):
print "uh-oh! time to die"
cleanup()
|
plotting.py | """Pyvista plotting module."""
import pathlib
import collections.abc
from functools import partial
import logging
import os
import textwrap
import time
import warnings
import weakref
from functools import wraps
from threading import Thread
import imageio
import numpy as np
import scooby
import vtk
from vtk.util import numpy_support as VN
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
from typing import Dict
import pyvista
from pyvista.utilities import (assert_empty_kwargs, convert_array,
convert_string_array, get_array,
is_pyvista_dataset, abstract_class,
raise_not_matching, try_callback, wrap)
from pyvista.utilities.regression import image_from_window
from .background_renderer import BackgroundRenderer
from .colors import get_cmap_safe
from .export_vtkjs import export_plotter_vtkjs
from .mapper import make_mapper
from .picking import PickingHelper
from .renderer import Renderer, Camera
from .theme import (FONT_KEYS, MAX_N_COLOR_BARS, parse_color,
parse_font_family, rcParams)
from .tools import normalize, opacity_transfer_function
from .widgets import WidgetHelper
try:
import matplotlib
has_matplotlib = True
except ImportError:
has_matplotlib = False
SUPPORTED_FORMATS = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"]
def close_all():
    """Close all open/active plotters and clean up memory."""
    for plotter in _ALL_PLOTTERS.values():
        # Close only plotters that are still open, but deep-clean every one.
        if not plotter._closed:
            plotter.close()
        plotter.deep_clean()
    _ALL_PLOTTERS.clear()
    return True
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
log.addHandler(logging.StreamHandler())
@abstract_class
class BasePlotter(PickingHelper, WidgetHelper):
"""To be used by the Plotter and pyvistaqt.QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
border_width : float, optional
Width of the border in pixels when enabled.
title : str, optional
Window title of the scalar bar
lighting : str, optional
What lighting to set up for the plotter.
Accepted options:
* ``'light_kit'``: a vtk Light Kit composed of 5 lights.
* ``'three lights'``: illumination using 3 lights.
* ``'none'``: no light sources at instantiation.
The default is a Light Kit (to be precise, 5 separate lights
that act like a Light Kit).
"""
# Last mouse position in window pixel coordinates; written by
# store_mouse_position (None until mouse tracking records one).
mouse_position = None
# Last click position in window pixel coordinates; written by
# store_click_position (None until a tracked click occurs).
click_position = None
def __init__(self, shape=(1, 1), border=None, border_color='k',
             border_width=2.0, title=None, splitting_position=None,
             groups=None, row_weights=None, col_weights=None,
             lighting='light kit'):
    """Initialize base plotter.

    See the class docstring for the meaning of ``shape``, ``border``,
    ``border_color``, ``border_width``, ``title`` and ``lighting``.
    ``groups``, ``row_weights`` and ``col_weights`` refine grid layouts:
    weights size the rows/columns and each group merges a rectangular
    span of cells into a single renderer.
    """
    log.debug('BasePlotter init start')
    self.image_transparent_background = rcParams['transparent_background']
    # optional function to be called prior to closing
    self.__before_close_callback = None
    self._store_image = False
    self.mesh = None
    if title is None:
        title = rcParams['title']
    self.title = str(title)
    # by default add border for multiple plots
    if border is None:
        if shape != (1, 1):
            border = True
        else:
            border = False
    # add render windows
    self._active_renderer_index = 0
    self.renderers = []
    # Nx4 rows of [top-row, left-col, bottom-row, right-col] per group
    self.groups = np.empty((0,4),dtype=int)
    if isinstance(shape, str):
        # String descriptors: "n|m" = n plots left / m right (vertical
        # split); "m/n" = m plots on top / n below (horizontal split).
        if '|' in shape:
            n = int(shape.split('|')[0])
            m = int(shape.split('|')[1])
            rangen = reversed(range(n))
            rangem = reversed(range(m))
        else:
            m = int(shape.split('/')[0])
            n = int(shape.split('/')[1])
            rangen = range(n)
            rangem = range(m)
        if splitting_position is None:
            splitting_position = rcParams['multi_rendering_splitting_position']
        # default split position is proportional to the two plot counts
        if splitting_position is None:
            if n >= m:
                xsplit = m/(n+m)
            else:
                xsplit = 1-n/(n+m)
        else:
            xsplit = splitting_position
        # first side of the split: n evenly sized viewports
        for i in rangen:
            arenderer = Renderer(self, border, border_color, border_width)
            if '|' in shape:
                arenderer.SetViewport(0, i/n, xsplit, (i+1)/n)
            else:
                arenderer.SetViewport(i/n, 0, (i+1)/n, xsplit)
            self.renderers.append(arenderer)
        # second side of the split: m evenly sized viewports
        for i in rangem:
            arenderer = Renderer(self, border, border_color, border_width)
            if '|' in shape:
                arenderer.SetViewport(xsplit, i/m, 1, (i+1)/m)
            else:
                arenderer.SetViewport(i/m, xsplit, (i+1)/m, 1)
            self.renderers.append(arenderer)
        # string layouts use a flat shape: renderer index == location
        self.shape = (n+m,)
        self._render_idxs = np.arange(n+m)
    else:
        if not isinstance(shape, (np.ndarray, collections.abc.Sequence)):
            raise TypeError('"shape" should be a list, tuple or string descriptor')
        if len(shape) != 2:
            raise ValueError('"shape" must have length 2.')
        shape = np.asarray(shape)
        if not np.issubdtype(shape.dtype, np.integer) or (shape <= 0).any():
            raise ValueError('"shape" must contain only positive integers.')
        # always assign shape as a tuple
        self.shape = tuple(shape)
        self._render_idxs = np.empty(self.shape,dtype=int)
        # Check if row and col weights correspond to given shape, or initialize them to defaults (equally weighted)
        # and convert to normalized offsets
        if row_weights is None:
            row_weights = np.ones(shape[0])
        if col_weights is None:
            col_weights = np.ones(shape[1])
        assert(np.array(row_weights).size==shape[0])
        assert(np.array(col_weights).size==shape[1])
        # cumulative weights -> normalized viewport offsets in [0, 1];
        # rows are flipped because viewport y grows upward but row 0 is on top
        row_off = np.cumsum(np.abs(row_weights))/np.sum(np.abs(row_weights))
        row_off = 1-np.concatenate(([0],row_off))
        col_off = np.cumsum(np.abs(col_weights))/np.sum(np.abs(col_weights))
        col_off = np.concatenate(([0],col_off))
        # Check and convert groups to internal format (Nx4 matrix where every row contains the row and col index of the top left cell
        # together with the row and col index of the bottom right cell)
        if groups is not None:
            assert isinstance(groups, collections.abc.Sequence), '"groups" should be a list or tuple'
            for group in groups:
                assert isinstance(group, collections.abc.Sequence) and len(group)==2, 'each group entry should be a list or tuple of 2 elements'
                rows = group[0]
                if isinstance(rows,slice):
                    rows = np.arange(self.shape[0],dtype=int)[rows]
                cols = group[1]
                if isinstance(cols,slice):
                    cols = np.arange(self.shape[1],dtype=int)[cols]
                # Get the normalized group, i.e. extract top left corner and bottom right corner from the given rows and cols
                norm_group = [np.min(rows),np.min(cols),np.max(rows),np.max(cols)]
                # Check for overlap with already defined groups:
                for i in range(norm_group[0],norm_group[2]+1):
                    for j in range(norm_group[1],norm_group[3]+1):
                        assert self.loc_to_group((i,j)) is None, 'groups cannot overlap'
                self.groups = np.concatenate((self.groups,np.array([norm_group],dtype=int)),axis=0)
        # Create subplot renderers
        for row in range(shape[0]):
            for col in range(shape[1]):
                group = self.loc_to_group((row,col))
                nb_rows = None
                nb_cols = None
                if group is not None:
                    if row==self.groups[group,0] and col==self.groups[group,1]:
                        # Only add renderer for first location of the group
                        nb_rows = 1+self.groups[group,2]-self.groups[group,0]
                        nb_cols = 1+self.groups[group,3]-self.groups[group,1]
                else:
                    nb_rows = 1
                    nb_cols = 1
                if nb_rows is not None:
                    renderer = Renderer(self, border, border_color, border_width)
                    # viewport spans nb_rows x nb_cols cells of the grid
                    x0 = col_off[col]
                    y0 = row_off[row+nb_rows]
                    x1 = col_off[col+nb_cols]
                    y1 = row_off[row]
                    renderer.SetViewport(x0, y0, x1, y1)
                    self._render_idxs[row,col] = len(self.renderers)
                    self.renderers.append(renderer)
                else:
                    # non-anchor cell of a group: reuse the anchor's renderer
                    self._render_idxs[row,col] = self._render_idxs[self.groups[group,0],self.groups[group,1]]
    # each render will also have an associated background renderer
    self._background_renderers = [None for _ in range(len(self.renderers))]
    # create a shadow renderer that lives on top of all others
    self._shadow_renderer = Renderer(
        self, border, border_color, border_width)
    self._shadow_renderer.SetViewport(0, 0, 1, 1)
    self._shadow_renderer.SetDraw(False)
    # This keeps track of scalars names already plotted and their ranges
    self._scalar_bar_ranges = {}
    self._scalar_bar_mappers = {}
    self._scalar_bar_actors = {}
    self._scalar_bar_widgets = {}
    # track if the camera has been setup
    # self.camera_set = False
    self._first_time = True
    # Keep track of the scale
    self._labels = []
    # Set default style
    self._style = 'RubberBandPick'
    self._style_class = None
    # this helps managing closed plotters
    self._closed = False
    # lighting style; be forgiving with input (accept underscores and ignore case)
    if lighting is None:
        lighting = 'none'
    lighting_normalized = lighting.replace('_', ' ').lower()
    if lighting_normalized == 'light kit':
        self.enable_lightkit()
    elif lighting_normalized == 'three lights':
        self.enable_3_lights()
    elif lighting_normalized != 'none':
        raise ValueError(f'Invalid lighting option "{lighting}".')
    # Add self to open plotters
    self._id_name = f"{hex(id(self))}-{len(_ALL_PLOTTERS)}"
    _ALL_PLOTTERS[self._id_name] = self
    # Key bindings
    self.reset_key_events()
    log.debug('BasePlotter init stop')
@property
def _before_close_callback(self):
    """Return the cached function (expecting a reference)."""
    # The stored value is a weakref.ref; calling it dereferences to the
    # original function, or None if it has been garbage collected.
    if self.__before_close_callback is not None:
        return self.__before_close_callback()
@_before_close_callback.setter
def _before_close_callback(self, func):
    """Store a weakref.ref of the function being called."""
    # A weak reference avoids keeping the user callback (and anything it
    # captures, e.g. the plotter itself) alive after the caller drops it.
    if func is not None:
        self.__before_close_callback = weakref.ref(func)
    else:
        self.__before_close_callback = None
#### Manage the active Renderer ####
def loc_to_group(self, loc):
    """Return group id of the given location index. Or None if this location is not part of any group."""
    row, col = loc[0], loc[1]
    bounds = self.groups
    # a location belongs to a group when it lies inside the group's
    # [top-row, left-col] .. [bottom-row, right-col] rectangle
    inside = ((row >= bounds[:, 0]) & (row <= bounds[:, 2])
              & (col >= bounds[:, 1]) & (col <= bounds[:, 3]))
    matches = np.flatnonzero(inside)
    if matches.size == 0:
        return None
    return matches[0]
def loc_to_index(self, loc):
    """Return index of the render window given a location index.

    Parameters
    ----------
    loc : int, tuple, or list
        Index of the renderer to add the actor to. For example,
        ``loc=2`` or ``loc=(1, 1)``.

    Returns
    -------
    idx : int
        Index of the render window.
    """
    # no location given: use whichever renderer is currently active
    if loc is None:
        return self._active_renderer_index
    # a scalar is already a flat renderer index
    if isinstance(loc, (int, np.integer)):
        return loc
    if not isinstance(loc, (np.ndarray, collections.abc.Sequence)):
        raise TypeError('"loc" must be an integer or a sequence.')
    if len(loc) != 2:
        raise ValueError('"loc" must contain two items')
    row, col = loc[0], loc[1]
    if row < 0 or row >= self.shape[0]:
        raise IndexError(f'Row index is out of range ({self.shape[0]})')
    if col < 0 or col >= self.shape[1]:
        raise IndexError(f'Column index is out of range ({self.shape[1]})')
    return self._render_idxs[row, col]
def index_to_loc(self, index):
    """Convert a 1D index location to the 2D location on the plotting grid.

    Parameters
    ----------
    index : int
        Flat index into the renderer list.

    Returns
    -------
    int or numpy.ndarray
        ``index`` unchanged for flat (string-descriptor) layouts,
        otherwise the ``[row, col]`` location of that renderer.

    Raises
    ------
    TypeError
        If ``index`` is not a scalar integer.
    IndexError
        If ``index`` does not map to any renderer.
    """
    if not isinstance(index, (int, np.integer)):
        raise TypeError('"index" must be a scalar integer.')
    # flat layouts: the index is already the location
    if len(self.shape) == 1:
        return index
    args = np.argwhere(self._render_idxs == index)
    if len(args) < 1:
        # BUG FIX: interpolate the offending index into the message; the
        # original raised with a literal, unformatted '{}' placeholder.
        raise IndexError(f'Index ({index}) is out of range.')
    return args[0]
@property
def renderer(self):
    """Return the active renderer."""
    return self.renderers[self._active_renderer_index]
@property
def store_image(self):
    """Return if an image will be saved on close."""
    return self._store_image
@store_image.setter
def store_image(self, value):
    """Store last rendered frame on close."""
    # coerce to bool so any truthy value behaves consistently
    self._store_image = bool(value)
def subplot(self, index_row, index_column=None):
    """Set the active subplot.

    Parameters
    ----------
    index_row : int
        Index of the subplot to activate along the rows.
    index_column : int
        Index of the subplot to activate along the columns.
    """
    # flat (string-descriptor) layouts are addressed by a single index
    if len(self.shape) == 1:
        self._active_renderer_index = index_row
        return
    n_rows, n_cols = self.shape[0], self.shape[1]
    if not 0 <= index_row < n_rows:
        raise IndexError(f'Row index is out of range ({n_rows})')
    if not 0 <= index_column < n_cols:
        raise IndexError(f'Column index is out of range ({n_cols})')
    self._active_renderer_index = self.loc_to_index((index_row, index_column))
#### Wrap Renderer methods ####
# Thin delegation wrappers: each forwards to the currently active renderer;
# @wraps copies the wrapped Renderer method's name/signature/docstring
# metadata onto the plotter-level method.
@wraps(Renderer.add_floor)
def add_floor(self, *args, **kwargs):
    """Wrap ``Renderer.add_floor``."""
    return self.renderer.add_floor(*args, **kwargs)
@wraps(Renderer.remove_floors)
def remove_floors(self, *args, **kwargs):
    """Wrap ``Renderer.remove_floors``."""
    return self.renderer.remove_floors(*args, **kwargs)
def enable_3_lights(self, only_active=False):
    """Enable 3-lights illumination.

    This will replace all pre-existing lights in the scene.

    Parameters
    ----------
    only_active : bool
        If ``True``, only change the active renderer. The default is that
        every renderer is affected.
    """
    def _to_pos(elevation, azimuth):
        # spherical (elevation/azimuth in degrees) -> Cartesian unit vector
        theta = azimuth * np.pi / 180.0
        phi = (90.0 - elevation) * np.pi / 180.0
        return (np.sin(theta) * np.sin(phi),
                np.cos(phi),
                np.cos(theta) * np.sin(phi))

    targets = [self.renderer] if only_active else self.renderers
    for target in targets:
        target.remove_all_lights()
    # Inspired from Mayavi's version of Raymond Maple 3-lights illumination
    light_specs = [(1, (45.0, 45.0)), (0.6, (-30.0, -60.0)), (0.5, (-30.0, 60.0))]
    for intensity, angles in light_specs:
        light = pyvista.Light(light_type='camera light')
        light.intensity = intensity
        light.position = _to_pos(*angles)
        for target in targets:
            target.add_light(light)
def disable_3_lights(self):
    """Please use ``enable_lightkit``, this method has been deprecated."""
    # imported lazily: only needed on this always-raising error path
    from pyvista.core.errors import DeprecationError
    raise DeprecationError('DEPRECATED: Please use ``enable_lightkit``')
def enable_lightkit(self, only_active=False):
    """Enable the default light-kit lighting.

    See:
    https://www.researchgate.net/publication/2926068

    This will replace all pre-existing lights in the renderer.

    Parameters
    ----------
    only_active : bool
        If ``True``, only change the active renderer. The default is that
        every renderer is affected.
    """
    renderers = [self.renderer] if only_active else self.renderers
    light_kit = vtk.vtkLightKit()
    for renderer in renderers:
        renderer.remove_all_lights()
        # Use the renderer as a vtkLightKit parser.
        # Feed it the LightKit, pop off the vtkLights, put back
        # pyvista Lights. This is the price we must pay for using
        # inheritance rather than composition.
        light_kit.AddLightsToRenderer(renderer)
        vtk_lights = renderer.lights
        renderer.remove_all_lights()
        for vtk_light in vtk_lights:
            light = pyvista.Light.from_vtk(vtk_light)
            renderer.add_light(light)
        renderer.LightFollowCameraOn()
# More thin delegation wrappers and pass-through properties; all act on the
# currently active renderer. Camera-moving calls re-render immediately so the
# change is visible.
@wraps(Renderer.enable_anti_aliasing)
def enable_anti_aliasing(self, *args, **kwargs):
    """Wrap ``Renderer.enable_anti_aliasing``."""
    self.renderer.enable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.disable_anti_aliasing)
def disable_anti_aliasing(self, *args, **kwargs):
    """Wrap ``Renderer.disable_anti_aliasing``."""
    self.renderer.disable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.set_focus)
def set_focus(self, *args, **kwargs):
    """Wrap ``Renderer.set_focus``."""
    log.debug('set_focus: %s, %s', str(args), str(kwargs))
    self.renderer.set_focus(*args, **kwargs)
    self.render()
@wraps(Renderer.set_position)
def set_position(self, *args, **kwargs):
    """Wrap ``Renderer.set_position``."""
    self.renderer.set_position(*args, **kwargs)
    self.render()
@wraps(Renderer.set_viewup)
def set_viewup(self, *args, **kwargs):
    """Wrap ``Renderer.set_viewup``."""
    self.renderer.set_viewup(*args, **kwargs)
    self.render()
@wraps(Renderer.add_orientation_widget)
def add_orientation_widget(self, *args, **kwargs):
    """Wrap ``Renderer.add_orientation_widget``."""
    return self.renderer.add_orientation_widget(*args, **kwargs)
@wraps(Renderer.add_axes)
def add_axes(self, *args, **kwargs):
    """Wrap ``Renderer.add_axes``."""
    return self.renderer.add_axes(*args, **kwargs)
@wraps(Renderer.hide_axes)
def hide_axes(self, *args, **kwargs):
    """Wrap ``Renderer.hide_axes``."""
    return self.renderer.hide_axes(*args, **kwargs)
@wraps(Renderer.show_axes)
def show_axes(self, *args, **kwargs):
    """Wrap ``Renderer.show_axes``."""
    return self.renderer.show_axes(*args, **kwargs)
@wraps(Renderer.update_bounds_axes)
def update_bounds_axes(self, *args, **kwargs):
    """Wrap ``Renderer.update_bounds_axes``."""
    return self.renderer.update_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_actor)
def add_actor(self, *args, **kwargs):
    """Wrap ``Renderer.add_actor``."""
    return self.renderer.add_actor(*args, **kwargs)
@wraps(Renderer.enable_parallel_projection)
def enable_parallel_projection(self, *args, **kwargs):
    """Wrap ``Renderer.enable_parallel_projection``."""
    return self.renderer.enable_parallel_projection(*args, **kwargs)
@wraps(Renderer.disable_parallel_projection)
def disable_parallel_projection(self, *args, **kwargs):
    """Wrap ``Renderer.disable_parallel_projection``."""
    return self.renderer.disable_parallel_projection(*args, **kwargs)
@property
def parallel_projection(self):
    """Return parallel projection state of active render window."""
    return self.renderer.parallel_projection
@parallel_projection.setter
def parallel_projection(self, state):
    """Set parallel projection state of all active render windows."""
    self.renderer.parallel_projection = state
@property
def parallel_scale(self):
    """Return parallel scale of active render window."""
    return self.renderer.parallel_scale
@parallel_scale.setter
def parallel_scale(self, value):
    """Set parallel scale of all active render windows."""
    self.renderer.parallel_scale = value
# Delegation wrappers for bounds/axes display, camera orientation shortcuts
# and interaction toggles — all forwarded to the active renderer.
@wraps(Renderer.add_axes_at_origin)
def add_axes_at_origin(self, *args, **kwargs):
    """Wrap ``Renderer.add_axes_at_origin``."""
    return self.renderer.add_axes_at_origin(*args, **kwargs)
@wraps(Renderer.show_bounds)
def show_bounds(self, *args, **kwargs):
    """Wrap ``Renderer.show_bounds``."""
    return self.renderer.show_bounds(*args, **kwargs)
@wraps(Renderer.add_bounding_box)
def add_bounding_box(self, *args, **kwargs):
    """Wrap ``Renderer.add_bounding_box``."""
    return self.renderer.add_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounding_box)
def remove_bounding_box(self, *args, **kwargs):
    """Wrap ``Renderer.remove_bounding_box``."""
    return self.renderer.remove_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounds_axes)
def remove_bounds_axes(self, *args, **kwargs):
    """Wrap ``Renderer.remove_bounds_axes``."""
    return self.renderer.remove_bounds_axes(*args, **kwargs)
@wraps(Renderer.show_grid)
def show_grid(self, *args, **kwargs):
    """Wrap ``Renderer.show_grid``."""
    return self.renderer.show_grid(*args, **kwargs)
@wraps(Renderer.set_scale)
def set_scale(self, *args, **kwargs):
    """Wrap ``Renderer.set_scale``."""
    return self.renderer.set_scale(*args, **kwargs)
@wraps(Renderer.enable_eye_dome_lighting)
def enable_eye_dome_lighting(self, *args, **kwargs):
    """Wrap ``Renderer.enable_eye_dome_lighting``."""
    return self.renderer.enable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.disable_eye_dome_lighting)
def disable_eye_dome_lighting(self, *args, **kwargs):
    """Wrap ``Renderer.disable_eye_dome_lighting``."""
    return self.renderer.disable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.reset_camera)
def reset_camera(self, *args, **kwargs):
    """Wrap ``Renderer.reset_camera``."""
    self.renderer.reset_camera(*args, **kwargs)
    # re-render so the camera reset is visible immediately
    self.render()
@wraps(Renderer.isometric_view)
def isometric_view(self, *args, **kwargs):
    """Wrap ``Renderer.isometric_view``."""
    return self.renderer.isometric_view(*args, **kwargs)
@wraps(Renderer.view_isometric)
def view_isometric(self, *args, **kwarg):
    """Wrap ``Renderer.view_isometric``."""
    return self.renderer.view_isometric(*args, **kwarg)
@wraps(Renderer.view_vector)
def view_vector(self, *args, **kwarg):
    """Wrap ``Renderer.view_vector``."""
    return self.renderer.view_vector(*args, **kwarg)
@wraps(Renderer.view_xy)
def view_xy(self, *args, **kwarg):
    """Wrap ``Renderer.view_xy``."""
    return self.renderer.view_xy(*args, **kwarg)
@wraps(Renderer.view_yx)
def view_yx(self, *args, **kwarg):
    """Wrap ``Renderer.view_yx``."""
    return self.renderer.view_yx(*args, **kwarg)
@wraps(Renderer.view_xz)
def view_xz(self, *args, **kwarg):
    """Wrap ``Renderer.view_xz``."""
    return self.renderer.view_xz(*args, **kwarg)
@wraps(Renderer.view_zx)
def view_zx(self, *args, **kwarg):
    """Wrap ``Renderer.view_zx``."""
    return self.renderer.view_zx(*args, **kwarg)
@wraps(Renderer.view_yz)
def view_yz(self, *args, **kwarg):
    """Wrap ``Renderer.view_yz``."""
    return self.renderer.view_yz(*args, **kwarg)
@wraps(Renderer.view_zy)
def view_zy(self, *args, **kwarg):
    """Wrap ``Renderer.view_zy``."""
    return self.renderer.view_zy(*args, **kwarg)
@wraps(Renderer.disable)
def disable(self, *args, **kwarg):
    """Wrap ``Renderer.disable``."""
    return self.renderer.disable(*args, **kwarg)
@wraps(Renderer.enable)
def enable(self, *args, **kwarg):
    """Wrap ``Renderer.enable``."""
    return self.renderer.enable(*args, **kwarg)
@wraps(Renderer.enable_depth_peeling)
def enable_depth_peeling(self, *args, **kwargs):
    """Wrap ``Renderer.enable_depth_peeling``.

    Returns None when no render window exists yet.
    """
    if hasattr(self, 'ren_win'):
        result = self.renderer.enable_depth_peeling(*args, **kwargs)
        if result:
            # depth peeling needs the window's alpha bit planes enabled
            self.ren_win.AlphaBitPlanesOn()
        return result
@wraps(Renderer.disable_depth_peeling)
def disable_depth_peeling(self):
    """Wrap ``Renderer.disable_depth_peeling``.

    Returns None when no render window exists yet.
    """
    if hasattr(self, 'ren_win'):
        self.ren_win.AlphaBitPlanesOff()
        return self.renderer.disable_depth_peeling()
@wraps(Renderer.get_default_cam_pos)
def get_default_cam_pos(self, *args, **kwargs):
    """Wrap ``Renderer.get_default_cam_pos``."""
    return self.renderer.get_default_cam_pos(*args, **kwargs)
@wraps(Renderer.remove_actor)
def remove_actor(self, *args, **kwargs):
"""Wrap ``Renderer.remove_actor``."""
for renderer in self.renderers:
renderer.remove_actor(*args, **kwargs)
return True
#### Properties from Renderer ####
# Pass-through properties: state lives on the active renderer; the plotter
# simply exposes it at the top level.
@property
def camera(self):
    """Return the active camera of the active renderer."""
    # lazily set up a default camera the first time it is requested
    if not self.camera_set:
        self.camera_position = self.get_default_cam_pos()
        self.reset_camera()
        self.camera_set = True
    return self.renderer.camera
@camera.setter
def camera(self, camera):
    """Set the active camera for the rendering scene."""
    self.renderer.camera = camera
@property
def camera_set(self):
    """Return if the camera of the active renderer has been set."""
    return self.renderer.camera_set
@camera_set.setter
def camera_set(self, is_set):
    """Set if the camera has been set on the active renderer."""
    self.renderer.camera_set = is_set
@property
def bounds(self):
    """Return the bounds of the active renderer."""
    return self.renderer.bounds
@property
def length(self):
    """Return the length of the diagonal of the bounding box of the scene."""
    return self.renderer.length
@property
def center(self):
    """Return the center of the active renderer."""
    return self.renderer.center
@property
def _scalar_bar_slots(self):
    """Return the scalar bar slots of the active renderer."""
    return self.renderer._scalar_bar_slots
@_scalar_bar_slots.setter
def _scalar_bar_slots(self, value):
    """Set the scalar bar slots of the active renderer."""
    self.renderer._scalar_bar_slots = value
@property
def _scalar_bar_slot_lookup(self):
    """Return the scalar bar slot lookup of the active renderer."""
    return self.renderer._scalar_bar_slot_lookup
@_scalar_bar_slot_lookup.setter
def _scalar_bar_slot_lookup(self, value):
    """Set the scalar bar slot lookup of the active renderer."""
    self.renderer._scalar_bar_slot_lookup = value
@property
def scale(self):
    """Return the scaling of the active renderer."""
    return self.renderer.scale
@scale.setter
def scale(self, scale):
    """Set the scaling of the active renderer."""
    self.renderer.set_scale(*scale)
@property
def camera_position(self):
    """Return camera position of the active render window."""
    return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
    """Set camera position of the active render window."""
    self.renderer.camera_position = camera_location
@property
def background_color(self):
    """Return the background color of the first render window."""
    return self.renderers[0].GetBackground()
@background_color.setter
def background_color(self, color):
    """Set the background color of all the render windows."""
    self.set_background(color)
#### Properties of the BasePlotter ####
@property
def window_size(self):
    """Return the render window size as ``[width, height]``."""
    return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
    """Set the render window size from a ``(width, height)`` pair."""
    self.ren_win.SetSize(window_size[0], window_size[1])
@property
def image_depth(self):
    """Return a depth image representing current render window.

    Helper attribute for ``get_image_depth``.
    """
    return self.get_image_depth()
@property
def image(self):
    """Return an image array of current render window.

    To retrieve an image after the render window has been closed,
    set: `plotter.store_image = True` before closing the plotter.
    """
    # window already closed: fall back to the image captured at close time
    if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
        return self.last_image
    data = image_from_window(self.ren_win)
    if self.image_transparent_background:
        return data
    else:  # ignore alpha channel
        return data[:, :, :-1]
def render(self):
    """Render the main window.

    Does nothing until ``show`` has been called.
    """
    # _first_time is set in __init__; presumably cleared once show() has
    # created the window (not visible here) -- rendering is skipped until then
    if hasattr(self, 'ren_win') and not self._first_time:
        log.debug('Rendering')
        self.ren_win.Render()
def add_key_event(self, key, callback):
    """Add a function to callback when the given key is pressed.

    These are non-unique - thus a key could map to many callback
    functions. The callback function must not have any arguments.

    Parameters
    ----------
    key : str
        The key to trigger the event
    callback : callable
        A callable that takes no arguments

    Raises
    ------
    TypeError
        If ``callback`` is not callable.
    """
    # callable() is the idiomatic form of hasattr(x, '__call__')
    if not callable(callback):
        raise TypeError('callback must be callable.')
    self._key_press_event_callbacks[key].append(callback)
def _add_observer(self, event, call):
    """Register ``call`` for a VTK interactor event, keeping its tag."""
    # try_callback shields VTK from exceptions raised inside user callbacks
    call = partial(try_callback, call)
    self._observers[event] = self.iren.AddObserver(event, call)
def _remove_observer(self, event):
    """Detach the observer previously registered for ``event``."""
    # NOTE(review): AddObserver returned a tag that was stored in
    # self._observers[event], yet the *event* is passed to RemoveObserver
    # here -- verify whether the stored tag should be passed instead.
    if event in self._observers:
        self.iren.RemoveObserver(event)
        del self._observers[event]
def clear_events_for_key(self, key):
    """Remove the callbacks associated to the key."""
    # raises KeyError if no callbacks were ever registered for ``key``
    self._key_press_event_callbacks.pop(key)
def store_mouse_position(self, *args):
    """Record the current interactor event position as the mouse position."""
    if not hasattr(self, "iren"):
        raise AttributeError("This plotting window is not interactive.")
    self.mouse_position = self.iren.GetEventPosition()
def store_click_position(self, *args):
    """Record the current interactor event position as the click position.

    The mouse position is updated to match as well.
    """
    if not hasattr(self, "iren"):
        raise AttributeError("This plotting window is not interactive.")
    position = self.iren.GetEventPosition()
    self.click_position = position
    self.mouse_position = position
def track_mouse_position(self):
    """Keep track of the mouse position.

    This will potentially slow down the interactor. No callbacks supported
    here - use :func:`pyvista.BasePlotter.track_click_position` instead.
    """
    # silently does nothing for non-interactive (no iren) plotters
    if hasattr(self, "iren"):
        self._add_observer(vtk.vtkCommand.MouseMoveEvent,
                           self.store_mouse_position)
def untrack_mouse_position(self):
    """Stop tracking the mouse position."""
    self._remove_observer(vtk.vtkCommand.MouseMoveEvent)
def track_click_position(self, callback=None, side="right",
                         viewport=False):
    """Keep track of the click position.

    By default, it only tracks right clicks.

    Parameters
    ----------
    callback : callable
        A callable method that will use the click position. Passes the
        click position as a length two tuple.

    side : str
        The side of the mouse for the button to track (left or right).
        Default is left. Also accepts ``'r'`` or ``'l'``.

    viewport: bool
        If ``True``, uses the normalized viewport coordinate system
        (values between 0.0 and 1.0 and support for HiDPI) when passing the
        click position to the callback
    """
    # silently does nothing for non-interactive (no iren) plotters
    if not hasattr(self, "iren"):
        return
    side = str(side).lower()
    if side in ["right", "r"]:
        event = vtk.vtkCommand.RightButtonPressEvent
    elif side in ["left", "l"]:
        event = vtk.vtkCommand.LeftButtonPressEvent
    else:
        raise TypeError(f"Side ({side}) not supported. Try `left` or `right`")

    def _click_callback(obj, event):
        # always record the position; only invoke the user callback if given
        self.store_click_position()
        if hasattr(callback, '__call__'):
            if viewport:
                callback(self.click_position)
            else:
                callback(self.pick_click_position())
    self._add_observer(event, _click_callback)
def untrack_click_position(self):
    """Stop tracking the click position."""
    # NOTE(review): track_click_position() registers via _add_observer,
    # which stores tags in self._observers; nothing in the visible code
    # ever sets self._click_observer, so this guard appears to always be
    # False -- verify against the rest of the file.
    if hasattr(self, "_click_observer"):
        self.iren.RemoveObserver(self._click_observer)
        del self._click_observer
def _prep_for_close(self):
    """Make sure a screenshot is acquired before closing.

    This doesn't actually close anything! It just preps the plotter for
    closing.
    """
    # Grab screenshot right before renderer closes so the ``image`` and
    # ``image_depth`` properties keep working afterwards.
    self.last_image = self.screenshot(True, return_img=True)
    self.last_image_depth = self.get_image_depth()
def increment_point_size_and_line_width(self, increment):
    """Increment point size and line width of all actors.

    For every actor in the scene, increment both its point size and
    line width by the given value.

    Parameters
    ----------
    increment : float
        Amount added to each actor's current point size and line width.
    """
    for target in self.renderers:
        for actor in target._actors.values():
            # skip anything without display properties (e.g. widgets)
            if not hasattr(actor, "GetProperty"):
                continue
            prop = actor.GetProperty()
            if hasattr(prop, "SetPointSize"):
                prop.SetPointSize(prop.GetPointSize() + increment)
            if hasattr(prop, "SetLineWidth"):
                prop.SetLineWidth(prop.GetLineWidth() + increment)
    self.render()
    return
def reset_key_events(self):
    """Reset all of the key press events to their defaults."""
    # drop every user-registered binding before re-adding the defaults
    self._key_press_event_callbacks = collections.defaultdict(list)
    self.add_key_event('q', self._prep_for_close)  # Add no matter what
    b_left_down_callback = lambda: self._add_observer('LeftButtonPressEvent', self.left_button_down)
    self.add_key_event('b', b_left_down_callback)
    self.add_key_event('v', lambda: self.isometric_view_interactive())
    self.add_key_event('C', lambda: self.enable_cell_picking())
    # Up/Down zoom the camera; plus/minus grow or shrink rendered geometry
    self.add_key_event('Up', lambda: self.camera.Zoom(1.05))
    self.add_key_event('Down', lambda: self.camera.Zoom(0.95))
    self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1))
    self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1))
def key_press_event(self, obj, event):
    """Listen for key press event.

    Runs every callback registered for the pressed key and records the
    key in ``_last_key``.

    Parameters
    ----------
    obj : vtk.vtkObject
        The VTK object invoking this observer (unused).
    event : str
        Name of the invoking event (unused).
    """
    key = self.iren.GetKeySym()
    # lazy %-formatting: the string is only built if DEBUG is enabled
    log.debug('Key %s pressed', key)
    self._last_key = key
    # Membership test on the mapping itself (``.keys()`` was redundant).
    # The mapping is a defaultdict, so index only when the key is present
    # to avoid creating empty entries.
    if key in self._key_press_event_callbacks:
        callbacks = self._key_press_event_callbacks[key]
        for func in callbacks:
            func()
def left_button_down(self, obj, event_type):
    """Register the event for a left button down click."""
    if hasattr(self.ren_win, 'GetOffScreenFramebuffer'):
        if not self.ren_win.GetOffScreenFramebuffer().GetFBOIndex():
            # must raise a runtime error as this causes a segfault on VTK9
            # NOTE(review): the comment says "runtime error" but a
            # ValueError is raised -- confirm which is intended.
            raise ValueError('Invoking helper with no framebuffer')
    # Get 2D click location on window
    click_pos = self.iren.GetEventPosition()
    # Get corresponding click location in the 3D plot
    picker = vtk.vtkWorldPointPicker()
    picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
    self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
    # replace NaNs (nothing picked) with the origin
    if np.any(np.isnan(self.pickpoint)):
        self.pickpoint[:] = 0
def update_style(self):
    """Update the camera interactor style.

    Instantiates the style class lazily from ``self._style`` and installs
    it on the interactor.
    """
    if self._style_class is None:
        # We need an actually custom style to handle button up events
        self._style_class = _style_factory(self._style)(self)
    return self.iren.SetInteractorStyle(self._style_class)
def enable_trackball_style(self):
"""Set the interactive style to trackball camera.
The trackball camera is the default interactor style.
"""
self._style = 'TrackballCamera'
self._style_class = None
return self.update_style()
def enable_trackball_actor_style(self):
"""Set the interactive style to trackball actor.
This allows to rotate actors around the scene.
"""
self._style = 'TrackballActor'
self._style_class = None
return self.update_style()
def enable_image_style(self):
"""Set the interactive style to image.
Controls:
- Left Mouse button triggers window level events
- CTRL Left Mouse spins the camera around its view plane normal
- SHIFT Left Mouse pans the camera
- CTRL SHIFT Left Mouse dollys (a positional zoom) the camera
- Middle mouse button pans the camera
- Right mouse button dollys the camera.
- SHIFT Right Mouse triggers pick events
"""
self._style = 'Image'
self._style_class = None
return self.update_style()
def enable_joystick_style(self):
"""Set the interactive style to joystick.
It allows the user to move (rotate, pan, etc.) the camera, the point of
view for the scene. The position of the mouse relative to the center of
the scene determines the speed at which the camera moves, and the speed
of the mouse movement determines the acceleration of the camera, so the
camera continues to move even if the mouse if not moving.
For a 3-button mouse, the left button is for rotation, the right button
for zooming, the middle button for panning, and ctrl + left button for
spinning. (With fewer mouse buttons, ctrl + shift + left button is
for zooming, and shift + left button is for panning.)
"""
self._style = 'JoystickCamera'
self._style_class = None
return self.update_style()
def enable_zoom_style(self):
"""Set the interactive style to rubber band zoom.
This interactor style allows the user to draw a rectangle in the render
window using the left mouse button. When the mouse button is released,
the current camera zooms by an amount determined from the shorter side
of the drawn rectangle.
"""
self._style = 'RubberBandZoom'
self._style_class = None
return self.update_style()
def enable_terrain_style(self):
"""Set the interactive style to terrain.
Used to manipulate a camera which is viewing a scene with a natural
view up, e.g., terrain. The camera in such a scene is manipulated by
specifying azimuth (angle around the view up vector) and elevation
(the angle from the horizon).
"""
self._style = 'Terrain'
self._style_class = None
return self.update_style()
def enable_rubber_band_style(self):
    """Switch the interactor to the rubber-band picking style.

    Hitting 'r' and then dragging with the left mouse button draws a
    selection rectangle in the render window.  On release, the
    attached picker operates on the pixel at the center of the
    rectangle; if the picker happens to be a vtkAreaPicker it
    operates on the entire rectangle instead.  Hitting 'p' performs
    the same pick with a 1x1 rectangle.  In other respects the style
    behaves the same as its parent class.
    """
    self._style_class = None
    self._style = 'RubberBandPick'
    return self.update_style()
def enable_rubber_band_2d_style(self):
    """Switch the interactor to the 2D rubber-band style.

    Camera rotation is disabled and zooming affects only the
    camera's parallel scale (the camera is assumed to be in parallel
    projection mode).  A rubber band can be drawn with the left
    button.  Every camera change fires StartInteractionEvent when
    the button is pressed, InteractionEvent while the mouse (or
    wheel) moves, and EndInteractionEvent on release.  Bindings:
    left mouse selects (invokes a SelectionChangedEvent), right
    mouse zooms, middle mouse pans, scroll wheel zooms.
    """
    self._style_class = None
    self._style = 'RubberBand2D'
    return self.update_style()
def hide_axes_all(self):
    """Hide the axes orientation widget in every renderer."""
    for rend in self.renderers:
        rend.hide_axes()
def show_axes_all(self):
    """Show the axes orientation widget in every renderer."""
    for rend in self.renderers:
        rend.show_axes()
def isometric_view_interactive(self):
    """Apply an isometric view to the renderer under the interactor."""
    style = self.iren.GetInteractorStyle()
    target = style.GetCurrentRenderer()
    if target is None:
        # The style has no current renderer assigned yet; fall back
        # to the plotter's active renderer.
        target = self.renderer
    target.view_isometric()
def update(self, stime=1, force_redraw=True):
    """Update window, redraw, process messages query.

    Parameters
    ----------
    stime : int, optional
        Duration of timer that interrupt vtkRenderWindowInteractor in
        milliseconds.

    force_redraw : bool, optional
        Call ``render`` immediately.
    """
    # Off-screen rendering has no interactor to service; nothing to do.
    if self.off_screen:
        return
    # A non-positive duration would be an invalid timer; clamp to 1 ms.
    if stime <= 0:
        stime = 1
    curr_time = time.time()
    # Guard against wall-clock adjustments: the shared timestamp must
    # never sit in the future relative to the current time.
    if Plotter.last_update_time > curr_time:
        Plotter.last_update_time = curr_time
    # Throttle full event-loop spins to the interactor's desired update
    # rate (in updates/second); between spins, optionally just redraw.
    update_rate = self.iren.GetDesiredUpdateRate()
    if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
        # Spin the interactor event loop once, bounded by a repeating
        # timer so that Start() returns instead of blocking forever.
        self.right_timer_id = self.iren.CreateRepeatingTimer(stime)
        self.iren.Start()
        self.iren.DestroyTimer(self.right_timer_id)
        self.render()
        # NOTE(review): class-level attribute — this timestamp is shared
        # across all Plotter instances, so multiple live plotters
        # throttle each other.
        Plotter.last_update_time = curr_time
    elif force_redraw:
        self.render()
def add_mesh(self, mesh, color=None, style=None, scalars=None,
             clim=None, show_edges=None, edge_color=None,
             point_size=5.0, line_width=None, opacity=1.0,
             flip_scalars=False, lighting=None, n_colors=256,
             interpolate_before_map=True, cmap=None, label=None,
             reset_camera=None, scalar_bar_args=None, show_scalar_bar=None,
             stitle=None, multi_colors=False, name=None, texture=None,
             render_points_as_spheres=None, render_lines_as_tubes=False,
             smooth_shading=None, ambient=0.0, diffuse=1.0, specular=0.0,
             specular_power=100.0, nan_color=None, nan_opacity=1.0,
             culling=None, rgb=False, categories=False,
             use_transparency=False, below_color=None, above_color=None,
             annotations=None, pickable=True, preference="point",
             log_scale=False, render=True, **kwargs):
    """Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene.

    This method is using a mesh representation to view the surfaces
    and/or geometry of datasets. For volume rendering, see
    :func:`pyvista.BasePlotter.add_volume`.

    Parameters
    ----------
    mesh : pyvista.Common or pyvista.MultiBlock
        Any PyVista or VTK mesh is supported. Also, any dataset
        that :func:`pyvista.wrap` can handle including NumPy arrays of XYZ
        points.

    color : string or 3 item list, optional, defaults to white
        Use to make the entire mesh have a single solid color.
        Either a string, RGB list, or hex color string. For example:
        ``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
        ``color='#FFFFFF'``. Color will be overridden if scalars are
        specified.

    style : string, optional
        Visualization style of the mesh. One of the following:
        ``style='surface'``, ``style='wireframe'``, ``style='points'``.
        Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a
        wireframe of the outer geometry.

    scalars : str or numpy.ndarray, optional
        Scalars used to "color" the mesh. Accepts a string name of an
        array that is present on the mesh or an array equal
        to the number of cells or the number of points in the
        mesh. Array should be sized as a single vector. If both
        ``color`` and ``scalars`` are ``None``, then the active scalars are
        used.

    clim : 2 item list, optional
        Color bar range for scalars. Defaults to minimum and
        maximum of scalars array. Example: ``[-1, 2]``. ``rng``
        is also an accepted alias for this.

    show_edges : bool, optional
        Shows the edges of a mesh. Does not apply to a wireframe
        representation.

    edge_color : string or 3 item list, optional, defaults to black
        The solid color to give the edges when ``show_edges=True``.
        Either a string, RGB list, or hex color string.

    point_size : float, optional
        Point size of any nodes in the dataset plotted. Also applicable
        when style='points'. Default ``5.0``

    line_width : float, optional
        Thickness of lines. Only valid for wireframe and surface
        representations. Default None.

    opacity : float, str, array-like
        Opacity of the mesh. If a single float value is given, it will be
        the global opacity of the mesh and uniformly applied everywhere -
        should be between 0 and 1. A string can also be specified to map
        the scalars range to a predefined opacity transfer function
        (options include: 'linear', 'linear_r', 'geom', 'geom_r').
        A string could also be used to map a scalars array from the mesh to
        the opacity (must have same number of elements as the
        ``scalars`` argument). Or you can pass a custom made transfer
        function that is an array either ``n_colors`` in length or shorter.

    flip_scalars : bool, optional
        Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
        this as well.

    lighting : bool, optional
        Enable or disable view direction lighting. Default False.

    n_colors : int, optional
        Number of colors to use when displaying scalars. Defaults to 256.
        The scalar bar will also have this many colors.

    interpolate_before_map : bool, optional
        Enabling makes for a smoother scalars display. Default is True.
        When False, OpenGL will interpolate the mapped colors which can
        result is showing colors that are not present in the color map.

    cmap : str, list, optional
        Name of the Matplotlib colormap to us when mapping the ``scalars``.
        See available Matplotlib colormaps. Only applicable for when
        displaying ``scalars``. Requires Matplotlib to be installed.
        ``colormap`` is also an accepted alias for this. If ``colorcet`` or
        ``cmocean`` are installed, their colormaps can be specified by name.
        You can also specify a list of colors to override an
        existing colormap with a custom one. For example, to
        create a three color colormap you might specify
        ``['green', 'red', 'blue']``

    label : str, optional
        String label to use when adding a legend to the scene with
        :func:`pyvista.BasePlotter.add_legend`

    reset_camera : bool, optional
        Reset the camera after adding this mesh to the scene

    scalar_bar_args : dict, optional
        Dictionary of keyword arguments to pass when adding the scalar bar
        to the scene. For options, see
        :func:`pyvista.BasePlotter.add_scalar_bar`.

    show_scalar_bar : bool
        If False, a scalar bar will not be added to the scene. Defaults
        to ``True``.

    stitle : string, optional
        Scalar bar title. By default the scalar bar is given a title of the
        the scalars array used to color the mesh.
        To create a bar with no title, use an empty string (i.e. '').

    multi_colors : bool, optional
        If a ``MultiBlock`` dataset is given this will color each
        block by a solid color using matplotlib's color cycler.

    name : str, optional
        The name for the added mesh/actor so that it can be easily
        updated. If an actor of this name already exists in the
        rendering window, it will be replaced by the new actor.

    texture : vtk.vtkTexture or np.ndarray or boolean, optional
        A texture to apply if the input mesh has texture
        coordinates. This will not work with MultiBlock
        datasets. If set to ``True``, the first available texture
        on the object will be used. If a string name is given, it
        will pull a texture with that name associated to the input
        mesh.

    render_points_as_spheres : bool, optional

    render_lines_as_tubes : bool, optional

    smooth_shading : bool, optional

    ambient : float, optional
        When lighting is enabled, this is the amount of light from
        0 to 1 that reaches the actor when not directed at the
        light source emitted from the viewer. Default 0.0

    diffuse : float, optional
        The diffuse lighting coefficient. Default 1.0

    specular : float, optional
        The specular lighting coefficient. Default 0.0

    specular_power : float, optional
        The specular power. Between 0.0 and 128.0

    nan_color : string or 3 item list, optional, defaults to gray
        The color to use for all ``NaN`` values in the plotted scalar
        array.

    nan_opacity : float, optional
        Opacity of ``NaN`` values. Should be between 0 and 1.
        Default 1.0

    culling : str, optional
        Does not render faces that are culled. Options are ``'front'`` or
        ``'back'``. This can be helpful for dense surface meshes,
        especially when edges are visible, but can cause flat
        meshes to be partially displayed. Defaults ``False``.

    rgb : bool, optional
        If an 2 dimensional array is passed as the scalars, plot those
        values as RGB(A) colors! ``rgba`` is also accepted alias for this.
        Opacity (the A) is optional.

    categories : bool, optional
        If set to ``True``, then the number of unique values in the scalar
        array will be used as the ``n_colors`` argument.

    use_transparency : bool, optional
        Invert the opacity mappings and make the values correspond to
        transparency.

    below_color : string or 3 item list, optional
        Solid color for values below the scalars range (``clim``). This
        will automatically set the scalar bar ``below_label`` to
        ``'Below'``

    above_color : string or 3 item list, optional
        Solid color for values below the scalars range (``clim``). This
        will automatically set the scalar bar ``above_label`` to
        ``'Above'``

    annotations : dict, optional
        Pass a dictionary of annotations. Keys are the float values in the
        scalars range to annotate on the scalar bar and the values are the
        the string annotations.

    pickable : bool
        Set whether this mesh is pickable

    render : bool, optional
        Force a render when True. Default ``True``.

    Returns
    -------
    actor: vtk.vtkActor
        VTK actor of the mesh.
    """
    # Convert the VTK data object to a pyvista wrapped object if necessary
    if not is_pyvista_dataset(mesh):
        mesh = wrap(mesh)
        if not is_pyvista_dataset(mesh):
            raise TypeError(f'Object type ({type(mesh)}) not supported for plotting in PyVista.'
                            )

    ##### Parse arguments to be used for all meshes #####

    # Fill unset options from the global rcParams defaults.
    if scalar_bar_args is None:
        scalar_bar_args = {'n_colors': n_colors}
    if show_edges is None:
        show_edges = rcParams['show_edges']
    if edge_color is None:
        edge_color = rcParams['edge_color']
    if show_scalar_bar is None:
        show_scalar_bar = rcParams['show_scalar_bar']
    if lighting is None:
        lighting = rcParams['lighting']
    if smooth_shading is None:
        smooth_shading = rcParams['smooth_shading']
    # supported aliases (pop them so assert_empty_kwargs passes below)
    clim = kwargs.pop('rng', clim)
    cmap = kwargs.pop('colormap', cmap)
    culling = kwargs.pop("backface_culling", culling)

    if render_points_as_spheres is None:
        render_points_as_spheres = rcParams['render_points_as_spheres']

    # Default actor name is derived from the mesh type and its memory
    # address so re-adding the same object replaces the prior actor.
    if name is None:
        name = f'{type(mesh).__name__}({mesh.memory_address})'

    # NaN color becomes an RGBA list: parsed RGB plus nan_opacity.
    if nan_color is None:
        nan_color = rcParams['nan_color']
    nan_color = list(parse_color(nan_color))
    nan_color.append(nan_opacity)

    if color is True:
        color = rcParams['color']

    if texture is False:
        texture = None

    if culling is True:
        culling = 'backface'

    rgb = kwargs.pop('rgba', rgb)

    if "scalar" in kwargs:
        raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
    assert_empty_kwargs(**kwargs)

    ##### Handle composite datasets #####

    if isinstance(mesh, pyvista.MultiBlock):
        # first check the scalars
        if clim is None and scalars is not None:
            # Get the data range across the array for all blocks
            # if scalars specified
            if isinstance(scalars, str):
                clim = mesh.get_data_range(scalars)
            else:
                # TODO: an array was given... how do we deal with
                #       that? Possibly a 2D arrays or list of
                #       arrays where first index corresponds to
                #       the block? This could get complicated real
                #       quick.
                raise TypeError('scalars array must be given as a string name for multiblock datasets.')

        # Snapshot the current argument values so each block can be
        # plotted recursively with the same settings.
        the_arguments = locals()
        the_arguments.pop('self')
        the_arguments.pop('mesh')
        the_arguments.pop('kwargs')

        if multi_colors:
            # Compute unique colors for each index of the block
            if has_matplotlib:
                from itertools import cycle
                cycler = matplotlib.rcParams['axes.prop_cycle']
                colors = cycle(cycler)
            else:
                multi_colors = False
                logging.warning('Please install matplotlib for color cycles')

        # Now iteratively plot each element of the multiblock dataset
        actors = []
        for idx in range(mesh.GetNumberOfBlocks()):
            if mesh[idx] is None:
                continue
            # Get a good name to use
            next_name = f'{name}-{idx}'
            # Get the data object
            if not is_pyvista_dataset(mesh[idx]):
                data = wrap(mesh.GetBlock(idx))
                if not is_pyvista_dataset(mesh[idx]):
                    continue # move on if we can't plot it
            else:
                data = mesh.GetBlock(idx)
            if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1):
                # Note that a block can exist but be None type
                # or it could have zeros points (be empty) after filtering
                continue
            # Now check that scalars is available for this dataset
            if isinstance(data, vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None:
                ts = None
            else:
                ts = scalars
            if multi_colors:
                color = next(colors)['color']

            ## Add to the scene (recursive call per block)
            the_arguments['color'] = color
            the_arguments['scalars'] = ts
            the_arguments['name'] = next_name
            the_arguments['texture'] = None
            a = self.add_mesh(data, **the_arguments)
            actors.append(a)

        if (reset_camera is None and not self.camera_set) or reset_camera:
            cpos = self.get_default_cam_pos()
            self.camera_position = cpos
            self.camera_set = False
            self.reset_camera()
        return actors

    ##### Plot a single PyVista mesh #####

    # Compute surface normals if using smooth shading
    if smooth_shading:
        # extract surface if mesh is exterior
        if not isinstance(mesh, pyvista.PolyData):
            grid = mesh
            mesh = grid.extract_surface()
            ind = mesh.point_arrays['vtkOriginalPointIds']
            # remap scalars to the surface's points
            if isinstance(scalars, np.ndarray):
                scalars = scalars[ind]
        # compute_normals can disturb texture coordinates, so stash and
        # restore them around the call when a texture will be applied
        if texture:
            _tcoords = mesh.t_coords
        mesh.compute_normals(cell_normals=False, inplace=True)
        if texture:
            mesh.t_coords = _tcoords

    if mesh.n_points < 1:
        raise ValueError('Empty meshes cannot be plotted. Input mesh has zero points.')

    # Try to plot something if no preference given
    if scalars is None and color is None and texture is None:
        # Prefer texture first
        if len(list(mesh.textures.keys())) > 0:
            texture = True
        # If no texture, plot any active scalar
        else:
            # Make sure scalars components are not vectors/tuples
            scalars = mesh.active_scalars_name
            # Don't allow plotting of string arrays by default
            if scalars is not None:# and np.issubdtype(mesh.active_scalars.dtype, np.number):
                if stitle is None:
                    stitle = scalars
            else:
                scalars = None

    # set main values
    self.mesh = mesh
    self.mapper = make_mapper(vtk.vtkDataSetMapper)
    self.mapper.SetInputData(self.mesh)
    self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
    if interpolate_before_map:
        self.mapper.InterpolateScalarsBeforeMappingOn()

    actor = vtk.vtkActor()
    prop = vtk.vtkProperty()
    actor.SetMapper(self.mapper)
    actor.SetProperty(prop)

    # Make sure scalars is a numpy array after this point
    original_scalar_name = None
    if isinstance(scalars, str):
        self.mapper.SetArrayName(scalars)
        original_scalar_name = scalars
        scalars = get_array(mesh, scalars,
                            preference=preference, err=True)
        if stitle is None:
            stitle = original_scalar_name

    if texture is True or isinstance(texture, (str, int)):
        texture = mesh._activate_texture(texture)

    if texture:
        if isinstance(texture, np.ndarray):
            texture = numpy_to_texture(texture)
        if not isinstance(texture, (vtk.vtkTexture, vtk.vtkOpenGLTexture)):
            raise TypeError(f'Invalid texture type ({type(texture)})')
        if mesh.GetPointData().GetTCoords() is None:
            raise ValueError('Input mesh does not have texture coordinates to support the texture.')
        actor.SetTexture(texture)
        # Set color to white by default when using a texture
        if color is None:
            color = 'white'
        if scalars is None:
            show_scalar_bar = False
        self.mapper.SetScalarModeToUsePointFieldData()

        # see https://github.com/pyvista/pyvista/issues/950
        mesh.set_active_scalars(None)

    # Handle making opacity array =========================================

    _custom_opac = False
    if isinstance(opacity, str):
        try:
            # Get array from mesh
            opacity = get_array(mesh, opacity,
                                preference=preference, err=True)
            if np.any(opacity > 1):
                warnings.warn("Opacity scalars contain values over 1")
            if np.any(opacity < 0):
                warnings.warn("Opacity scalars contain values less than 0")
            _custom_opac = True
        # NOTE(review): bare except — any failure (not just a missing
        # array) falls back to the named transfer-function path.
        except:
            # Or get opacity transfer function
            opacity = opacity_transfer_function(opacity, n_colors)
        else:
            # NOTE(review): assumes scalars is an array here — if scalars
            # is None this raises AttributeError; confirm callers always
            # pass scalars alongside a per-point opacity array.
            if scalars.shape[0] != opacity.shape[0]:
                raise ValueError('Opacity array and scalars array must have the same number of elements.')
    elif isinstance(opacity, (np.ndarray, list, tuple)):
        opacity = np.array(opacity)
        if scalars.shape[0] == opacity.shape[0]:
            # User could pass an array of opacities for every point/cell
            _custom_opac = True
        else:
            opacity = opacity_transfer_function(opacity, n_colors)

    # Invert opacities: values in [0, 1] flip about 1; byte-valued
    # arrays flip about 255.
    if use_transparency and np.max(opacity) <= 1.0:
        opacity = 1 - opacity
    elif use_transparency and isinstance(opacity, np.ndarray):
        opacity = 255 - opacity

    # Scalars formatting ==================================================
    if cmap is None: # Set default map if matplotlib is available
        if has_matplotlib:
            cmap = rcParams['cmap']
    # Set the array title for when it is added back to the mesh
    if _custom_opac:
        title = '__custom_rgba'
    elif stitle is None:
        title = 'Data'
    else:
        title = stitle

    if scalars is not None:
        # if scalars is a string, then get the first array found with that name
        if not isinstance(scalars, np.ndarray):
            scalars = np.asarray(scalars)

        _using_labels = False
        if not np.issubdtype(scalars.dtype, np.number):
            # raise TypeError('Non-numeric scalars are currently not supported for plotting.')
            # TODO: If str array, digitive and annotate
            cats, scalars = np.unique(scalars.astype('|S'), return_inverse=True)
            values = np.unique(scalars)
            # Half-unit padding so each integer code maps to one color bin.
            clim = [np.min(values) - 0.5, np.max(values) + 0.5]
            title = f'{title}-digitized'
            n_colors = len(cats)
            scalar_bar_args.setdefault('n_labels', 0)
            _using_labels = True

        if rgb:
            if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4:
                raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.')

        if scalars.ndim != 1:
            if rgb:
                pass
            elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells):
                # Vector-valued per-point/cell array: plot its magnitude.
                scalars = np.linalg.norm(scalars.copy(), axis=1)
                title = f'{title}-normed'
            else:
                scalars = scalars.ravel()

        if scalars.dtype == np.bool_:
            scalars = scalars.astype(np.float_)

        def prepare_mapper(scalars):
            # Attach the scalars to the mesh and configure the mapper;
            # called a second time below when custom RGBA is computed.
            # Scalars interpolation approach
            if scalars.shape[0] == mesh.n_points:
                self.mesh.point_arrays.append(scalars, title, True)
                self.mapper.SetScalarModeToUsePointData()
            elif scalars.shape[0] == mesh.n_cells:
                self.mesh.cell_arrays.append(scalars, title, True)
                self.mapper.SetScalarModeToUseCellData()
            else:
                raise_not_matching(scalars, mesh)
            # Common tasks
            self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
            if interpolate_before_map:
                self.mapper.InterpolateScalarsBeforeMappingOn()
            if rgb or _custom_opac:
                self.mapper.SetColorModeToDirectScalars()
            else:
                self.mapper.SetColorModeToMapScalars()
            return

        prepare_mapper(scalars)
        table = self.mapper.GetLookupTable()
        if log_scale:
            table.SetScaleToLog10()

        if _using_labels:
            table.SetAnnotations(convert_array(values), convert_string_array(cats))

        if isinstance(annotations, dict):
            for val, anno in annotations.items():
                table.SetAnnotation(float(val), str(anno))

        # Set scalars range
        if clim is None:
            clim = [np.nanmin(scalars), np.nanmax(scalars)]
        elif isinstance(clim, float) or isinstance(clim, int):
            # A single number means a symmetric range about zero.
            clim = [-clim, clim]

        if np.any(clim) and not rgb:
            self.mapper.scalar_range = clim[0], clim[1]

        table.SetNanColor(nan_color)
        if above_color:
            table.SetUseAboveRangeColor(True)
            table.SetAboveRangeColor(*parse_color(above_color, opacity=1))
            scalar_bar_args.setdefault('above_label', 'Above')
        if below_color:
            table.SetUseBelowRangeColor(True)
            table.SetBelowRangeColor(*parse_color(below_color, opacity=1))
            scalar_bar_args.setdefault('below_label', 'Below')

        if cmap is not None:
            if not has_matplotlib:
                cmap = None
                logging.warning('Please install matplotlib for color maps.')

            cmap = get_cmap_safe(cmap)
            if categories:
                if categories is True:
                    n_colors = len(np.unique(scalars))
                elif isinstance(categories, int):
                    n_colors = categories
            # Sample the colormap into a uint8 RGBA lookup table.
            ctable = cmap(np.linspace(0, 1, n_colors))*255
            ctable = ctable.astype(np.uint8)
            # Set opactities
            if isinstance(opacity, np.ndarray) and not _custom_opac:
                ctable[:,-1] = opacity
            if flip_scalars:
                ctable = np.ascontiguousarray(ctable[::-1])
            table.SetTable(VN.numpy_to_vtk(ctable))
            if _custom_opac:
                # need to round the colors here since we're
                # directly displaying the colors
                hue = normalize(scalars, minimum=clim[0], maximum=clim[1])
                scalars = np.round(hue*n_colors)/n_colors
                scalars = cmap(scalars)*255
                scalars[:, -1] *= opacity
                scalars = scalars.astype(np.uint8)
                prepare_mapper(scalars)

        else:  # no cmap specified
            if flip_scalars:
                table.SetHueRange(0.0, 0.66667)
            else:
                table.SetHueRange(0.66667, 0.0)
    else:
        self.mapper.SetScalarModeToUseFieldData()

    # Set actor properties ================================================

    # select view style
    if not style:
        style = 'surface'
    style = style.lower()
    if style == 'wireframe':
        prop.SetRepresentationToWireframe()
        if color is None:
            color = rcParams['outline_color']
    elif style == 'points':
        prop.SetRepresentationToPoints()
    elif style == 'surface':
        prop.SetRepresentationToSurface()
    else:
        raise ValueError('Invalid style. Must be one of the following:\n'
                         '\t"surface"\n'
                         '\t"wireframe"\n'
                         '\t"points"\n')

    prop.SetPointSize(point_size)
    prop.SetAmbient(ambient)
    prop.SetDiffuse(diffuse)
    prop.SetSpecular(specular)
    prop.SetSpecularPower(specular_power)

    if smooth_shading:
        prop.SetInterpolationToPhong()
    else:
        prop.SetInterpolationToFlat()
    # edge display style
    if show_edges:
        prop.EdgeVisibilityOn()

    rgb_color = parse_color(color)
    prop.SetColor(rgb_color)
    if isinstance(opacity, (float, int)):
        prop.SetOpacity(opacity)
    prop.SetEdgeColor(parse_color(edge_color))

    if render_points_as_spheres:
        prop.SetRenderPointsAsSpheres(render_points_as_spheres)
    if render_lines_as_tubes:
        prop.SetRenderLinesAsTubes(render_lines_as_tubes)

    # legend label
    if label:
        if not isinstance(label, str):
            raise TypeError('Label must be a string')
        geom = pyvista.single_triangle()
        if scalars is not None:
            geom = pyvista.Box()
            rgb_color = parse_color('black')
        geom.points -= geom.center
        self._labels.append([geom, label, rgb_color])

    # lighting display style
    if not lighting:
        prop.LightingOff()

    # set line thickness
    if line_width:
        prop.SetLineWidth(line_width)

    # Add scalar bar if available
    if stitle is not None and show_scalar_bar and (not rgb or _custom_opac):
        self.add_scalar_bar(stitle, **scalar_bar_args)

    self.add_actor(actor,
                   reset_camera=reset_camera,
                   name=name, culling=culling,
                   pickable=pickable,
                   render=render)

    self.renderer.Modified()

    return actor
def add_volume(self, volume, scalars=None, clim=None, resolution=None,
opacity='linear', n_colors=256, cmap=None, flip_scalars=False,
reset_camera=None, name=None, ambient=0.0, categories=False,
culling=False, multi_colors=False,
blending='composite', mapper=None,
stitle=None, scalar_bar_args=None, show_scalar_bar=None,
annotations=None, pickable=True, preference="point",
opacity_unit_distance=None, shade=False,
diffuse=0.7, specular=0.2, specular_power=10.0,
render=True, **kwargs):
"""Add a volume, rendered using a smart mapper by default.
Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`.
Parameters
----------
volume : 3D numpy.ndarray or pyvista.UniformGrid
The input volume to visualize. 3D numpy arrays are accepted.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If ``scalars`` is
``None``, then the active scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
opacity : string or numpy.ndarray, optional
Opacity mapping for the scalars array.
A string can also be specified to map the scalars range to a
predefined opacity transfer function (options include: 'linear',
'linear_r', 'geom', 'geom_r'). Or you can pass a custom made
transfer function that is an array either ``n_colors`` in length or
shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, optional
Name of the Matplotlib colormap to us when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults ``False``.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
multi_colors : bool, optional
Whether or not to use multiple colors when plotting MultiBlock
object. Blocks will be colored sequentially as 'Reds', 'Greens',
'Blues', and 'Grays'.
blending : str, optional
Blending mode for visualisation of the input object(s). Can be
one of 'additive', 'maximum', 'minimum', 'composite', or
'average'. Defaults to 'additive'.
mapper : str, optional
Volume mapper to use given by name. Options include:
``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and ``'smart'``.
If ``None`` the ``"volume_mapper"`` in the ``rcParams`` is used.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the scalar bar
to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If False, a scalar bar will not be added to the scene. Defaults
to ``True``.
stitle : string, optional
Scalar bar title. By default the scalar bar is given a title of the
the scalars array used to color the mesh.
To create a bar with no title, use an empty string (i.e. '').
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float values in the
scalars range to annotate on the scalar bar and the values are the
the string annotations.
opacity_unit_distance : float
Set/Get the unit distance on which the scalar opacity transfer
function is defined. Meaning that over that distance, a given
opacity (from the transfer function) is accumulated. This is
adjusted for the actual sampling distance during rendering. By
default, this is the length of the diagonal of the bounding box of
the volume divided by the dimensions.
shade : bool
Default off. If shading is turned on, the mapper may perform
shading calculations - in some cases shading does not apply
(for example, in a maximum intensity projection) and therefore
shading will not be performed even if this flag is on.
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0
specular : float, optional
The specular lighting coefficient. Default 0.0
specular_power : float, optional
The specular power. Between 0.0 and 128.0
render : bool, optional
Force a render when True. Default ``True``.
Returns
-------
actor: vtk.vtkVolume
VTK volume of the input data.
"""
# Handle default arguments
# Supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
if scalar_bar_args is None:
scalar_bar_args = {}
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if culling is True:
culling = 'backface'
if mapper is None:
mapper = rcParams["volume_mapper"]
# only render when the plotter has already been shown
if render is None:
render = not self._first_time
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(volume):
if isinstance(volume, np.ndarray):
volume = wrap(volume)
if resolution is None:
resolution = [1,1,1]
elif len(resolution) != 3:
raise ValueError('Invalid resolution dimensions.')
volume.spacing = resolution
else:
volume = wrap(volume)
if not is_pyvista_dataset(volume):
raise TypeError(f'Object type ({type(volume)}) not supported for plotting in PyVista.')
else:
# HACK: Make a copy so the original object is not altered.
# Also, place all data on the nodes as issues arise when
# volume rendering on the cells.
volume = volume.cell_data_to_point_data()
if name is None:
name = f'{type(volume).__name__}({volume.memory_address})'
if isinstance(volume, pyvista.MultiBlock):
from itertools import cycle
cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples'])
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(volume.GetNumberOfBlocks()):
if volume[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
block = wrap(volume.GetBlock(idx))
if resolution is None:
try:
block_resolution = block.GetSpacing()
except AttributeError:
block_resolution = resolution
else:
block_resolution = resolution
if multi_colors:
color = next(cycler)
else:
color = cmap
a = self.add_volume(block, resolution=block_resolution, opacity=opacity,
n_colors=n_colors, cmap=color, flip_scalars=flip_scalars,
reset_camera=reset_camera, name=next_name,
ambient=ambient, categories=categories,
culling=culling, clim=clim,
mapper=mapper, pickable=pickable,
opacity_unit_distance=opacity_unit_distance,
shade=shade, diffuse=diffuse, specular=specular,
specular_power=specular_power, render=render)
actors.append(a)
return actors
if not isinstance(volume, pyvista.UniformGrid):
raise TypeError(f'Type {type(volume)} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.')
if opacity_unit_distance is None:
opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1)
if scalars is None:
# Make sure scalars components are not vectors/tuples
scalars = volume.active_scalars
# Don't allow plotting of string arrays by default
if scalars is not None and np.issubdtype(scalars.dtype, np.number):
if stitle is None:
stitle = volume.active_scalars_info[1]
else:
raise ValueError('No scalars to use for volume rendering.')
elif isinstance(scalars, str):
pass
##############
title = 'Data' if stitle is None else stitle
if isinstance(scalars, str):
title = scalars
scalars = get_array(volume, scalars,
preference=preference, err=True)
if stitle is None:
stitle = title
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if not np.issubdtype(scalars.dtype, np.number):
raise TypeError('Non-numeric scalars are currently not supported for volume rendering.')
if scalars.ndim != 1:
scalars = scalars.ravel()
if scalars.dtype == np.bool_ or scalars.dtype == np.uint8:
scalars = scalars.astype(np.float_)
# Define mapper, volume, and add the correct properties
mappers = {
'fixed_point': vtk.vtkFixedPointVolumeRayCastMapper,
'gpu': vtk.vtkGPUVolumeRayCastMapper,
'open_gl': vtk.vtkOpenGLGPUVolumeRayCastMapper,
'smart': vtk.vtkSmartVolumeMapper,
}
if not isinstance(mapper, str) or mapper not in mappers.keys():
raise TypeError(f"Mapper ({mapper}) unknown. Available volume mappers include: {', '.join(mappers.keys())}")
self.mapper = make_mapper(mappers[mapper])
# Scalars interpolation approach
if scalars.shape[0] == volume.n_points:
volume.point_arrays.append(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == volume.n_cells:
volume.cell_arrays.append(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, volume)
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
###############
scalars = scalars.astype(np.float_)
with np.errstate(invalid='ignore'):
idxs0 = scalars < clim[0]
idxs1 = scalars > clim[1]
scalars[idxs0] = clim[0]
scalars[idxs1] = clim[1]
scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255
# scalars = scalars.astype(np.uint8)
volume[title] = scalars
self.mapper.scalar_range = clim
# Set colormap and build lookup table
table = vtk.vtkLookupTable()
# table.SetNanColor(nan_color) # NaN's are chopped out with current implementation
# above/below colors not supported with volume rendering
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
if cmap is None: # Set default map if matplotlib is available
if has_matplotlib:
cmap = rcParams['cmap']
if cmap is not None:
if not has_matplotlib:
raise ImportError('Please install matplotlib for volume rendering.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
if flip_scalars:
cmap = cmap.reversed()
color_tf = vtk.vtkColorTransferFunction()
for ii in range(n_colors):
color_tf.AddRGBPoint(ii, *cmap(ii)[:-1])
# Set opacities
if isinstance(opacity, (float, int)):
opacity_values = [opacity] * n_colors
elif isinstance(opacity, str):
opacity_values = pyvista.opacity_transfer_function(opacity, n_colors)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
opacity_values = opacity_transfer_function(opacity, n_colors)
opacity_tf = vtk.vtkPiecewiseFunction()
for ii in range(n_colors):
opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors)
# Now put color tf and opacity tf into a lookup table for the scalar bar
table.SetNumberOfTableValues(n_colors)
lut = cmap(np.array(range(n_colors))) * 255
lut[:,3] = opacity_values
lut = lut.astype(np.uint8)
table.SetTable(VN.numpy_to_vtk(lut))
table.SetRange(*clim)
self.mapper.lookup_table = table
self.mapper.SetInputData(volume)
blending = blending.lower()
if blending in ['additive', 'add', 'sum']:
self.mapper.SetBlendModeToAdditive()
elif blending in ['average', 'avg', 'average_intensity']:
self.mapper.SetBlendModeToAverageIntensity()
elif blending in ['composite', 'comp']:
self.mapper.SetBlendModeToComposite()
elif blending in ['maximum', 'max', 'maximum_intensity']:
self.mapper.SetBlendModeToMaximumIntensity()
elif blending in ['minimum', 'min', 'minimum_intensity']:
self.mapper.SetBlendModeToMinimumIntensity()
else:
raise ValueError(f'Blending mode \'{blending}\' invalid. ' +
'Please choose one ' + 'of \'additive\', '
'\'composite\', \'minimum\' or ' + '\'maximum\'.')
self.mapper.Update()
self.volume = vtk.vtkVolume()
self.volume.SetMapper(self.mapper)
prop = vtk.vtkVolumeProperty()
prop.SetColor(color_tf)
prop.SetScalarOpacity(opacity_tf)
prop.SetAmbient(ambient)
prop.SetScalarOpacityUnitDistance(opacity_unit_distance)
prop.SetShade(shade)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
self.volume.SetProperty(prop)
actor, prop = self.add_actor(self.volume, reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable, render=render)
# Add scalar bar
if stitle is not None and show_scalar_bar:
self.add_scalar_bar(stitle, **scalar_bar_args)
self.renderer.Modified()
return actor
def update_scalar_bar_range(self, clim, name=None):
    """Update the value range of the active or named scalar bar.

    Parameters
    ----------
    clim : 2 item list or single float/int
        The new range of the scalar bar. Example: ``[-1, 2]``. A single
        number ``v`` is expanded to the symmetric range ``[-v, v]``.

    name : str, optional
        The title of the scalar bar to update. When omitted, the
        plotter's active mapper is updated instead.

    Raises
    ------
    TypeError
        If ``clim`` is not a length-2 iterable after expansion.
    AttributeError
        If ``name`` is None and the plotter has no active mapper.
    KeyError
        If ``name`` does not match any scalar bar in this plotter.

    """
    # A single number means a symmetric range about zero.
    if isinstance(clim, (float, int)):
        clim = [-clim, clim]
    if len(clim) != 2:
        raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
    if name is None:
        if not hasattr(self, 'mapper'):
            raise AttributeError('This plotter does not have an active mapper.')
        self.mapper.scalar_range = clim
        return

    # Use the name to find the mappers registered for that scalar bar.
    try:
        mappers = self._scalar_bar_mappers[name]
    except KeyError:
        # BUG FIX: the original message contained a literal '{}' because
        # the name was never interpolated into the string.
        raise KeyError(f'Name ({name}) not valid/not found in this plotter.') from None
    for mapper_helper in mappers:
        mapper_helper.scalar_range = clim
def clear(self):
    """Remove every actor and reset all scalar-bar bookkeeping."""
    # Clear each subplot renderer as well as the shadow renderer.
    for renderer in self.renderers:
        renderer.clear()
    self._shadow_renderer.clear()
    # Background renderers may contain ``None`` placeholders.
    for background in self._background_renderers:
        if background is not None:
            background.clear()
    # Reset scalar-bar slot tracking so new bars start from a clean slate.
    self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
    self._scalar_bar_slot_lookup = {}
    self._scalar_bar_ranges = {}
    self._scalar_bar_mappers = {}
    self._scalar_bar_actors = {}
    self._scalar_bar_widgets = {}
    self.mesh = None
def link_views(self, views=0):
    """Link the views' cameras.

    Parameters
    ----------
    views : int | tuple or list
        If ``views`` is int, link the views to the given view
        index or if ``views`` is a tuple or a list, link the given
        views cameras.

    """
    # A single index links every renderer to that renderer's camera.
    if isinstance(views, (int, np.integer)):
        shared_camera = self.renderers[views].camera
        for renderer in self.renderers:
            renderer.camera = shared_camera
        return
    views = np.asarray(views)
    if not np.issubdtype(views.dtype, np.integer):
        raise TypeError('Expected type is int, list or tuple:'
                        f'{type(views)} is given')
    # A sequence of indices: share the first listed view's camera.
    shared_camera = self.renderers[views[0]].camera
    for view_index in views:
        self.renderers[view_index].camera = shared_camera
def unlink_views(self, views=None):
    """Unlink the views' cameras.

    Parameters
    ----------
    views : None | int | tuple or list
        If ``views`` is None unlink all the views, if ``views``
        is int unlink the selected view's camera or if ``views``
        is a tuple or a list, unlink the given views cameras.

    """
    def _give_fresh_camera(renderer):
        # Assign an independent camera, then re-frame the scene.
        renderer.camera = Camera()
        renderer.reset_camera()

    if views is None:
        for renderer in self.renderers:
            _give_fresh_camera(renderer)
    elif isinstance(views, int):
        _give_fresh_camera(self.renderers[views])
    elif isinstance(views, collections.abc.Iterable):
        for view_index in views:
            _give_fresh_camera(self.renderers[view_index])
    else:
        raise TypeError('Expected type is None, int, list or tuple:'
                        f'{type(views)} is given')
def add_scalar_bar(self, title=None, n_labels=5, italic=False,
                   bold=False, title_font_size=None,
                   label_font_size=None, color=None,
                   font_family=None, shadow=False, mapper=None,
                   width=None, height=None, position_x=None,
                   position_y=None, vertical=None,
                   interactive=None, fmt=None, use_opacity=True,
                   outline=False, nan_annotation=False,
                   below_label=None, above_label=None,
                   background_color=None, n_colors=None, fill=False,
                   render=True):
    """Create scalar bar using the ranges as set by the last input mesh.

    Parameters
    ----------
    title : string, optional
        Title of the scalar bar. Default None

    n_labels : int, optional
        Number of labels to use for the scalar bar.

    italic : bool, optional
        Italicises title and bar labels. Default False.

    bold : bool, optional
        Bolds title and bar labels. Default True

    title_font_size : float, optional
        Sets the size of the title font. Defaults to None and is sized
        automatically.

    label_font_size : float, optional
        Sets the size of the title font. Defaults to None and is sized
        automatically.

    color : string or 3 item list, optional, defaults to white
        Either a string, rgb list, or hex color string. For example:

        * ``color='white'``
        * ``color='w'``
        * ``color=[1, 1, 1]``
        * ``color='#FFFFFF'``

    font_family : string, optional
        Font family. Must be either courier, times, or arial.

    shadow : bool, optional
        Adds a black shadow to the text. Defaults to False

    width : float, optional
        The percentage (0 to 1) width of the window for the colorbar

    height : float, optional
        The percentage (0 to 1) height of the window for the colorbar

    position_x : float, optional
        The percentage (0 to 1) along the windows's horizontal
        direction to place the bottom left corner of the colorbar

    position_y : float, optional
        The percentage (0 to 1) along the windows's vertical
        direction to place the bottom left corner of the colorbar

    vertical : bool, optional
        Use vertical or horizontal orientation. Defaults to the
        theme's ``colorbar_orientation`` setting.

    interactive : bool, optional
        Use a widget to control the size and location of the scalar bar.

    fmt : str, optional
        ``printf``-style format string for the scalar bar labels.
        Defaults to the theme's ``rcParams['font']['fmt']``.

    use_opacity : bool, optional
        Optionally display the opacity mapping on the scalar bar

    outline : bool, optional
        Optionally outline the scalar bar to make opacity mappings more
        obvious.

    nan_annotation : bool, optional
        Annotate the NaN color

    below_label : str, optional
        String annotation for values below the scalars range

    above_label : str, optional
        String annotation for values above the scalars range

    background_color : array, optional
        The color used for the background in RGB format.

    n_colors : int, optional
        The maximum number of color displayed in the scalar bar.

    fill : bool
        Draw a filled box behind the scalar bar with the
        ``background_color``

    render : bool, optional
        Force a render when True. Default ``True``.

    Notes
    -----
    Setting title_font_size, or label_font_size disables automatic font
    sizing for both the title and label.

    """
    # Resolve theme defaults for any argument left as None.
    if interactive is None:
        interactive = rcParams['interactive']
    if font_family is None:
        font_family = rcParams['font']['family']
    if label_font_size is None:
        label_font_size = rcParams['font']['label_size']
    if title_font_size is None:
        title_font_size = rcParams['font']['title_size']
    if color is None:
        color = rcParams['font']['color']
    if fmt is None:
        fmt = rcParams['font']['fmt']
    if vertical is None:
        if rcParams['colorbar_orientation'].lower() == 'vertical':
            vertical = True
    # only render when the plotter has already been shown
    if render is None:
        render = not self._first_time
    # Automatically choose size if not specified
    if width is None:
        if vertical:
            width = rcParams['colorbar_vertical']['width']
        else:
            width = rcParams['colorbar_horizontal']['width']
    if height is None:
        if vertical:
            height = rcParams['colorbar_vertical']['height']
        else:
            height = rcParams['colorbar_horizontal']['height']
    # check if mapper exists
    if mapper is None:
        if not hasattr(self, 'mapper') or self.mapper is None:
            raise AttributeError('Mapper does not exist. '
                                 'Add a mesh with scalars first.')
        mapper = self.mapper
    if title:
        # Check that this data hasn't already been plotted
        if title in list(self._scalar_bar_ranges.keys()):
            # A bar with this title exists: widen its range to cover the
            # new mapper and register the mapper, then reuse the bar.
            clim = list(self._scalar_bar_ranges[title])
            newrng = mapper.scalar_range
            oldmappers = self._scalar_bar_mappers[title]
            # get max for range and reset everything
            if newrng[0] < clim[0]:
                clim[0] = newrng[0]
            if newrng[1] > clim[1]:
                clim[1] = newrng[1]
            for mh in oldmappers:
                mh.scalar_range = clim[0], clim[1]
            mapper.scalar_range = clim[0], clim[1]
            self._scalar_bar_mappers[title].append(mapper)
            self._scalar_bar_ranges[title] = clim
            # Color bar already present and ready to be used so returning
            return
    # Automatically choose location if not specified
    if position_x is None or position_y is None:
        # Claim the lowest free layout slot; empty slot set means the
        # plotter already holds the maximum number of bars.
        try:
            slot = min(self._scalar_bar_slots)
            self._scalar_bar_slots.remove(slot)
            self._scalar_bar_slot_lookup[title] = slot
        except:
            raise RuntimeError('Maximum number of color bars reached.')
        if position_x is None:
            if vertical:
                position_x = rcParams['colorbar_vertical']['position_x']
                position_x -= slot * (width + 0.2 * width)
            else:
                position_x = rcParams['colorbar_horizontal']['position_x']
        if position_y is None:
            if vertical:
                position_y = rcParams['colorbar_vertical']['position_y']
            else:
                position_y = rcParams['colorbar_horizontal']['position_y']
                position_y += slot * height
    # Adjust to make sure on the screen
    if position_x + width > 1:
        position_x -= width
    if position_y + height > 1:
        position_y -= height
    # parse color
    color = parse_color(color)
    # Create scalar bar
    self.scalar_bar = vtk.vtkScalarBarActor()
    if background_color is not None:
        background_color = parse_color(background_color, opacity=1.0)
        background_color = np.array(background_color) * 255
        self.scalar_bar.GetBackgroundProperty().SetColor(background_color[0:3])
        if fill:
            self.scalar_bar.DrawBackgroundOn()
        # Pre-blend the lookup table colors with the background so
        # translucent entries render correctly over the backdrop.
        lut = vtk.vtkLookupTable()
        lut.DeepCopy(mapper.lookup_table)
        ctable = vtk_to_numpy(lut.GetTable())
        alphas = ctable[:, -1][:, np.newaxis] / 255.
        use_table = ctable.copy()
        use_table[:, -1] = 255.
        ctable = (use_table * alphas) + background_color * (1 - alphas)
        lut.SetTable(numpy_to_vtk(ctable, array_type=vtk.VTK_UNSIGNED_CHAR))
    else:
        lut = mapper.lookup_table
    self.scalar_bar.SetLookupTable(lut)
    if n_colors is not None:
        self.scalar_bar.SetMaximumNumberOfColors(n_colors)
    if n_labels < 1:
        self.scalar_bar.DrawTickLabelsOff()
    else:
        self.scalar_bar.DrawTickLabelsOn()
        self.scalar_bar.SetNumberOfLabels(n_labels)
    if nan_annotation:
        self.scalar_bar.DrawNanAnnotationOn()
    if above_label:
        self.scalar_bar.DrawAboveRangeSwatchOn()
        self.scalar_bar.SetAboveRangeAnnotation(above_label)
    if below_label:
        self.scalar_bar.DrawBelowRangeSwatchOn()
        self.scalar_bar.SetBelowRangeAnnotation(below_label)
    # edit the size of the colorbar
    self.scalar_bar.SetHeight(height)
    self.scalar_bar.SetWidth(width)
    self.scalar_bar.SetPosition(position_x, position_y)
    if fmt is not None:
        self.scalar_bar.SetLabelFormat(fmt)
    if vertical:
        self.scalar_bar.SetOrientationToVertical()
    else:
        self.scalar_bar.SetOrientationToHorizontal()
    if label_font_size is not None or title_font_size is not None:
        # Any explicit font size disables VTK's automatic sizing.
        self.scalar_bar.UnconstrainedFontSizeOn()
        self.scalar_bar.AnnotationTextScalingOn()
    label_text = self.scalar_bar.GetLabelTextProperty()
    anno_text = self.scalar_bar.GetAnnotationTextProperty()
    label_text.SetColor(color)
    anno_text.SetColor(color)
    label_text.SetShadow(shadow)
    anno_text.SetShadow(shadow)
    # Set font
    label_text.SetFontFamily(parse_font_family(font_family))
    anno_text.SetFontFamily(parse_font_family(font_family))
    label_text.SetItalic(italic)
    anno_text.SetItalic(italic)
    label_text.SetBold(bold)
    anno_text.SetBold(bold)
    if label_font_size:
        label_text.SetFontSize(label_font_size)
        anno_text.SetFontSize(label_font_size)
    # Set properties
    if title:
        # Register the new bar's range and mapper under its title.
        clim = mapper.scalar_range
        self._scalar_bar_ranges[title] = clim
        self._scalar_bar_mappers[title] = [mapper]
        self.scalar_bar.SetTitle(title)
        title_text = self.scalar_bar.GetTitleTextProperty()
        title_text.SetJustificationToCentered()
        title_text.SetItalic(italic)
        title_text.SetBold(bold)
        title_text.SetShadow(shadow)
        if title_font_size:
            title_text.SetFontSize(title_font_size)
        # Set font
        title_text.SetFontFamily(parse_font_family(font_family))
        # set color
        title_text.SetColor(color)
        self._scalar_bar_actors[title] = self.scalar_bar
    # Interactive bars are only supported for single-renderer plots.
    if interactive is None:
        interactive = rcParams['interactive']
        if self.shape != (1, 1):
            interactive = False
    elif interactive and self.shape != (1, 1):
        raise ValueError('Interactive scalar bars disabled for multi-renderer plots')
    if interactive:
        self.scalar_widget = vtk.vtkScalarBarWidget()
        self.scalar_widget.SetScalarBarActor(self.scalar_bar)
        self.scalar_widget.SetInteractor(self.iren)
        self.scalar_widget.SetEnabled(1)
        rep = self.scalar_widget.GetRepresentation()
        # self.scalar_widget.On()
        if vertical is True or vertical is None:
            rep.SetOrientation(1)  # 0 = Horizontal, 1 = Vertical
        else:
            rep.SetOrientation(0)  # 0 = Horizontal, 1 = Vertical
        self._scalar_bar_widgets[title] = self.scalar_widget
    if use_opacity:
        self.scalar_bar.SetUseOpacity(True)
    if outline:
        self.scalar_bar.SetDrawFrame(True)
        frame_prop = self.scalar_bar.GetFrameProperty()
        frame_prop.SetColor(color)
    else:
        self.scalar_bar.SetDrawFrame(False)
    self.add_actor(self.scalar_bar, reset_camera=False, pickable=False,
                   render=render)
    return self.scalar_bar  # return the actor
def update_scalars(self, scalars, mesh=None, render=True):
    """Update scalars of an object in the plotter.

    Parameters
    ----------
    scalars : np.ndarray
        Scalars to replace existing scalars.

    mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
        Object that has already been added to the Plotter. If
        None, uses last added mesh.

    render : bool, optional
        Force a render when True. Default ``True``.

    """
    if mesh is None:
        mesh = self.mesh

    if isinstance(mesh, (collections.abc.Iterable, pyvista.MultiBlock)):
        # Composite input: recurse over the sub-meshes and defer the
        # render until every block has been updated.
        for block in mesh:
            self.update_scalars(scalars, mesh=block, render=False)
        if render:
            self.render()
        return

    if isinstance(scalars, str):
        # An array name was given -- fetch the array from the mesh.
        scalars = get_array(mesh, scalars)

    if scalars is None:
        if render:
            self.render()
        return

    # Decide between point and cell data based on the array length.
    if scalars.shape[0] == mesh.GetNumberOfPoints():
        data = mesh.GetPointData()
    elif scalars.shape[0] == mesh.GetNumberOfCells():
        data = mesh.GetCellData()
    else:
        raise_not_matching(scalars, mesh)

    vtk_scalars = data.GetScalars()
    if vtk_scalars is None:
        raise ValueError('No active scalars')
    # Overwrite the existing VTK array in place so the pipeline sees
    # the new values without re-adding the mesh.
    wrapped = convert_array(vtk_scalars)
    wrapped[:] = scalars
    data.Modified()
    try:
        # Why are the points updated here? Not all datasets have points
        # and only the scalars array is modified by this function...
        mesh.GetPoints().Modified()
    except:
        pass

    if render:
        self.render()
def update_coordinates(self, points, mesh=None, render=True):
    """Update the points of an object in the plotter.

    Parameters
    ----------
    points : np.ndarray
        Points to replace existing points.

    mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
        Object that has already been added to the Plotter. If
        None, uses last added mesh.

    render : bool, optional
        Force a render when True. Default ``True``.

    """
    target = self.mesh if mesh is None else mesh
    target.points = points

    # only render when the plotter has already been shown
    if render is None:
        render = not self._first_time
    if render:
        self.render()
def _clear_ren_win(self):
"""Clear the render window."""
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
def close(self, render=False):
    """Close the render window and release associated resources.

    Parameters
    ----------
    render : bool, optional
        Not referenced in this method; accepted for call
        compatibility with callers that pass it.

    """
    # optionally run just prior to exiting the plotter
    if self._before_close_callback is not None:
        self._before_close_callback(self)
        # Drop the callback so it cannot fire twice.
        self._before_close_callback = None
    # must close out widgets first
    super().close()
    # Renderer has an axes widget, so close it
    for renderer in self.renderers:
        renderer.close()
    self._shadow_renderer.close()
    # Turn off the lights
    for renderer in self.renderers:
        renderer.remove_all_lights()
    # Clear the scalar bar
    self.scalar_bar = None
    # Grab screenshots of last render
    if self._store_image:
        self.last_image = self.screenshot(None, return_img=True)
        self.last_image_depth = self.get_image_depth()
    if hasattr(self, 'scalar_widget'):
        del self.scalar_widget
    # reset scalar bar stuff
    self.clear()
    self._clear_ren_win()
    self._style_class = None
    # Detach any observers registered on the interactor before
    # terminating it.
    if hasattr(self, '_observers'):
        for obs in self._observers.values():
            self.iren.RemoveObservers(obs)
        del self._observers
    if self.iren is not None:
        self.iren.TerminateApp()
        self.iren = None
    if hasattr(self, 'textActor'):
        del self.textActor
    # end movie
    if hasattr(self, 'mwriter'):
        # Best effort: a failing movie writer must not block shutdown.
        try:
            self.mwriter.close()
        except BaseException:
            pass
    # this helps managing closed plotters
    self._closed = True
def deep_clean(self):
    """Release heavyweight references held by the plotter and its renderers."""
    # Deep-clean every renderer: the subplot renderers, the shadow
    # renderer, then any background renderers (which may be ``None``).
    # The renderer objects themselves are intentionally kept.
    for target in (*self.renderers, self._shadow_renderer,
                   *self._background_renderers):
        if target is not None:
            target.deep_clean()

    active_mesh = getattr(self, 'mesh', None)
    if active_mesh is not None:
        active_mesh.point_arrays = None
        active_mesh.cell_arrays = None
        self.mesh = None

    active_mapper = getattr(self, 'mapper', None)
    if active_mapper is not None:
        active_mapper.lookup_table = None
        self.mapper = None

    self.volume = None
    self.textactor = None
def add_text(self, text, position='upper_left', font_size=18, color=None,
             font=None, shadow=False, name=None, viewport=False):
    """Add text to plot object in the top left corner by default.

    Parameters
    ----------
    text : str
        The text to add the rendering

    position : str, tuple(float)
        Position to place the bottom left corner of the text box.
        A tuple (or ``None``/``True``) selects pixel coordinates and a
        general ``vtkTextActor``; a string corner name (``'lower_left'``,
        ``'lower_right'``, ``'upper_left'``, ``'upper_right'``,
        ``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``,
        ``'left_edge'`` or their short aliases) selects a
        ``vtkCornerAnnotation`` used for fixed labels.

    font_size : float, optional
        Size of the text font; falls back to the theme when ``None``.

    color : string or 3 item list, optional
        Text color; falls back to the theme when ``None``.

    font : string, optional
        Font name may be courier, times, or arial

    shadow : bool, optional
        Adds a black shadow to the text. Defaults to False

    name : str, optional
        The name for the added actor so that it can be easily updated.
        If an actor of this name already exists in the rendering window, it
        will be replaced by the new actor.

    viewport : bool
        If True and position is a tuple of float, uses
        the normalized viewport coordinate system (values between 0.0
        and 1.0 and support for HiDPI).

    Returns
    -------
    textActor : vtk.vtkTextActor
        Text actor added to plot

    """
    # Fall back to global theme defaults where arguments were omitted.
    if font is None:
        font = rcParams['font']['family']
    if font_size is None:
        font_size = rcParams['font']['size']
    if color is None:
        color = rcParams['font']['color']
    if position is None:
        # Place the text near the top-left corner of this subplot.
        window_size = self.window_size
        position = [(window_size[0] * 0.02) / self.shape[0],
                    (window_size[1] * 0.85) / self.shape[0]]

    corners = {
        'lower_left': vtk.vtkCornerAnnotation.LowerLeft,
        'lower_right': vtk.vtkCornerAnnotation.LowerRight,
        'upper_left': vtk.vtkCornerAnnotation.UpperLeft,
        'upper_right': vtk.vtkCornerAnnotation.UpperRight,
        'lower_edge': vtk.vtkCornerAnnotation.LowerEdge,
        'upper_edge': vtk.vtkCornerAnnotation.UpperEdge,
        'left_edge': vtk.vtkCornerAnnotation.LeftEdge,
        'right_edge': vtk.vtkCornerAnnotation.RightEdge,
    }
    # Short-hand aliases map onto the canonical corner names.
    aliases = {'ll': 'lower_left', 'lr': 'lower_right',
               'ul': 'upper_left', 'ur': 'upper_right',
               'top': 'upper_edge', 'bottom': 'lower_edge',
               'right': 'right_edge', 'r': 'right_edge',
               'left': 'left_edge', 'l': 'left_edge'}
    for short, full in aliases.items():
        corners[short] = corners[full]

    if isinstance(position, (int, str, bool)):
        if isinstance(position, str):
            position = corners[position]
        elif position is True:
            position = corners['upper_left']
        # Corner annotations size their text via a linear scale factor.
        self.textActor = vtk.vtkCornerAnnotation()
        self.textActor.SetLinearFontScaleFactor(font_size // 2)
        self.textActor.SetText(position, text)
    else:
        self.textActor = vtk.vtkTextActor()
        self.textActor.SetInput(text)
        self.textActor.SetPosition(position)
        if viewport:
            self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
            self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
        self.textActor.GetTextProperty().SetFontSize(int(font_size * 2))

    text_prop = self.textActor.GetTextProperty()
    text_prop.SetColor(parse_color(color))
    text_prop.SetFontFamily(FONT_KEYS[font])
    text_prop.SetShadow(shadow)

    self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False)
    return self.textActor
def open_movie(self, filename, framerate=24):
    """Establish a connection to the ffmpeg writer.

    Parameters
    ----------
    filename : str
        Filename of the movie to open. Filename should end in mp4,
        but other filetypes may be supported. See ``imageio.get_writer``.

    framerate : int, optional
        Frames per second.

    """
    # Relative paths are resolved against the configured figure directory.
    figure_dir = pyvista.FIGURE_PATH
    if isinstance(figure_dir, str) and not os.path.isabs(filename):
        filename = os.path.join(figure_dir, filename)
    self.mwriter = imageio.get_writer(filename, fps=framerate)
def open_gif(self, filename):
    """Open a gif file to which rendered frames will be written.

    Parameters
    ----------
    filename : str
        Filename of the gif to open. Filename must end in ``.gif``.

    Raises
    ------
    ValueError
        If ``filename`` does not end in ``.gif``.

    """
    # BUG FIX: the original test ``filename[-3:] != 'gif'`` accepted
    # names like 'framesgif' (no dot), contradicting the error message.
    if not filename.endswith('.gif'):
        raise ValueError('Unsupported filetype. Must end in .gif')
    # Relative paths are resolved against the configured figure directory.
    if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
        filename = os.path.join(pyvista.FIGURE_PATH, filename)
    self._gif_filename = os.path.abspath(filename)
    # mode='I' selects imageio's multi-image (animation) writer.
    self.mwriter = imageio.get_writer(filename, mode='I')
def write_frame(self):
    """Append the current rendering as a single frame of the open movie/GIF."""
    if not hasattr(self, 'mwriter'):
        raise RuntimeError('This plotter has not opened a movie or GIF file.')
    # Refresh the render window so the captured image is current.
    self.update()
    self.mwriter.append_data(self.image)
def _run_image_filter(self, ifilter):
    # Force the VTK filter to re-execute so its pixel data is current.
    ifilter.Modified()
    ifilter.Update()

    # Wrap the output image and pull out the raw scalar array.
    image = pyvista.wrap(ifilter.GetOutput())
    width, height = image.dimensions[0], image.dimensions[1]
    img_array = pyvista.utilities.point_array(image, 'ImageScalars')

    # Reshape to (rows, cols, channels) and flip vertically -- VTK
    # stores images bottom-up.
    return img_array.reshape((height, width, -1))[::-1]
def get_image_depth(self,
                    fill_value=np.nan,
                    reset_camera_clipping_range=True):
    """Return a depth image representing current render window.

    Parameters
    ----------
    fill_value : float
        Fill value for points in image that don't include objects in scene.
        To not use a fill value, pass ``None``.

    reset_camera_clipping_range : bool
        Reset the camera clipping range to include data in view?

    Returns
    -------
    image_depth : numpy.ndarray
        Image of depth values from camera orthogonal to image plane

    Notes
    -----
    Values in image_depth are negative to adhere to a
    right-handed coordinate system.

    """
    # If the render window is gone, fall back to the depth image cached
    # before closing (see ``close``), re-applying the fill value.
    if not hasattr(self, 'ren_win') and hasattr(self, 'last_image_depth'):
        zval = self.last_image_depth.copy()
        if fill_value is not None:
            zval[self._image_depth_null] = fill_value
        return zval
    # Ensure points in view are within clipping range of renderer?
    if reset_camera_clipping_range:
        self.renderer.ResetCameraClippingRange()
    # Get the z-buffer image
    ifilter = vtk.vtkWindowToImageFilter()
    ifilter.SetInput(self.ren_win)
    ifilter.ReadFrontBufferOff()
    ifilter.SetInputBufferTypeToZBuffer()
    zbuff = self._run_image_filter(ifilter)[:, :, 0]
    # Convert z-buffer values to depth from camera
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        near, far = self.camera.clipping_range
        if self.camera.is_parallel_projection:
            # NOTE(review): this branch yields values in [0, 1] (non-
            # negative), which appears inconsistent with the docstring's
            # "values are negative" note and with the ``-far`` comparison
            # below -- confirm intended behavior for parallel projection.
            zval = (zbuff - near) / (far - near)
        else:
            # Invert the perspective projection to recover camera-space
            # depth; results are negative (right-handed convention).
            zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far)
        # Consider image values outside clipping range as nans
        args = np.logical_or(zval < -far, np.isclose(zval, -far))
    self._image_depth_null = args
    if fill_value is not None:
        zval[args] = fill_value
    return zval
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
    """Add lines to the plotting object.

    Parameters
    ----------
    lines : np.ndarray
        Points representing line segments. For example, two line segments
        would be represented as:

        np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])

    color : string or 3 item list, optional, defaults to white
        Either a string, rgb list, or hex color string. For example:

        * ``color='white'``
        * ``color='w'``
        * ``color=[1, 1, 1]``
        * ``color='#FFFFFF'``

    width : float, optional
        Thickness of lines

    label : str, optional
        Label for the legend; registered with the plotter's label list.

    name : str, optional
        The name for the added actor so that it can be easily updated.
        If an actor of this name already exists in the rendering window, it
        will be replaced by the new actor.

    Returns
    -------
    actor : vtk.vtkActor
        Lines actor.

    """
    if not isinstance(lines, np.ndarray):
        raise TypeError('Input should be an array of point segments')

    # Convert the raw point pairs into a PolyData of line cells.
    lines = pyvista.lines_from_points(lines)

    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputData(lines)

    rgb_color = parse_color(color)

    # Register the legend entry, if requested.
    if label:
        if not isinstance(label, str):
            raise TypeError('Label must be a string')
        self._labels.append([lines, label, rgb_color])

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    prop = actor.GetProperty()
    prop.SetLineWidth(width)
    prop.EdgeVisibilityOn()
    prop.SetEdgeColor(rgb_color)
    prop.SetColor(rgb_color)
    # Lines are drawn flat -- no lighting calculations.
    prop.LightingOff()

    self.add_actor(actor, reset_camera=False, name=name, pickable=False)
    return actor
def remove_scalar_bar(self):
    """Remove the scalar bar actor from the plotter, if one was added."""
    try:
        scalar_bar = self.scalar_bar
    except AttributeError:
        # No scalar bar was ever created -- nothing to remove.
        return
    self.remove_actor(scalar_bar, reset_camera=False)
def add_point_labels(self, points, labels, italic=False, bold=True,
                     font_size=None, text_color=None,
                     font_family=None, shadow=False,
                     show_points=True, point_color=None, point_size=5,
                     name=None, shape_color='grey', shape='rounded_rect',
                     fill_shape=True, margin=3, shape_opacity=1.0,
                     pickable=False, render_points_as_spheres=False,
                     tolerance=0.001, reset_camera=None, always_visible=False):
    """Create a point actor with one label from list labels assigned to each point.

    Parameters
    ----------
    points : np.ndarray or pyvista.Common
        n x 3 numpy array of points or pyvista dataset with points.

    labels : list or str
        List of labels. Must be the same length as points. If a string name
        is given with a pyvista.Common input for points, then these are fetched.

    italic : bool, optional
        Italicises title and bar labels. Default False.

    bold : bool, optional
        Bolds title and bar labels. Default True.

    font_size : float, optional
        Sets the size of the title font. Defaults to 16.

    text_color : string or 3 item list, optional
        Color of text. Either a string, rgb list, or hex color string.

        * ``text_color='white'``
        * ``text_color='w'``
        * ``text_color=[1, 1, 1]``
        * ``text_color='#FFFFFF'``

    font_family : string, optional
        Font family. Must be either courier, times, or arial.

    shadow : bool, optional
        Adds a black shadow to the text. Defaults to False.

    show_points : bool, optional
        Controls if points are visible. Default True.

    point_color : string or 3 item list, optional
        Color of points (if visible). Either a string, rgb list, or
        hex color string.

    point_size : float, optional
        Size of points (if visible).

    name : str, optional
        The name for the added actor so that it can be easily updated.
        If an actor of this name already exists in the rendering window, it
        will be replaced by the new actor.

    shape_color : string or 3 item list, optional
        Color of the label background shape.

    shape : str, optional
        The string name of the shape to use. Options are ``'rect'`` or
        ``'rounded_rect'``. If you want no shape, pass ``None``.

    fill_shape : bool, optional
        Fill the shape with the ``shape_color``. Outlines if ``False``.

    margin : int, optional
        The size of the margin on the label background shape. Default is 3.

    shape_opacity : float
        The opacity of the shape between zero and one.

    pickable : bool, optional
        Whether the point actor is pickable. Default False.

    render_points_as_spheres : bool, optional
        Render the points as spheres. Default False.

    tolerance : float
        A tolerance to use to determine whether a point label is visible.
        A tolerance is usually required because the conversion from world
        space to display space during rendering introduces numerical
        round-off.

    reset_camera : bool, optional
        Reset the camera after adding the points to the scene.

    always_visible : bool, optional
        Skip adding the visibility filter. Default False.

    Returns
    -------
    labelActor : vtk.vtkActor2D
        VTK label actor. Can be used to change properties of the labels.

    """
    if font_family is None:
        font_family = rcParams['font']['family']
    if font_size is None:
        font_size = rcParams['font']['size']
    if point_color is None:
        point_color = rcParams['color']
    if text_color is None:
        text_color = rcParams['font']['color']

    if isinstance(points, (list, tuple)):
        points = np.array(points)

    if isinstance(points, np.ndarray):
        vtkpoints = pyvista.PolyData(points)  # Cast to poly data
    elif is_pyvista_dataset(points):
        vtkpoints = pyvista.PolyData(points.points)
        if isinstance(labels, str):
            # fetch labels from the named point array
            labels = points.point_arrays[labels].astype(str)
    else:
        raise TypeError(f'Points type not usable: {type(points)}')

    if len(vtkpoints.points) != len(labels):
        raise ValueError('There must be one label for each point')

    if name is None:
        name = f'{type(vtkpoints).__name__}({vtkpoints.memory_address})'

    vtklabels = vtk.vtkStringArray()
    vtklabels.SetName('labels')
    for item in labels:
        vtklabels.InsertNextValue(str(item))
    vtkpoints.GetPointData().AddArray(vtklabels)

    # Create hierarchy
    hier = vtk.vtkPointSetToLabelHierarchy()
    hier.SetLabelArrayName('labels')

    if always_visible:
        hier.SetInputData(vtkpoints)
    else:
        # Only show visible points
        vis_points = vtk.vtkSelectVisiblePoints()
        vis_points.SetInputData(vtkpoints)
        vis_points.SetRenderer(self.renderer)
        vis_points.SetTolerance(tolerance)
        hier.SetInputConnection(vis_points.GetOutputPort())

    # create label mapper
    labelMapper = vtk.vtkLabelPlacementMapper()
    labelMapper.SetInputConnection(hier.GetOutputPort())
    if not isinstance(shape, str):
        labelMapper.SetShapeToNone()
    # BUGFIX: compare with ``==`` rather than substring membership
    # (``shape.lower() in 'rect'``), which silently matched partial
    # inputs like ``'t'`` or ``'ec'``.
    elif shape.lower() == 'rect':
        labelMapper.SetShapeToRect()
    elif shape.lower() == 'rounded_rect':
        labelMapper.SetShapeToRoundedRect()
    else:
        raise ValueError(f'Shape ({shape}) not understood')
    if fill_shape:
        labelMapper.SetStyleToFilled()
    else:
        labelMapper.SetStyleToOutline()
    labelMapper.SetBackgroundColor(parse_color(shape_color))
    labelMapper.SetBackgroundOpacity(shape_opacity)
    labelMapper.SetMargin(margin)

    textprop = hier.GetTextProperty()
    textprop.SetItalic(italic)
    textprop.SetBold(bold)
    textprop.SetFontSize(font_size)
    textprop.SetFontFamily(parse_font_family(font_family))
    textprop.SetColor(parse_color(text_color))
    textprop.SetShadow(shadow)

    # remove any previously-added actors under this name pair
    self.remove_actor(f'{name}-points', reset_camera=False)
    self.remove_actor(f'{name}-labels', reset_camera=False)

    # add points
    if show_points:
        self.add_mesh(vtkpoints, color=point_color, point_size=point_size,
                      name=f'{name}-points', pickable=pickable,
                      render_points_as_spheres=render_points_as_spheres,
                      reset_camera=reset_camera)

    labelActor = vtk.vtkActor2D()
    labelActor.SetMapper(labelMapper)
    self.add_actor(labelActor, reset_camera=False,
                   name=f'{name}-labels', pickable=False)
    return labelActor
def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs):
    """Label the points from a dataset with the values of their scalars.

    Wrapper for :func:`pyvista.BasePlotter.add_point_labels`.

    Parameters
    ----------
    points : np.ndarray or pyvista.Common
        n x 3 numpy array of points or pyvista dataset with points.

    labels : str
        String name of the point data array to use.

    fmt : str
        String formatter used to format numerical data. Defaults to
        ``rcParams['font']['fmt']``, falling back to ``'%.6e'``.

    preamble : str, optional
        Text prepended to each formatted value.

    """
    if not is_pyvista_dataset(points):
        raise TypeError(f'input points must be a pyvista dataset, not: {type(points)}')
    if not isinstance(labels, str):
        raise TypeError('labels must be a string name of the scalars array to use')
    if fmt is None:
        fmt = rcParams['font']['fmt']
    if fmt is None:
        fmt = '%.6e'
    scalars = points.point_arrays[labels]
    # BUGFIX: the resolved ``fmt`` was previously ignored and the format
    # string was hard-coded to ``%.3e``.
    phrase = f'{preamble} {fmt}'
    labels = [phrase % val for val in scalars]
    return self.add_point_labels(points, labels, **kwargs)
def add_points(self, points, **kwargs):
    """Add points to a mesh.

    Thin wrapper around ``add_mesh`` that forces the representation
    style to ``'points'``; all other keyword arguments are forwarded
    unchanged.
    """
    options = dict(kwargs)
    options['style'] = 'points'
    return self.add_mesh(points, **options)
def add_arrows(self, cent, direction, mag=1, **kwargs):
    """Add arrows to the plotter.

    Parameters
    ----------
    cent : np.ndarray
        Array of centers.

    direction : np.ndarray
        Array of direction vectors.

    mag : float, optional
        Amount to scale the direction vectors.

    Examples
    --------
    Plot a random field of vectors and save a screenshot of it.

    >>> import numpy as np
    >>> import pyvista
    >>> cent = np.random.random((10, 3))
    >>> direction = np.random.random((10, 3))
    >>> plotter = pyvista.Plotter()
    >>> _ = plotter.add_arrows(cent, direction, mag=2)
    >>> plotter.show()  # doctest:+SKIP

    """
    if cent.shape != direction.shape:  # pragma: no cover
        raise ValueError('center and direction arrays must have the same shape')

    # work on a copy so the caller's array is never mutated by scaling
    vectors = direction.copy()
    centers = cent
    if centers.ndim != 2:
        centers = centers.reshape((-1, 3))
    if vectors.ndim != 2:
        vectors = vectors.reshape((-1, 3))
    if mag != 1:
        vectors = vectors * mag

    poly = pyvista.vector_poly_data(centers, vectors)

    # Glyph an arrow source along the vectors
    source = vtk.vtkArrowSource()
    source.Update()
    glyphs = vtk.vtkGlyph3D()
    glyphs.SetSourceData(source.GetOutput())
    glyphs.SetInputData(poly)
    glyphs.SetVectorModeToUseVector()
    glyphs.Update()

    return self.add_mesh(wrap(glyphs.GetOutput()), **kwargs)
@staticmethod
def _save_image(image, filename, return_img=None):
    """Save a NumPy image array to ``filename`` and return the image.

    This is an internal helper.

    Parameters
    ----------
    image : np.ndarray
        Image array; must be non-empty.

    filename : str, pathlib.Path or None
        Where to write the image. When not a string/path (e.g. ``None``
        or ``False``), nothing is written.

    return_img : bool, optional
        Kept for interface compatibility; the image array is always
        returned regardless of this value.

    Returns
    -------
    np.ndarray
        The (unmodified) input image.

    """
    if not image.size:
        raise ValueError('Empty image. Have you run plot() first?')
    # write screenshot to file
    if isinstance(filename, (str, pathlib.Path)):
        from PIL import Image
        filename = pathlib.Path(filename)
        # resolve relative paths against the configured figure directory
        if isinstance(pyvista.FIGURE_PATH, str) and not filename.is_absolute():
            filename = pathlib.Path(os.path.join(pyvista.FIGURE_PATH, filename))
        if not filename.suffix:
            filename = filename.with_suffix('.png')
        elif filename.suffix not in SUPPORTED_FORMATS:
            raise ValueError(f'Unsupported extension {filename.suffix}\n' +
                             f'Must be one of the following: {SUPPORTED_FORMATS}')
        image_path = os.path.abspath(os.path.expanduser(str(filename)))
        Image.fromarray(image).save(image_path)
    # NOTE(review): the original returned ``image`` on both branches of an
    # ``if not return_img`` test, so ``return_img`` had no effect; the dead
    # conditional has been collapsed into a single return.
    return image
def save_graphic(self, filename, title='PyVista Export', raster=True, painter=True):
    """Save a screenshot of the rendering window as a graphic file.

    Parameters
    ----------
    filename : str
        Output path. The supported formats are: '.svg', '.eps',
        '.ps', '.pdf', '.tex'.

    title : str, optional
        Title embedded in the exported document.

    raster : bool, optional
        Write 3D props as raster images within the vector output.

    painter : bool, optional
        Use the GL2PS painter settings.

    Raises
    ------
    AttributeError
        If the plotter has already been closed.
    ValueError
        If the file extension is not one of the supported formats.

    """
    if not hasattr(self, 'ren_win'):
        raise AttributeError('This plotter is closed and unable to save a screenshot.')
    if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
        filename = os.path.join(pyvista.FIGURE_PATH, filename)
    filename = os.path.abspath(os.path.expanduser(filename))
    extension = pyvista.fileio.get_ext(filename)
    valid = ['.svg', '.eps', '.ps', '.pdf', '.tex']
    if extension not in valid:
        raise ValueError(f"Extension ({extension}) is an invalid choice. Valid options include: {', '.join(valid)}")
    writer = vtk.vtkGL2PSExporter()
    # map each extension to the corresponding format setter
    modes = {
        '.svg': writer.SetFileFormatToSVG,
        '.eps': writer.SetFileFormatToEPS,
        '.ps': writer.SetFileFormatToPS,
        '.pdf': writer.SetFileFormatToPDF,
        '.tex': writer.SetFileFormatToTeX,
    }
    writer.CompressOff()
    # BUGFIX: strip only the trailing extension. The previous
    # ``filename.replace(extension, '')`` removed every occurrence of the
    # extension substring anywhere in the path
    # (e.g. '/data/fig.svg.d/plot.svg' lost its directory suffix too).
    writer.SetFilePrefix(filename[:-len(extension)])
    writer.SetInput(self.ren_win)
    modes[extension]()
    writer.SetTitle(title)
    writer.SetWrite3DPropsAsRasterImage(raster)
    if painter:
        writer.UsePainterSettings()
    writer.Update()
    return
def screenshot(self, filename=None, transparent_background=None,
               return_img=None, window_size=None):
    """Take screenshot at current camera position.

    Parameters
    ----------
    filename : str, optional
        Location to write image to. If None, no image is written.

    transparent_background : bool, optional
        Makes the background transparent. Default False.

    return_img : bool, optional
        If a string filename is given and this is true, a NumPy array of
        the image will be returned.

    window_size : list, optional
        Two-item list of the render window size in pixels. When given,
        the window is resized before the screenshot is taken.

    Returns
    -------
    img : numpy.ndarray
        Array containing pixel RGB and alpha. Sized:
        [Window height x Window width x 3] for transparent_background=False
        [Window height x Window width x 4] for transparent_background=True

    Examples
    --------
    >>> import pyvista
    >>> sphere = pyvista.Sphere()
    >>> plotter = pyvista.Plotter(off_screen=True)
    >>> actor = plotter.add_mesh(sphere)
    >>> plotter.screenshot('screenshot.png')  # doctest:+SKIP

    """
    if window_size is not None:
        self.window_size = window_size

    # configure image filter
    if transparent_background is None:
        transparent_background = rcParams['transparent_background']
    self.image_transparent_background = transparent_background

    # This if statement allows you to save screenshots of closed plotters
    # This is needed for the sphinx-gallery work
    if not hasattr(self, 'ren_win'):
        # If plotter has been closed...
        # check if last_image exists
        if hasattr(self, 'last_image'):
            # Save last image
            return self._save_image(self.last_image, filename, return_img)
        # Plotter hasn't been rendered or was improperly closed
        raise AttributeError('This plotter is closed and unable to save a screenshot.')

    if self._first_time and not self.off_screen:
        raise RuntimeError("Nothing to screenshot - call .show first or "
                           "use the off_screen argument")

    # if off screen, show has not been called and we must render
    # before extracting an image
    if self._first_time:
        self._on_first_render_request()
        self.render()

    return self._save_image(self.image, filename, return_img)
def add_legend(self, labels=None, bcolor=(0.5, 0.5, 0.5), border=False,
               size=None, name=None):
    """Add a legend to render window.

    Entries must be a list containing one string and color entry for each
    item.

    Parameters
    ----------
    labels : list, optional
        When set to None, uses existing labels as specified by

        - add_mesh
        - add_lines
        - add_points

        List containing one entry for each item to be added to the
        legend. Each entry must contain two strings, [label,
        color], where label is the name of the item to add, and
        color is the color of the label to add.

    bcolor : list or string, optional
        Background color, either a three item 0 to 1 RGB color
        list, or a matplotlib color string (e.g. 'w' or 'white'
        for a white color). If None, legend background is
        disabled.

    border : bool, optional
        Controls if there will be a border around the legend.
        Default False.

    size : list, optional
        Two float list, each float between 0 and 1. For example
        [0.1, 0.1] would make the legend 10% the size of the
        entire figure window.

    name : str, optional
        The name for the added actor so that it can be easily updated.
        If an actor of this name already exists in the rendering window, it
        will be replaced by the new actor.

    Returns
    -------
    legend : vtk.vtkLegendBoxActor
        Actor for the legend.

    Examples
    --------
    >>> import pyvista
    >>> from pyvista import examples
    >>> mesh = examples.load_hexbeam()
    >>> othermesh = examples.load_uniform()
    >>> plotter = pyvista.Plotter()
    >>> _ = plotter.add_mesh(mesh, label='My Mesh')
    >>> _ = plotter.add_mesh(othermesh, 'k', label='My Other Mesh')
    >>> _ = plotter.add_legend()
    >>> plotter.show()  # doctest:+SKIP

    Alternative manual example

    >>> import pyvista
    >>> from pyvista import examples
    >>> mesh = examples.load_hexbeam()
    >>> othermesh = examples.load_uniform()
    >>> legend_entries = []
    >>> legend_entries.append(['My Mesh', 'w'])
    >>> legend_entries.append(['My Other Mesh', 'k'])
    >>> plotter = pyvista.Plotter()
    >>> _ = plotter.add_mesh(mesh)
    >>> _ = plotter.add_mesh(othermesh, 'k')
    >>> _ = plotter.add_legend(legend_entries)
    >>> plotter.show()  # doctest:+SKIP

    """
    self.legend = vtk.vtkLegendBoxActor()

    if labels is None:
        # use existing labels collected from add_mesh/add_lines/add_points
        if not self._labels:
            # BUGFIX: the concatenated message previously read
            # "adding them tothe plotting object" — missing space.
            raise ValueError('No labels input.\n\n'
                             'Add labels to individual items when adding them to '
                             'the plotting object with the "label=" parameter, '
                             'or enter them as the "labels" parameter.')

        self.legend.SetNumberOfEntries(len(self._labels))
        for i, (vtk_object, text, color) in enumerate(self._labels):
            self.legend.SetEntry(i, vtk_object, text, parse_color(color))

    else:
        self.legend.SetNumberOfEntries(len(labels))
        legendface = pyvista.single_triangle()
        for i, (text, color) in enumerate(labels):
            self.legend.SetEntry(i, legendface, text, parse_color(color))

    if size:
        self.legend.SetPosition2(size[0], size[1])

    if bcolor is None:
        self.legend.UseBackgroundOff()
    else:
        self.legend.UseBackgroundOn()
        self.legend.SetBackgroundColor(bcolor)

    if border:
        self.legend.BorderOn()
    else:
        self.legend.BorderOff()

    # Add to renderer
    self.add_actor(self.legend, reset_camera=False, name=name, pickable=False)
    return self.legend
def set_background(self, color, top=None, all_renderers=True):
    """Set the background color.

    Parameters
    ----------
    color : string or 3 item list, optional, defaults to white
        Either a string, rgb list, or hex color string. For example:

        * ``color='white'``
        * ``color='w'``
        * ``color=[1, 1, 1]``
        * ``color='#FFFFFF'``

    top : string or 3 item list, optional, defaults to None
        If given, this will enable a gradient background where the
        ``color`` argument is at the bottom and the color given in ``top``
        will be the color at the top of the renderer.

    all_renderers : bool
        If True, applies to all renderers in subplots. If False, then
        only applies to the active renderer.

    """
    if not all_renderers:
        # only touch the active renderer
        self.renderer.set_background(color, top=top)
        return
    for renderer in self.renderers:
        renderer.set_background(color, top=top)
    # shadow renderer gets a flat background (no gradient)
    self._shadow_renderer.set_background(color)
def remove_legend(self):
    """Remove the legend actor and re-render, if a legend was added."""
    try:
        legend = self.legend
    except AttributeError:
        # no legend has been added; nothing to remove
        return
    self.remove_actor(legend, reset_camera=False)
    self.render()
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, shift=0.0):
    """Generate an orbital path around the data scene.

    Parameters
    ----------
    factor : float
        A scaling factor applied to the scene extent when building the
        orbital radius.

    n_points : int
        Number of points on the orbital path.

    viewup : list(float)
        The normal to the orbital plane. Defaults to the
        ``rcParams['camera']['viewup']`` setting.

    shift : float, optional
        Shift the plane up/down from the center of the scene by this
        amount.

    """
    if viewup is None:
        viewup = rcParams['camera']['viewup']
    bnds = np.array(self.bounds)
    # radius is the larger of the scaled x- and y-extents of the scene
    radius = max((bnds[1] - bnds[0]) * factor,
                 (bnds[3] - bnds[2]) * factor)
    center = np.array(self.center) + np.array(viewup) * shift
    return pyvista.Polygon(center=center, radius=radius,
                           normal=viewup, n_sides=n_points)
def fly_to(self, point):
    """Move the current camera's focal point to ``point``.

    The movement is animated over the number of frames specified in
    NumberOfFlyFrames. The LOD desired frame rate is used.
    """
    interactor = self.iren
    return interactor.FlyTo(self.renderer, *point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None,
                  write_frames=False, threaded=False):
    """Orbit on the given path focusing on the focus point.

    Parameters
    ----------
    path : pyvista.PolyData
        Path of orbital points. The order in the points is the order of
        travel. When ``None``, a circular path is generated with
        ``generate_orbital_path``.

    focus : list(float) of length 3, optional
        The point of focus the camera. Defaults to the center of the
        scene.

    step : float, optional
        The timestep between flying to each camera position.

    viewup : list(float)
        The normal to the orbital plane.

    write_frames : bool
        Assume a file is open and write a frame on each camera view during
        the orbit.

    threaded : bool, optional
        Run this as a background thread. Generally used within a
        GUI (i.e. PyQt).

    """
    if focus is None:
        focus = self.center
    if viewup is None:
        viewup = rcParams['camera']['viewup']
    if path is None:
        path = self.generate_orbital_path(viewup=viewup)
    if not is_pyvista_dataset(path):
        path = pyvista.PolyData(path)
    points = path.points

    # Make sure the whole scene is visible
    self.camera.thickness = path.length

    def orbit():
        """Define the internal thread for running the orbit."""
        # fly the camera to each path point in order, re-rendering and
        # (optionally) writing a movie frame at every stop
        for point in points:
            self.set_position(point)
            self.set_focus(focus)
            self.set_viewup(viewup)
            self.renderer.ResetCameraClippingRange()
            self.render()
            time.sleep(step)
            if write_frames:
                self.write_frame()

    if threaded:
        # NOTE(review): ``Thread`` is assumed to be imported at module
        # level (``from threading import Thread``) — confirm.
        thread = Thread(target=orbit)
        thread.start()
    else:
        orbit()
    return
def export_vtkjs(self, filename, compress_arrays=False):
    """Export the current rendering scene as a VTKjs scene.

    The exported scene can be rendered in a web browser.
    """
    if not hasattr(self, 'ren_win'):
        raise RuntimeError('Export must be called before showing/closing the scene.')
    # resolve relative paths against the configured figure directory
    in_figure_dir = (isinstance(pyvista.FIGURE_PATH, str)
                     and not os.path.isabs(filename))
    if in_figure_dir:
        path = os.path.join(pyvista.FIGURE_PATH, filename)
    else:
        path = os.path.abspath(os.path.expanduser(filename))
    return export_plotter_vtkjs(self, path, compress_arrays=compress_arrays)
def export_obj(self, filename):
    """Export the current scene to OBJ format.

    ``filename`` is used as the exporter's file prefix; relative paths
    are resolved against the configured figure directory.
    """
    if not hasattr(self, "ren_win"):
        raise RuntimeError("This plotter must still have a render window open.")
    if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
        prefix = os.path.join(pyvista.FIGURE_PATH, filename)
    else:
        prefix = os.path.abspath(os.path.expanduser(filename))
    exporter = vtk.vtkOBJExporter()
    exporter.SetFilePrefix(prefix)
    exporter.SetRenderWindow(self.ren_win)
    return exporter.Write()
def __del__(self):
    """Delete the plotter."""
    # close() tears down the render window and interactor; guard so a
    # plotter that was already closed explicitly is not closed twice.
    if not self._closed:
        self.close()
    self.deep_clean()
    # drop renderer references so VTK objects can be garbage collected
    del self.renderers
    del self._shadow_renderer
def add_background_image(self, image_path, scale=1, auto_resize=True,
                         as_global=True):
    """Add a background image to a plot.

    Parameters
    ----------
    image_path : str
        Path to an image file.

    scale : float, optional
        Scale the image larger or smaller relative to the size of
        the window. For example, a scale size of 2 will make the
        largest dimension of the image twice as large as the
        largest dimension of the render window. Defaults to 1.

    auto_resize : bool, optional
        Resize the background when the render window changes size.

    as_global : bool, optional
        When multiple render windows are present, setting
        ``as_global=False`` will cause the background to only
        appear in one window.

    Examples
    --------
    >>> import pyvista
    >>> from pyvista import examples
    >>> plotter = pyvista.Plotter()
    >>> actor = plotter.add_mesh(pyvista.Sphere())
    >>> plotter.add_background_image(examples.mapfile)
    >>> plotter.show()  # doctest:+SKIP

    """
    # verify no render exists
    if self._background_renderers[self._active_renderer_index] is not None:
        raise RuntimeError('A background image already exists. '
                           'Remove it with remove_background_image '
                           'before adding one')

    # Need to change the number of layers to support an additional
    # background layer
    self.ren_win.SetNumberOfLayers(3)
    # move the scene renderer(s) to layer 2 so the background renderer
    # (layer 1) draws behind them
    if as_global:
        for renderer in self.renderers:
            renderer.SetLayer(2)
        view_port = None
    else:
        self.renderer.SetLayer(2)
        # restrict the background to the active subplot's viewport
        view_port = self.renderer.GetViewport()

    renderer = BackgroundRenderer(self, image_path, scale, view_port)
    renderer.SetLayer(1)
    self.ren_win.AddRenderer(renderer)
    self._background_renderers[self._active_renderer_index] = renderer

    # setup autoscaling of the image
    if auto_resize:  # pragma: no cover
        self._add_observer('ModifiedEvent', renderer.resize)
def remove_background_image(self):
    """Remove the background image from the current subplot."""
    index = self._active_renderer_index
    background = self._background_renderers[index]
    if background is None:
        raise RuntimeError('No background image to remove at this subplot')
    background.deep_clean()
    self._background_renderers[index] = None
def _on_first_render_request(self, cpos=None):
    """Once an image or render is officially requested, run this routine.

    For example on the show call or any screenshot producing code.

    Parameters
    ----------
    cpos : optional
        Camera position applied to every renderer that has not
        already had a camera set.
    """
    # reset each renderer's camera on the first render, unless a camera
    # position was already set or an explicit cpos is supplied
    if self._first_time:  # and not self.camera_set:
        for renderer in self.renderers:
            if not renderer.camera_set and cpos is None:
                renderer.camera_position = renderer.get_default_cam_pos()
                renderer.ResetCamera()
            elif cpos is not None:
                renderer.camera_position = cpos
        self._first_time = False
def reset_camera_clipping_range(self):
    """Reset the active renderer's camera clipping planes."""
    active_renderer = self.renderer
    active_renderer.ResetCameraClippingRange()
def add_light(self, light, only_active=False):
    """Add a Light to the scene.

    Parameters
    ----------
    light : Light or vtkLight
        The light to be added.

    only_active : bool
        If ``True``, only add the light to the active renderer. The default
        is that every renderer adds the light. To add the light to an arbitrary
        renderer, see the ``add_light`` method of the Renderer class.

    Examples
    --------
    Create a plotter that we initialize with no lights, and add a cube and a
    single headlight to it.

    >>> import pyvista as pv
    >>> plotter = pv.Plotter(lighting='none')
    >>> _ = plotter.add_mesh(pv.Cube())
    >>> light = pv.Light(color='cyan', light_type='headlight')
    >>> plotter.add_light(light)
    >>> plotter.show()  # doctest:+SKIP

    """
    if only_active:
        targets = [self.renderer]
    else:
        targets = self.renderers
    for target in targets:
        target.add_light(light)
def remove_all_lights(self, only_active=False):
    """Remove all lights from the scene.

    Parameters
    ----------
    only_active : bool
        If ``True``, only remove lights from the active renderer. The default
        is that lights are stripped from every renderer.

    Examples
    --------
    Create a plotter, forget to initialize it without default lighting,
    correct the mistake after instantiation.

    >>> import pyvista as pv
    >>> plotter = pv.Plotter()
    >>> plotter.remove_all_lights()
    >>> plotter.renderer.lights
    []

    """
    if only_active:
        targets = [self.renderer]
    else:
        targets = self.renderers
    for target in targets:
        target.remove_all_lights()
class Plotter(BasePlotter):
"""Plotting object to display vtk meshes or numpy arrays.
Example
-------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, color='red')
>>> _ = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show() # doctest:+SKIP
Parameters
----------
off_screen : bool, optional
Renders off screen when True. Useful for automated screenshots.
notebook : bool, optional
When True, the resulting plot is placed inline a jupyter notebook.
Assumes a jupyter console is active. Automatically enables off_screen.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render window.
Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
multi_samples : int
The number of multi-samples used to mitigate aliasing. 4 is a good
default but 8 will have better results with a potential impact on
performance.
line_smoothing : bool
If True, enable line smoothing
point_smoothing : bool
If True, enable point smoothing
polygon_smoothing : bool
If True, enable polygon smoothing
lighting : str, optional
What lighting to set up for the plotter.
Accepted options:
* ``'light_kit'``: a vtk Light Kit composed of 5 lights.
* ``'three lights'``: illumination using 3 lights.
* ``'none'``: no light sources at instantiation.
The default is a Light Kit (to be precise, 5 separate lights
that act like a Light Kit).
"""
last_update_time = 0.0
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
             groups=None, row_weights=None, col_weights=None,
             border=None, border_color='k', border_width=2.0,
             window_size=None, multi_samples=None, line_smoothing=False,
             point_smoothing=False, polygon_smoothing=False,
             splitting_position=None, title=None, lighting='light kit'):
    """Initialize a vtk plotting object.

    See the class docstring for parameter descriptions. Unspecified
    options fall back to the corresponding ``rcParams`` defaults.
    """
    super().__init__(shape=shape, border=border,
                     border_color=border_color,
                     border_width=border_width,
                     groups=groups, row_weights=row_weights,
                     col_weights=col_weights,
                     splitting_position=splitting_position,
                     title=title, lighting=lighting)
    log.debug('Plotter init start')

    def on_timer(iren, event_id):
        """Exit application if interactive renderer stops."""
        if event_id == 'TimerEvent':
            self.iren.TerminateApp()

    if off_screen is None:
        off_screen = pyvista.OFF_SCREEN

    # auto-detect a notebook context when not given explicitly
    if notebook is None:
        if rcParams['notebook'] is not None:
            notebook = rcParams['notebook']
        else:
            notebook = scooby.in_ipykernel()

    self.notebook = notebook
    if self.notebook:
        # notebook rendering is always off screen
        off_screen = True
    self.off_screen = off_screen

    if window_size is None:
        window_size = rcParams['window_size']
    self.__prior_window_size = window_size

    if multi_samples is None:
        multi_samples = rcParams['multi_samples']

    # initialize render window
    self.ren_win = vtk.vtkRenderWindow()
    self.ren_win.SetMultiSamples(multi_samples)
    self.ren_win.SetBorders(True)
    if line_smoothing:
        self.ren_win.LineSmoothingOn()
    if point_smoothing:
        self.ren_win.PointSmoothingOn()
    if polygon_smoothing:
        self.ren_win.PolygonSmoothingOn()

    for renderer in self.renderers:
        self.ren_win.AddRenderer(renderer)

    # Add the shadow renderer to allow us to capture interactions within
    # a given viewport
    # https://vtk.org/pipermail/vtkusers/2018-June/102030.html
    number_or_layers = self.ren_win.GetNumberOfLayers()
    current_layer = self.renderer.GetLayer()
    self.ren_win.SetNumberOfLayers(number_or_layers + 1)
    self.ren_win.AddRenderer(self._shadow_renderer)
    self._shadow_renderer.SetLayer(current_layer + 1)
    self._shadow_renderer.SetInteractive(False)  # never needs to capture

    if self.off_screen:
        self.ren_win.SetOffScreenRendering(1)

    # Add ren win and interactor no matter what - necessary for ipyvtk_simple
    self.iren = vtk.vtkRenderWindowInteractor()
    self.iren.LightFollowCameraOff()
    self.iren.SetDesiredUpdateRate(30.0)
    self.iren.SetRenderWindow(self.ren_win)
    self.enable_trackball_style()  # internally calls update_style()
    self._observers = {}  # Map of events to observers of self.iren
    self._add_observer("KeyPressEvent", self.key_press_event)
    self.update_style()

    # Set background
    self.set_background(rcParams['background'])

    # Set window size
    self.window_size = window_size

    # add timer event if interactive render exists
    self._add_observer(vtk.vtkCommand.TimerEvent, on_timer)

    if rcParams["depth_peeling"]["enabled"]:
        if self.enable_depth_peeling():
            for renderer in self.renderers:
                renderer.enable_depth_peeling()
    log.debug('Plotter init stop')
def show(self, title=None, window_size=None, interactive=True,
         auto_close=None, interactive_update=False, full_screen=None,
         screenshot=False, return_img=False, cpos=None, use_ipyvtk=None,
         **kwargs):
    """Display the plotting window.

    Notes
    -----
    Please use the ``q``-key to close the plotter as some operating systems
    (namely Windows) will experience issues saving a screenshot if the
    exit button in the GUI is pressed.

    Parameters
    ----------
    title : string, optional
        Title of plotting window.

    window_size : list, optional
        Window size in pixels. Defaults to [1024, 768]

    interactive : bool, optional
        Enabled by default. Allows user to pan and move figure.

    auto_close : bool, optional
        Enabled by default. Exits plotting session when user
        closes the window when interactive is ``True``.

    interactive_update: bool, optional
        Disabled by default. Allows user to non-blocking draw,
        user should call ``Update()`` in each iteration.

    full_screen : bool, optional
        Opens window in full screen. When enabled, ignores
        window_size. Default ``False``.

    screenshot : str or bool, optional
        Filename to save a screenshot to, or ``True`` to keep the
        last rendered image so it can be returned.

    cpos : list(tuple(floats))
        The camera position to use.

    return_img : bool
        Returns a numpy array representing the last image along
        with the camera position.

    use_ipyvtk : bool, optional
        Use the ``ipyvtk-simple`` ``ViewInteractiveWidget`` to
        visualize the plot within a jupyterlab notebook.

    Returns
    -------
    cpos : list
        List of camera position, focal point, and view up.

    image : np.ndarray
        Numpy array of the last image when either ``return_img=True``
        or ``screenshot`` is set.

    Examples
    --------
    Show the plotting window and display it using the
    ipyvtk-simple viewer

    >>> pl.show(use_ipyvtk=True)  # doctest:+SKIP

    Take a screenshot interactively. Screenshot will be of the
    last image shown.

    >>> pl.show(screenshot='my_image.png')  # doctest:+SKIP

    """
    # developer keyword argument: return notebook viewer
    # normally suppressed since it's shown by default
    return_viewer = kwargs.pop('return_viewer', False)

    # developer keyword argument: runs a function immediately prior to ``close``
    self._before_close_callback = kwargs.pop('before_close_callback', None)
    assert_empty_kwargs(**kwargs)

    # resolve auto_close: interactive_update implies the window must stay open
    if interactive_update and auto_close is None:
        auto_close = False
    elif interactive_update and auto_close:
        warnings.warn(textwrap.dedent("""\
            The plotter will close immediately automatically since ``auto_close=True``.
            Either, do not specify ``auto_close``, or set it to ``False`` if you want to
            interact with the plotter interactively.\
            """)
        )
    elif auto_close is None:
        auto_close = rcParams['auto_close']

    if use_ipyvtk is None:
        use_ipyvtk = rcParams['use_ipyvtk']

    if not hasattr(self, "ren_win"):
        raise RuntimeError("This plotter has been closed and cannot be shown.")

    if full_screen is None:
        full_screen = rcParams['full_screen']

    if full_screen:
        self.ren_win.SetFullScreen(True)
        self.ren_win.BordersOn()  # super buggy when disabled
    else:
        if window_size is None:
            window_size = self.window_size
        self.ren_win.SetSize(window_size[0], window_size[1])

    # reset the camera for the first render, unless a camera was set
    self._on_first_render_request(cpos)

    # Render
    # For Windows issues. Resolves #186, #1018 and #1078
    if os.name == 'nt' and pyvista.IS_INTERACTIVE and not pyvista.VERY_FIRST_RENDER:
        if interactive and (not self.off_screen):
            self.iren.Start()
    pyvista.VERY_FIRST_RENDER = False
    # for some reason iren needs to start before rendering on
    # Windows when running in interactive mode (python console,
    # Ipython console, Jupyter notebook) but only after the very
    # first render window

    self.render()

    # This has to be after the first render for some reason
    if title is None:
        title = self.title
    if title:
        self.ren_win.SetWindowName(title)
        self.title = title

    # Keep track of image for sphinx-gallery
    if pyvista.BUILDING_GALLERY or screenshot:
        # always save screenshots for sphinx_gallery
        self.last_image = self.screenshot(screenshot, return_img=True)
        self.last_image_depth = self.get_image_depth()
    disp = None

    # See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270
    if interactive and (not self.off_screen):
        try:  # interrupts will be caught here
            log.debug('Starting iren')
            self.update_style()
            if not interactive_update:
                self.iren.Start()
            self.iren.Initialize()
        except KeyboardInterrupt:
            log.debug('KeyboardInterrupt')
            self.close()
            raise KeyboardInterrupt

    # In the event that the user hits the exit-button on the GUI (on
    # Windows OS) then it must be finalized and deleted as accessing it
    # will kill the kernel.
    # Here we check for that and clean it up before moving on to any of
    # the closing routines that might try to still access that
    # render window.
    if not self.ren_win.IsCurrent():
        self._clear_ren_win()  # The ren_win is deleted
        # proper screenshots cannot be saved if this happens
        if not auto_close:
            warnings.warn("`auto_close` ignored: by clicking the exit button, you have destroyed the render window and we have to close it out.")
            auto_close = True
    # NOTE: after this point, nothing from the render window can be accessed
    #       as if a user presed the close button, then it destroys the
    #       the render view and a stream of errors will kill the Python
    #       kernel if code here tries to access that renderer.
    #       See issues #135 and #186 for insight before editing the
    #       remainder of this function.

    # Get camera position before closing
    cpos = self.camera_position

    if self.notebook and use_ipyvtk:
        # Widgets do not work in spyder
        if any('SPYDER' in name for name in os.environ):
            warnings.warn('``use_ipyvtk`` is incompatible with Spyder.\n'
                          'Use notebook=False for interactive '
                          'plotting within spyder')
        try:
            from ipyvtk_simple.viewer import ViewInteractiveWidget
        except ImportError:
            raise ImportError('Please install `ipyvtk_simple` to use this feature:'
                              '\thttps://github.com/Kitware/ipyvtk-simple')
        # Have to leave the Plotter open for the widget to use
        auto_close = False
        disp = ViewInteractiveWidget(self.ren_win, on_close=self.close,
                                     transparent_background=self.image_transparent_background)

    # If notebook is true and ipyvtk_simple display failed:
    if self.notebook and (disp is None):
        import PIL.Image
        # sanity check
        try:
            import IPython
        except ImportError:
            raise ImportError('Install IPython to display image in a notebook')
        if not hasattr(self, 'last_image'):
            self.last_image = self.screenshot(screenshot, return_img=True)
        disp = IPython.display.display(PIL.Image.fromarray(self.last_image))

    # Cleanup
    if auto_close:
        self.close()

    # Simply display the result: either ipyvtk_simple object or image display
    if self.notebook:
        if return_viewer:  # developer option
            return disp
        from IPython import display
        display.display_html(disp)

    # If user asked for screenshot, return as numpy array after camera
    # position
    if return_img or screenshot is True:
        return cpos, self.last_image

    # default to returning last used camera position
    return cpos
    def add_title(self, title, font_size=18, color=None, font=None,
                  shadow=False):
        """Add text to the top center of the plot.

        This is merely a convenience method that calls ``add_text``
        with ``position='upper_edge'``.

        Parameters
        ----------
        title : str
            The text to add to the top of the rendering.

        font_size : int, optional
            Size of the title font.  Defaults to 18.

        color : optional
            Color of the text; forwarded unchanged to ``add_text``.

        font : string, optional
            Font name may be courier, times, or arial.

        shadow : bool, optional
            Adds a black shadow to the text. Defaults to False

        Returns
        -------
        textActor : vtk.vtkTextActor
            Text actor added to plot.

        """
        # add additional spacing from the top of the figure by default
        title = '\n' + title
        # The fixed name 'title' means calling this again replaces the
        # previous title actor instead of stacking a new one on top.
        return self.add_text(title, position='upper_edge',
                             font_size=font_size, color=color, font=font,
                             shadow=shadow, name='title', viewport=False)
def _style_factory(klass):
    """Create a subclass with capturing ability, return it."""
    # We have to use a custom subclass for this because the default ones
    # swallow the release events
    # http://vtk.1045678.n5.nabble.com/Mouse-button-release-event-is-still-broken-in-VTK-6-0-0-td5724762.html  # noqa
    base_class = getattr(vtk, 'vtkInteractorStyle' + klass)

    class CustomStyle(base_class):
        """Interactor style that routes left-button events to one renderer."""

        def __init__(self, parent):
            super().__init__()
            # Weak reference: the style must not keep the plotter alive.
            self._parent = weakref.ref(parent)
            for event_name, handler in (
                    ("LeftButtonPressEvent", self._press),
                    ("LeftButtonReleaseEvent", self._release)):
                self.AddObserver(event_name, partial(try_callback, handler))

        def _press(self, obj, event):
            # Figure out which renderer has the event and disable the
            # others
            super().OnLeftButtonDown()
            plotter = self._parent()
            if len(plotter.renderers) <= 1:
                return
            click_pos = plotter.iren.GetEventPosition()
            for renderer in plotter.renderers:
                renderer.SetInteractive(renderer.IsInViewport(*click_pos))

        def _release(self, obj, event):
            # Re-enable interaction on every renderer after the click ends.
            super().OnLeftButtonUp()
            plotter = self._parent()
            if len(plotter.renderers) <= 1:
                return
            for renderer in plotter.renderers:
                renderer.SetInteractive(True)

    return CustomStyle
# Tracks created plotters. At the end of the file as we need to
# define ``BasePlotter`` before including it in the type definition.
# Maps a plotter identifier (str) to the live plotter instance.
_ALL_PLOTTERS: Dict[str, BasePlotter] = {}
|
neural_gpu_trainer.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural GPU."""
from __future__ import print_function
import math
import os
import random
import sys
import threading
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
import program_utils
import data_utils as data
import neural_gpu as ngpu
import wmt_utils as wmt
# ---------------------------------------------------------------------------
# Command-line flags.
# ---------------------------------------------------------------------------

# Optimization and curriculum schedule.
tf.app.flags.DEFINE_float("lr", 0.1, "Learning rate.")
tf.app.flags.DEFINE_float("init_weight", 0.8, "Initial weights deviation.")
tf.app.flags.DEFINE_float("max_grad_norm", 4.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("cutoff", 1.2, "Cutoff at the gates.")
tf.app.flags.DEFINE_float("curriculum_ppx", 9.9, "Move curriculum if ppl < X.")
tf.app.flags.DEFINE_float("curriculum_seq", 0.3, "Move curriculum if seq < X.")
tf.app.flags.DEFINE_float("dropout", 0.1, "Dropout that much.")
tf.app.flags.DEFINE_float("grad_noise_scale", 0.0, "Gradient noise scale.")
tf.app.flags.DEFINE_float("max_sampling_rate", 0.1, "Maximal sampling rate.")
# Decoding / beam-search behavior.
tf.app.flags.DEFINE_float("length_norm", 0.0, "Length normalization.")
tf.app.flags.DEFINE_float("train_beam_freq", 0.0, "Beam-based training.")
tf.app.flags.DEFINE_float("train_beam_anneal", 20000, "How many steps anneal.")
tf.app.flags.DEFINE_integer("eval_beam_steps", 4, "How many beam steps eval.")
# Model and data sizes.
tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size.")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 100, "Steps per epoch.")
tf.app.flags.DEFINE_integer("nmaps", 64, "Number of floats in each cell.")
tf.app.flags.DEFINE_integer("vec_size", 64, "Size of word vectors.")
tf.app.flags.DEFINE_integer("train_data_size", 1000, "Training examples/len.")
tf.app.flags.DEFINE_integer("max_length", 40, "Maximum length.")
tf.app.flags.DEFINE_integer("random_seed", 125459, "Random seed.")
tf.app.flags.DEFINE_integer("nconvs", 2, "How many convolutions / 1 step.")
tf.app.flags.DEFINE_integer("kw", 3, "Kernel width.")
tf.app.flags.DEFINE_integer("kh", 3, "Kernel height.")
tf.app.flags.DEFINE_integer("height", 4, "Height.")
tf.app.flags.DEFINE_integer("mem_size", -1, "Memory size (sqrt)")
tf.app.flags.DEFINE_integer("soft_mem_size", 1024, "Softmax memory this size.")
# Parallel / distributed execution.
tf.app.flags.DEFINE_integer("num_gpus", 1, "Number of GPUs to use.")
tf.app.flags.DEFINE_integer("num_replicas", 1, "Number of replicas in use.")
tf.app.flags.DEFINE_integer("beam_size", 1, "Beam size during decoding. "
                            "If 0, no decoder, the non-extended Neural GPU.")
tf.app.flags.DEFINE_integer("max_target_vocab", 0,
                            "Maximal size of target vocabulary.")
tf.app.flags.DEFINE_integer("decode_offset", 0, "Offset for decoding.")
tf.app.flags.DEFINE_integer("task", -1, "Task id when running on borg.")
tf.app.flags.DEFINE_integer("nprint", 0, "How many test examples to print out.")
tf.app.flags.DEFINE_integer("eval_bin_print", 3, "How many bins step in eval.")
tf.app.flags.DEFINE_integer("mode", 0, "Mode: 0-train other-decode.")
# Architecture toggles.
tf.app.flags.DEFINE_bool("atrous", False, "Whether to use atrous convs.")
tf.app.flags.DEFINE_bool("layer_norm", False, "Do layer normalization.")
tf.app.flags.DEFINE_bool("quantize", False, "Whether to quantize variables.")
tf.app.flags.DEFINE_bool("do_train", True, "If false, only update memory.")
tf.app.flags.DEFINE_bool("rnn_baseline", False, "If true build an RNN instead.")
# Tokenization, vocabulary, and data paths.
tf.app.flags.DEFINE_bool("simple_tokenizer", False,
                         "If true, tokenize on spaces only, digits are 0.")
tf.app.flags.DEFINE_bool("normalize_digits", True,
                         "Whether to normalize digits with simple tokenizer.")
tf.app.flags.DEFINE_integer("vocab_size", 16, "Joint vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "/tmp/", "Directory to store models.")
tf.app.flags.DEFINE_string("test_file_prefix", "", "Files to test (.en,.fr).")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
                            "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_string("word_vector_file_en", "",
                           "Optional file with word vectors to start training.")
tf.app.flags.DEFINE_string("word_vector_file_fr", "",
                           "Optional file with word vectors to start training.")
tf.app.flags.DEFINE_string("problem", "wmt", "What problem are we solving?.")
# Cluster configuration.
tf.app.flags.DEFINE_integer("ps_tasks", 0, "Number of ps tasks used.")
tf.app.flags.DEFINE_string("master", "", "Name of the TensorFlow master.")

FLAGS = tf.app.flags.FLAGS

# Number of extra, longer evaluation bins kept beyond max_length
# (used in initialize() when trimming data.bins).
EXTRA_EVAL = 10
# NOTE(review): presumably the per-round length increment for evaluation;
# its consumer is outside this excerpt — confirm against the decode code.
EVAL_LEN_INCR = 8
# Raised to 3.5 by initialize() when the simple tokenizer is used.
# NOTE(review): consumer is outside this excerpt — confirm its exact role.
MAXLEN_F = 2.0
def zero_split(tok_list, append=None):
  """Split tok_list (list of ints) on 0s, append int to all parts if given.

  Returns a pair (parts, longest): parts is the list of 0-delimited
  sublists (each extended with `append` when it is not None), and longest
  is the length of the longest resulting part.
  """
  parts, segment = [], []
  for token in tok_list:
    if token == 0:
      # A zero closes the current part (zeros themselves are dropped).
      parts.append(segment)
      segment = []
    else:
      segment.append(token)
  # The trailing segment is always a part, even when it is empty.
  parts.append(segment)
  if append is not None:
    for part in parts:
      part.append(append)
  longest = max(len(part) for part in parts)
  return parts, longest
def read_data(source_path, target_path, buckets, max_size=None, print_out=True):
  """Read data from source and target files and put into buckets.

  Args:
    source_path: path to the files with token-ids for the source language.
    target_path: path to the file with token-ids for the target language;
      it must be aligned with the source file: n-th line contains the desired
      output for n-th line from the source_path.
    buckets: the buckets to use.
    max_size: maximum number of lines to read, all other will be ignored;
      if 0 or None, data files will be read completely (no limit).
      If set to 1, no data will be returned (empty lists of the right form).
    print_out: whether to print out status or not.

  Returns:
    data_set: a list of length len(_buckets); data_set[n] contains a list of
      (source, target) pairs read from the provided data files that fit
      into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
      len(target) < _buckets[n][1]; source and target are lists of token-ids.
  """
  data_set = [[] for _ in buckets]
  counter = 0
  # max_size == 1 is a sentinel: return only the empty bucket structure.
  if max_size != 1:
    with tf.gfile.GFile(source_path, mode="r") as source_file:
      with tf.gfile.GFile(target_path, mode="r") as target_file:
        # Read the aligned files line-by-line in lockstep.
        source, target = source_file.readline(), target_file.readline()
        while source and target and (not max_size or counter < max_size):
          counter += 1
          if counter % 100000 == 0 and print_out:
            print(" reading data line %d" % counter)
            sys.stdout.flush()
          source_ids = [int(x) for x in source.split()]
          target_ids = [int(x) for x in target.split()]
          # A line may pack several 0-separated examples; every target
          # part additionally gets an EOS token appended.
          source_ids, source_len = zero_split(source_ids)
          target_ids, target_len = zero_split(target_ids, append=wmt.EOS_ID)
          # Place the pair into the first bucket both sides fit into.
          for bucket_id, size in enumerate(buckets):
            if source_len <= size and target_len <= size:
              data_set[bucket_id].append([source_ids, target_ids])
              break
          source, target = source_file.readline(), target_file.readline()
  return data_set
# Training data and bucket scales, filled in lazily — possibly from
# background reader threads (see read_data_into_global).
global_train_set = {"wmt": []}
train_buckets_scale = {"wmt": []}


def calculate_buckets_scale(data_set, buckets, problem):
  """Calculate buckets scales for the given data set."""
  # A bucket scale is a list of increasing numbers from 0 to 1 that we'll
  # use to select a bucket: the length of [scale[i], scale[i+1]] is
  # proportional to the size of the i-th training bucket.
  bucket_sizes = [len(data_set[bucket]) for bucket in range(len(buckets))]
  total = max(1, float(sum(bucket_sizes)))
  scale, running = [], 0
  for size in bucket_sizes:
    running += size
    scale.append(running / total)
  train_buckets_scale.setdefault(problem, []).append(scale)
  return total
def read_data_into_global(source_path, target_path, buckets,
                          max_size=None, print_out=True):
  """Read data into the global variables (can be in a separate thread)."""
  # pylint: disable=global-variable-not-assigned
  global global_train_set, train_buckets_scale
  # pylint: enable=global-variable-not-assigned
  bucketed = read_data(source_path, target_path, buckets, max_size, print_out)
  # Each call appends a (possibly larger) snapshot; consumers read the
  # latest entry of global_train_set["wmt"].
  global_train_set["wmt"].append(bucketed)
  total = calculate_buckets_scale(bucketed, buckets, "wmt")
  if print_out:
    print(" Finished global data reading (%d)." % total)
def initialize(sess=None):
  """Initialize data and model.

  Creates the training/checkpoint directories, seeds the RNGs, prepares
  the training and development data (WMT or algorithmic), builds the
  NeuralGPU model (plus an optional beam model) and restores it from a
  checkpoint when one exists.

  Args:
    sess: an existing tf.Session to use; when None, a tf.train.Supervisor
      is created and the session is obtained from it.

  Returns:
    A tuple (model, beam_model, min_length, max_length, checkpoint_dir,
    (global_train_set, dev_set, en_path, fr_path), supervisor, session);
    supervisor is None when `sess` was provided.
  """
  global MAXLEN_F
  # Create training directory if it does not exist.
  if not tf.gfile.IsDirectory(FLAGS.train_dir):
    data.print_out("Creating training directory %s." % FLAGS.train_dir)
    tf.gfile.MkDir(FLAGS.train_dir)
  decode_suffix = "beam%dln%d" % (FLAGS.beam_size,
                                  int(100 * FLAGS.length_norm))
  if FLAGS.mode == 0:
    decode_suffix = ""
  if FLAGS.task >= 0:
    data.log_filename = os.path.join(FLAGS.train_dir,
                                     "log%d%s" % (FLAGS.task, decode_suffix))
  else:
    data.log_filename = os.path.join(FLAGS.train_dir, "neural_gpu/log")
  # Set random seed.
  if FLAGS.random_seed > 0:
    seed = FLAGS.random_seed + max(0, FLAGS.task)
    tf.set_random_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
  # Check data sizes.
  assert data.bins
  max_length = min(FLAGS.max_length, data.bins[-1])
  while len(data.bins) > 1 and data.bins[-2] >= max_length + EXTRA_EVAL:
    data.bins = data.bins[:-1]
  if sess is None and FLAGS.task == 0 and FLAGS.num_replicas > 1:
    if max_length > 60:
      # BUGFIX: was "max_length * 1 / 2", which produces a float under
      # Python 3 and poisons all the length arithmetic below.
      max_length = max_length // 2  # Save memory on chief.
  min_length = min(14, max_length - 3) if FLAGS.problem == "wmt" else 3
  for p in FLAGS.problem.split("-"):
    if p in ["progeval", "progsynth"]:
      min_length = max(26, min_length)
  assert max_length + 1 > min_length
  while len(data.bins) > 1 and data.bins[-2] >= max_length + EXTRA_EVAL:
    data.bins = data.bins[:-1]
  # Create checkpoint directory if it does not exist.
  if FLAGS.mode == 0 or FLAGS.task < 0:
    checkpoint_dir = os.path.join(FLAGS.train_dir, "neural_gpu%s"
                                  % ("" if FLAGS.task < 0 else str(FLAGS.task)))
  else:
    checkpoint_dir = FLAGS.train_dir
  if not tf.gfile.IsDirectory(checkpoint_dir):
    data.print_out("Creating checkpoint directory %s." % checkpoint_dir)
    tf.gfile.MkDir(checkpoint_dir)
  # Prepare data.
  if FLAGS.problem == "wmt":
    # Prepare WMT data.
    data.print_out("Preparing WMT data in %s" % FLAGS.data_dir)
    if FLAGS.simple_tokenizer:
      MAXLEN_F = 3.5
      (en_train, fr_train, en_dev, fr_dev,
       en_path, fr_path) = wmt.prepare_wmt_data(
           FLAGS.data_dir, FLAGS.vocab_size,
           tokenizer=wmt.space_tokenizer,
           normalize_digits=FLAGS.normalize_digits)
    else:
      (en_train, fr_train, en_dev, fr_dev,
       en_path, fr_path) = wmt.prepare_wmt_data(
           FLAGS.data_dir, FLAGS.vocab_size)
    # Read data into buckets and compute their sizes.
    fr_vocab, rev_fr_vocab = wmt.initialize_vocabulary(fr_path)
    data.vocab = fr_vocab
    data.rev_vocab = rev_fr_vocab
    data.print_out("Reading development and training data (limit: %d)."
                   % FLAGS.max_train_data_size)
    dev_set = {}
    dev_set["wmt"] = read_data(en_dev, fr_dev, data.bins)
    def data_read(size, print_out):
      """Read `size` training lines into the global training set."""
      read_data_into_global(en_train, fr_train, data.bins, size, print_out)
    # Read a small slice synchronously so training can start immediately,
    # then load progressively larger slices in background threads.
    data_read(50000, False)
    read_thread_small = threading.Thread(
        name="reading-data-small", target=lambda: data_read(900000, False))
    read_thread_small.start()
    read_thread_full = threading.Thread(
        name="reading-data-full",
        target=lambda: data_read(FLAGS.max_train_data_size, True))
    read_thread_full.start()
    data.print_out("Data reading set up.")
  else:
    # Prepare algorithmic data.
    en_path, fr_path = None, None
    tasks = FLAGS.problem.split("-")
    data_size = FLAGS.train_data_size
    for t in tasks:
      data.print_out("Generating data for %s." % t)
      if t in ["progeval", "progsynth"]:
        data.init_data(t, data.bins[-1], 20 * data_size, FLAGS.vocab_size)
        if len(program_utils.prog_vocab) > FLAGS.vocab_size - 2:
          raise ValueError("Increase vocab_size to %d for prog-tasks."
                           % (len(program_utils.prog_vocab) + 2))
        data.rev_vocab = program_utils.prog_vocab
        data.vocab = program_utils.prog_rev_vocab
      else:
        for l in xrange(max_length + EXTRA_EVAL - 1):
          data.init_data(t, l, data_size, FLAGS.vocab_size)
        data.init_data(t, data.bins[-2], data_size, FLAGS.vocab_size)
        data.init_data(t, data.bins[-1], data_size, FLAGS.vocab_size)
      if t not in global_train_set:
        global_train_set[t] = []
      global_train_set[t].append(data.train_set[t])
      calculate_buckets_scale(data.train_set[t], data.bins, t)
    dev_set = data.test_set
  # Grid-search parameters.
  lr = FLAGS.lr
  init_weight = FLAGS.init_weight
  max_grad_norm = FLAGS.max_grad_norm
  if sess is not None and FLAGS.task > -1:
    def job_id_factor(step):
      """If jobid / step mod 3 is 0, 1, 2: say 0, 1, -1."""
      # BUGFIX: use floor division — plain "/" returns a float under
      # Python 3, so the factor left the intended {-1, 0, 1} set.
      return ((((FLAGS.task // step) % 3) + 1) % 3) - 1
    lr *= math.pow(2, job_id_factor(1))
    init_weight *= math.pow(1.5, job_id_factor(3))
    max_grad_norm *= math.pow(2, job_id_factor(9))
  # Print out parameters.
  curriculum = FLAGS.curriculum_seq
  msg1 = ("layers %d kw %d h %d kh %d batch %d noise %.2f"
          % (FLAGS.nconvs, FLAGS.kw, FLAGS.height, FLAGS.kh,
             FLAGS.batch_size, FLAGS.grad_noise_scale))
  msg2 = ("cut %.2f lr %.3f iw %.2f cr %.2f nm %d d%.4f gn %.2f %s"
          % (FLAGS.cutoff, lr, init_weight, curriculum, FLAGS.nmaps,
             FLAGS.dropout, max_grad_norm, msg1))
  data.print_out(msg2)
  # Create model and initialize it.
  tf.get_variable_scope().set_initializer(
      tf.orthogonal_initializer(gain=1.8 * init_weight))
  max_sampling_rate = FLAGS.max_sampling_rate if FLAGS.mode == 0 else 0.0
  o = FLAGS.vocab_size if FLAGS.max_target_vocab < 1 else FLAGS.max_target_vocab
  ngpu.CHOOSE_K = FLAGS.soft_mem_size
  do_beam_model = FLAGS.train_beam_freq > 0.0001 and FLAGS.beam_size > 1
  beam_size = FLAGS.beam_size if FLAGS.mode > 0 and not do_beam_model else 1
  beam_size = min(beam_size, FLAGS.beam_size)
  beam_model = None
  def make_ngpu(cur_beam_size, back):
    """Build a NeuralGPU sharing the hyper-parameters computed above."""
    return ngpu.NeuralGPU(
        FLAGS.nmaps, FLAGS.vec_size, FLAGS.vocab_size, o,
        FLAGS.dropout, max_grad_norm, FLAGS.cutoff, FLAGS.nconvs,
        FLAGS.kw, FLAGS.kh, FLAGS.height, FLAGS.mem_size,
        lr / math.sqrt(FLAGS.num_replicas), min_length + 3, FLAGS.num_gpus,
        FLAGS.num_replicas, FLAGS.grad_noise_scale, max_sampling_rate,
        atrous=FLAGS.atrous, do_rnn=FLAGS.rnn_baseline,
        do_layer_norm=FLAGS.layer_norm, beam_size=cur_beam_size, backward=back)
  if sess is None:
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
      model = make_ngpu(beam_size, True)
      if do_beam_model:
        tf.get_variable_scope().reuse_variables()
        beam_model = make_ngpu(FLAGS.beam_size, False)
  else:
    model = make_ngpu(beam_size, True)
    if do_beam_model:
      tf.get_variable_scope().reuse_variables()
      beam_model = make_ngpu(FLAGS.beam_size, False)
  sv = None
  if sess is None:
    # The supervisor configuration has a few overriden options.
    sv = tf.train.Supervisor(logdir=checkpoint_dir,
                             is_chief=(FLAGS.task < 1),
                             saver=model.saver,
                             summary_op=None,
                             save_summaries_secs=60,
                             save_model_secs=15 * 60,
                             global_step=model.global_step)
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = sv.PrepareSession(FLAGS.master, config=config)
  data.print_out("Created model. Checkpoint dir %s" % checkpoint_dir)
  # Load model from parameters if a checkpoint exists.
  ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
  if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + ".index"):
    data.print_out("Reading model parameters from %s"
                   % ckpt.model_checkpoint_path)
    model.saver.restore(sess, ckpt.model_checkpoint_path)
  elif sv is None:
    sess.run(tf.global_variables_initializer())
    data.print_out("Initialized variables (no supervisor mode).")
  elif FLAGS.task < 1 and FLAGS.mem_size > 0:
    # sess.run(model.mem_norm_op)
    data.print_out("Created new model and normalized mem (on chief).")
  # Return the model and needed variables.
  return (model, beam_model, min_length, max_length, checkpoint_dir,
          (global_train_set, dev_set, en_path, fr_path), sv, sess)
def m_step(model, beam_model, sess, batch_size, inp, target, bucket, nsteps, p):
  """Evaluation multi-step for program synthesis.

  Repeatedly runs the beam model to propose candidates, feeding each
  round's best first-beam back in as the next input, then does one final
  forward step against the true target.

  Args:
    model: forward model (no training — all steps pass do_backward=False).
    beam_model: beam-search model used to propose candidates.
    sess: TensorFlow session.
    batch_size: number of examples in the batch.
    inp: input batch array.
    target: target batch array.
    bucket: bucket id for this batch.
    nsteps: number of beam rounds.  NOTE(review): must be >= 1 — with
      nsteps == 0 the reference to `new_target` below is unbound.
    p: problem name, forwarded to beam scoring.

  Returns:
    (loss, result, last_new_target, scores) where scores[i] holds the
    per-example running-best beam score after round i+1.
  """
  # scores[0] is a -11.0 floor so max() below always has a baseline.
  state, scores, hist = None, [[-11.0 for _ in xrange(batch_size)]], []
  for _ in xrange(nsteps):
    # Get the best beam (no training, just forward model).
    new_target, new_first, new_inp, new_scores = get_best_beam(
        beam_model, sess, inp, target,
        batch_size, FLAGS.beam_size, bucket, hist, p, test_mode=True)
    hist.append(new_first)
    _, _, _, state = model.step(sess, inp, new_target, False, state=state)
    inp = new_inp
    # Keep the best score seen so far for every example.
    scores.append([max(scores[-1][i], new_scores[i])
                   for i in xrange(batch_size)])
  # The final step with the true target.
  loss, res, _, _ = model.step(sess, inp, target, False, state=state)
  return loss, res, new_target, scores[1:]
def single_test(bin_id, model, sess, nprint, batch_size, dev, p, print_out=True,
                offset=None, beam_model=None):
  """Test model on test data of length l using the given session.

  Args:
    bin_id: index of the length-bin to evaluate.
    model: forward model used for evaluation.
    sess: TensorFlow session.
    nprint: how many test examples to print out (forwarded to accuracy).
    batch_size: number of examples to evaluate.
    dev: dict problem-name -> per-bin dev data.
    p: problem name.
    print_out: whether to log the per-bin result line.
    offset: optional offset into the dev data (forwarded to get_batch).
    beam_model: optional beam model; when given and beam_size > 1, the
      multi-step beam evaluation (m_step) is used instead of one step.

  Returns:
    (errors, seq_err, loss) with errors and seq_err as fractions in [0, 1].
  """
  if not dev[p][bin_id]:
    # No data in this bin: report NA and bail out.
    data.print_out(" bin %d (%d)\t%s\tppl NA errors NA seq-errors NA"
                   % (bin_id, data.bins[bin_id], p))
    return 1.0, 1.0, 0.0
  inpt, target = data.get_batch(
      bin_id, batch_size, dev[p], FLAGS.height, offset)
  if FLAGS.beam_size > 1 and beam_model:
    loss, res, new_tgt, scores = m_step(
        model, beam_model, sess, batch_size, inpt, target, bin_id,
        FLAGS.eval_beam_steps, p)
    # Report per-round (average, max) beam scores.
    score_avgs = [sum(s) / float(len(s)) for s in scores]
    score_maxs = [max(s) for s in scores]
    score_str = ["(%.2f, %.2f)" % (score_avgs[i], score_maxs[i])
                 for i in xrange(FLAGS.eval_beam_steps)]
    data.print_out(" == scores (avg, max): %s" % "; ".join(score_str))
    errors, total, seq_err = data.accuracy(inpt, res, target, batch_size,
                                           nprint, new_tgt, scores[-1])
  else:
    loss, res, _, _ = model.step(sess, inpt, target, False)
    errors, total, seq_err = data.accuracy(inpt, res, target, batch_size,
                                           nprint)
  # Normalize raw counts into fractions.
  seq_err = float(seq_err) / batch_size
  if total > 0:
    errors = float(errors) / total
  if print_out:
    data.print_out(" bin %d (%d)\t%s\tppl %.2f errors %.2f seq-errors %.2f"
                   % (bin_id, data.bins[bin_id], p, data.safe_exp(loss),
                      100 * errors, 100 * seq_err))
  return (errors, seq_err, loss)
def assign_vectors(word_vector_file, embedding_key, vocab_path, sess):
  """Assign the embedding_key variable from the given word vectors file.

  Args:
    word_vector_file: text file with lines of the form "word v1 v2 ... vn".
    embedding_key: full name of the trainable variable to overwrite.
    vocab_path: path to the vocabulary file matching this embedding.
    sess: TensorFlow session used to run the assignment.

  Exits the process (sys.exit(1)) when the file or variable is missing.
  """
  # For words in the word vector file, set their embedding at start.
  if not tf.gfile.Exists(word_vector_file):
    data.print_out("Word vector file does not exist: %s" % word_vector_file)
    sys.exit(1)
  vocab, _ = wmt.initialize_vocabulary(vocab_path)
  vectors_variable = [v for v in tf.trainable_variables()
                      if embedding_key == v.name]
  if len(vectors_variable) != 1:
    data.print_out("Word vector variable not found or too many.")
    sys.exit(1)
  vectors_variable = vectors_variable[0]
  vectors = vectors_variable.eval()
  data.print_out("Pre-setting word vectors from %s" % word_vector_file)
  with tf.gfile.GFile(word_vector_file, mode="r") as f:
    # Lines have format: dog 0.045123 -0.61323 0.413667 ...
    for line in f:
      line_parts = line.split()
      # The first part is the word.
      word = line_parts[0]
      if word in vocab:
        # Remaining parts are components of the vector.
        # BUGFIX: np.array(map(...)) builds a useless 0-d object array on
        # Python 3 (len() then raises); materialize the floats in a list.
        word_vector = np.array([float(x) for x in line_parts[1:]])
        if len(word_vector) != FLAGS.vec_size:
          data.print_out("Warn: Word '%s', Expecting vector size %d, "
                         "found %d" % (word, FLAGS.vec_size,
                                       len(word_vector)))
        else:
          vectors[vocab[word]] = word_vector
  # Assign the modified vectors to the vectors_variable in the graph.
  sess.run([vectors_variable.initializer],
           {vectors_variable.initializer.inputs[1]: vectors})
def print_vectors(embedding_key, vocab_path, word_vector_file):
  """Print vectors from the given variable.

  Dumps every row of the embedding variable named `embedding_key` to
  `word_vector_file` in the textual "word v1 v2 ..." format.

  Exits the process (sys.exit(1)) when the variable cannot be found.
  """
  _, rev_vocab = wmt.initialize_vocabulary(vocab_path)
  vectors_variable = [v for v in tf.trainable_variables()
                      if embedding_key == v.name]
  if len(vectors_variable) != 1:
    data.print_out("Word vector variable not found or too many.")
    sys.exit(1)
  vectors_variable = vectors_variable[0]
  # NOTE(review): .eval() requires an active default session — confirm
  # callers always invoke this inside one.
  vectors = vectors_variable.eval()
  l, s = vectors.shape[0], vectors.shape[1]
  data.print_out("Printing %d word vectors from %s to %s."
                 % (l, embedding_key, word_vector_file))
  with tf.gfile.GFile(word_vector_file, mode="w") as f:
    # Lines have format: dog 0.045123 -0.61323 0.413667 ...
    for i in xrange(l):
      f.write(rev_vocab[i])
      for j in xrange(s):
        f.write(" %.8f" % vectors[i][j])
      f.write("\n")
def get_bucket_id(train_buckets_scale_c, max_cur_length, data_set):
  """Get a random bucket id."""
  # Choose a bucket according to data distribution. Pick a random number
  # in [0, 1] and use the corresponding interval in train_buckets_scale.
  def pick(threshold):
    """Smallest bucket whose cumulative scale exceeds threshold, backed
    off to the nearest lower non-empty bucket."""
    chosen = min(i for i in range(len(train_buckets_scale_c))
                 if train_buckets_scale_c[i] > threshold)
    while chosen > 0 and not data_set[chosen]:
      chosen -= 1
    return chosen

  roll = np.random.random_sample()
  bucket_id = pick(roll)
  # While the chosen bucket exceeds the curriculum length, re-roll toward
  # shorter buckets — up to 10 times with probability 0.9, else once.
  retries = 10 if np.random.random_sample() < 0.9 else 1
  for _ in range(retries):
    if data.bins[bucket_id] > max_cur_length:
      roll = min(roll, np.random.random_sample())
      bucket_id = pick(roll)
  return bucket_id
def score_beams(beams, target, inp, history, p,
                print_out=False, test_mode=False):
  """Score beams.

  Picks the best beam for `target` out of `beams`.  Program synthesis is
  delegated to score_beams_prog; in test mode only the first beam is
  checked (10.0 for an exact prefix match, else 0.0).  Otherwise a beam is
  scored by the fraction of matched target positions, with a 20.0 penalty
  for repeating a beam from `history` unless the match is perfect.

  Args:
    beams: list of candidate token-id lists.
    target: list of target token ids.
    inp: input slice (used only by the program-synthesis path).
    history: previously selected beams.
    p: problem name ("progsynth", "wmt", ...).
    print_out: verbosity flag, forwarded to score_beams_prog.
    test_mode: if True, only check the first beam for an exact match.

  Returns:
    (best_beam, best_score).
  """
  if p == "progsynth":
    return score_beams_prog(beams, target, inp, history, print_out, test_mode)
  elif test_mode:
    return beams[0], 10.0 if str(beams[0][:len(target)]) == str(target) else 0.0
  else:
    history_s = [str(h) for h in history]
    best, best_score, tgt, eos_id = None, -1000.0, target, None
    if p == "wmt":
      eos_id = wmt.EOS_ID
    if eos_id and eos_id in target:
      tgt = target[:target.index(eos_id)]
    # BUGFIX: guard against an empty (e.g. EOS-leading) target, which
    # previously raised ZeroDivisionError in the score below.
    tgt_len = float(max(len(tgt), 1))
    for beam in beams:
      if eos_id and eos_id in beam:
        beam = beam[:beam.index(eos_id)]
      l = min(len(tgt), len(beam))
      # Fraction of target positions this beam matches.
      score = len([i for i in range(l) if tgt[i] == beam[i]]) / tgt_len
      hist_score = 20.0 if str([b for b in beam if b > 0]) in history_s else 0.0
      if score < 1.0:
        score -= hist_score
      if score > best_score:
        best = beam
        best_score = score
    return best, best_score
def score_beams_prog(beams, target, inp, history, print_out=False,
                     test_mode=False):
  """Score beams for program synthesis.

  A beam (candidate program) is scored by executing it on the example
  inputs embedded in `inp` — plus random extra inputs when not in test
  mode — and comparing its outputs to those of the target program,
  combined with token-set similarity, error and history penalties.

  Returns:
    (best_beam, best_score).
  """
  # Decode the target program and the previously-tried programs to text.
  tgt_prog = linearize(target, program_utils.prog_vocab, True, 1)
  hist_progs = [linearize(h, program_utils.prog_vocab, True, 1)
                for h in history]
  tgt_set = set(target)
  if print_out:
    print("target: ", tgt_prog)
  inps, tgt_outs = [], []
  # Rows 1..3 of inp carry "[ inputs ] outputs" examples; parse them out.
  for i in xrange(3):
    ilist = [inp[i + 1, l] for l in xrange(inp.shape[1])]
    clist = [program_utils.prog_vocab[x] for x in ilist if x > 0]
    olist = clist[clist.index("]") + 1:]  # outputs
    clist = clist[1:clist.index("]")]  # inputs
    inps.append([int(x) for x in clist])
    if olist[0] == "[":  # olist may be [int] or just int
      tgt_outs.append(str([int(x) for x in olist[1:-1]]))
    else:
      if len(olist) == 1:
        tgt_outs.append(olist[0])
      else:
        # Unexpected output shape; dump debugging info and keep going.
        print([program_utils.prog_vocab[x] for x in ilist if x > 0])
        print(olist)
        print(tgt_prog)
        print(program_utils.evaluate(tgt_prog, {"a": inps[-1]}))
        print("AAAAA")
        tgt_outs.append(olist[0])
  if not test_mode:
    # Add 7 random extra inputs and evaluate the target program on them.
    for _ in xrange(7):
      ilen = np.random.randint(len(target) - 3) + 1
      inps.append([random.choice(range(-15, 15)) for _ in range(ilen)])
    tgt_outs.extend([program_utils.evaluate(tgt_prog, {"a": inp})
                     for inp in inps[3:]])
  best, best_prog, best_score = None, "", -1000.0
  for beam in beams:
    b_prog = linearize(beam, program_utils.prog_vocab, True, 1)
    b_set = set(beam)
    # Jaccard similarity between the target and beam token sets.
    jsim = len(tgt_set & b_set) / float(len(tgt_set | b_set))
    b_outs = [program_utils.evaluate(b_prog, {"a": inp}) for inp in inps]
    errs = len([x for x in b_outs if x == "ERROR"])
    imatches = len([i for i in xrange(3) if b_outs[i] == tgt_outs[i]])
    # A beam matching all 3 given examples counts as "perfect".
    perfect = 10.0 if imatches == 3 else 0.0
    hist_score = 20.0 if b_prog in hist_progs else 0.0
    if test_mode:
      score = perfect - errs
    else:
      matches = len([i for i in xrange(10) if b_outs[i] == tgt_outs[i]])
      score = perfect + matches + jsim - errs
    # Penalize repeating a historical program unless the beam is perfect.
    if score < 10.0:
      score -= hist_score
    # print b_prog
    # print "jsim: ", jsim, " errs: ", errs, " mtchs: ", matches, " s: ", score
    if score > best_score:
      best = beam
      best_prog = b_prog
      best_score = score
  if print_out:
    print("best score: ", best_score, " best prog: ", best_prog)
  return best, best_score
def get_best_beam(beam_model, sess, inp, target, batch_size, beam_size,
                  bucket, history, p, test_mode=False):
  """Run beam_model, score beams, and return the best as target and in input.

  Args:
    beam_model: model run in beam-search mode.
    sess: TensorFlow session.
    inp: input batch; new inputs are written into a copy of it.
    target: target batch; entries are overwritten IN PLACE for examples
      whose best beam scores >= 10.0.
    batch_size: number of examples in the batch.
    beam_size: number of beams produced per example.
    bucket: bucket id (data.bins[bucket] gives the padded length).
    history: previously selected beams, one array per earlier round.
    p: problem name, forwarded to score_beams.
    test_mode: forwarded to score_beams when picking the new target.

  Returns:
    (new_target, new_first, new_inp, scores) as numpy arrays / list.
  """
  _, output_logits, _, _ = beam_model.step(
      sess, inp, target, None, beam_size=FLAGS.beam_size)
  new_targets, new_firsts, scores, new_inp = [], [], [], np.copy(inp)
  for b in xrange(batch_size):
    outputs = []
    history_b = [[h[b, 0, l] for l in xrange(data.bins[bucket])]
                 for h in history]
    # Beams for example b are interleaved across the logits batch axis.
    for beam_idx in xrange(beam_size):
      outputs.append([int(o[beam_idx * batch_size + b])
                      for o in output_logits])
    target_t = [target[b, 0, l] for l in xrange(data.bins[bucket])]
    best, best_score = score_beams(
        outputs, [t for t in target_t if t > 0], inp[b, :, :],
        [[t for t in h if t > 0] for h in history_b], p, test_mode=test_mode)
    scores.append(best_score)
    if 1 in best:  # Only until _EOS.
      best = best[:best.index(1) + 1]
    # Zero-pad the chosen beam back to the full bin length.
    best += [0 for _ in xrange(len(target_t) - len(best))]
    new_targets.append([best])
    first, _ = score_beams(
        outputs, [t for t in target_t if t > 0], inp[b, :, :],
        [[t for t in h if t > 0] for h in history_b], p, test_mode=True)
    if 1 in first:  # Only until _EOS.
      first = first[:first.index(1) + 1]
    first += [0 for _ in xrange(len(target_t) - len(first))]
    new_inp[b, 0, :] = np.array(first, dtype=np.int32)
    new_firsts.append([first])
  # Change target if we found a great answer.
  new_target = np.array(new_targets, dtype=np.int32)
  for b in xrange(batch_size):
    if scores[b] >= 10.0:
      target[b, 0, :] = new_target[b, 0, :]
  new_first = np.array(new_firsts, dtype=np.int32)
  return new_target, new_first, new_inp, scores
def train():
  """Train the model.

  Supervisor-driven loop: each step picks a problem and bucket, optionally
  runs several beam-bootstrapped steps (training on the best beam as target),
  then takes a gradient step on the true target. Every
  FLAGS.steps_per_checkpoint steps the chief reports statistics, advances the
  curriculum when error is low enough, decays the learning rate when
  perplexity regresses, saves a checkpoint and evaluates on the dev set.
  """
  batch_size = FLAGS.batch_size * FLAGS.num_gpus
  (model, beam_model, min_length, max_length, checkpoint_dir,
   (train_set, dev_set, en_vocab_path, fr_vocab_path), sv, sess) = initialize()
  with sess.as_default():
    quant_op = model.quantize_op
    max_cur_length = min(min_length + 3, max_length)
    prev_acc_perp = [1000000 for _ in xrange(5)]
    prev_seq_err = 1.0
    is_chief = FLAGS.task < 1
    do_report = False

    # Main training loop.
    while not sv.ShouldStop():
      global_step, max_cur_length, learning_rate = sess.run(
          [model.global_step, model.cur_length, model.lr])
      acc_loss, acc_l1, acc_total, acc_errors, acc_seq_err = 0.0, 0.0, 0, 0, 0
      acc_grad_norm, step_count, step_c1, step_time = 0.0, 0, 0, 0.0

      # For words in the word vector file, set their embedding at start.
      bound1 = FLAGS.steps_per_checkpoint - 1
      if FLAGS.word_vector_file_en and global_step < bound1 and is_chief:
        assign_vectors(FLAGS.word_vector_file_en, "embedding:0",
                       en_vocab_path, sess)
        if FLAGS.max_target_vocab < 1:
          assign_vectors(FLAGS.word_vector_file_en, "target_embedding:0",
                         en_vocab_path, sess)

      if FLAGS.word_vector_file_fr and global_step < bound1 and is_chief:
        assign_vectors(FLAGS.word_vector_file_fr, "embedding:0",
                       fr_vocab_path, sess)
        if FLAGS.max_target_vocab < 1:
          assign_vectors(FLAGS.word_vector_file_fr, "target_embedding:0",
                         fr_vocab_path, sess)

      for _ in xrange(FLAGS.steps_per_checkpoint):
        step_count += 1
        step_c1 += 1
        global_step = int(model.global_step.eval())
        # Beam-training frequency anneals in over train_beam_anneal steps.
        train_beam_anneal = global_step / float(FLAGS.train_beam_anneal)
        train_beam_freq = FLAGS.train_beam_freq * min(1.0, train_beam_anneal)
        p = random.choice(FLAGS.problem.split("-"))
        train_set = global_train_set[p][-1]
        bucket_id = get_bucket_id(train_buckets_scale[p][-1], max_cur_length,
                                  train_set)
        # Prefer longer stuff 60% of time if not wmt.
        if np.random.randint(100) < 60 and FLAGS.problem != "wmt":
          bucket1 = get_bucket_id(train_buckets_scale[p][-1], max_cur_length,
                                  train_set)
          bucket_id = max(bucket1, bucket_id)

        # Run a step and time it.
        start_time = time.time()
        inp, target = data.get_batch(bucket_id, batch_size, train_set,
                                     FLAGS.height)
        # Gradient noise decays with the step count, scaled by recent error.
        noise_param = math.sqrt(math.pow(global_step + 1, -0.55) *
                                prev_seq_err) * FLAGS.grad_noise_scale
        # In multi-step mode, we use best from beam for middle steps.
        state, new_target, scores, history = None, None, None, []
        while (FLAGS.beam_size > 1 and
               train_beam_freq > np.random.random_sample()):
          # Get the best beam (no training, just forward model).
          new_target, new_first, new_inp, scores = get_best_beam(
              beam_model, sess, inp, target,
              batch_size, FLAGS.beam_size, bucket_id, history, p)
          history.append(new_first)
          # Training step with the previous input and the best beam as target.
          _, _, _, state = model.step(sess, inp, new_target, FLAGS.do_train,
                                      noise_param, update_mem=True, state=state)
          # Change input to the new one for the next step.
          inp = new_inp
          # If all results are great, stop (todo: not to wait for all?).
          if FLAGS.nprint > 1:
            print(scores)
          if sum(scores) / float(len(scores)) >= 10.0:
            break
        # The final step with the true target.
        loss, res, gnorm, _ = model.step(
            sess, inp, target, FLAGS.do_train, noise_param,
            update_mem=True, state=state)
        step_time += time.time() - start_time
        acc_grad_norm += 0.0 if gnorm is None else float(gnorm)

        # Accumulate statistics.
        acc_loss += loss
        acc_l1 += loss
        errors, total, seq_err = data.accuracy(
            inp, res, target, batch_size, 0, new_target, scores)
        if FLAGS.nprint > 1:
          print("seq_err: ", seq_err)
        acc_total += total
        acc_errors += errors
        acc_seq_err += seq_err

        # Report summary every 10 steps.
        if step_count + 3 > FLAGS.steps_per_checkpoint:
          do_report = True  # Don't pollute plot too early.
        if is_chief and step_count % 10 == 1 and do_report:
          cur_loss = acc_l1 / float(step_c1)
          acc_l1, step_c1 = 0.0, 0
          cur_perp = data.safe_exp(cur_loss)
          summary = tf.Summary()
          summary.value.extend(
              [tf.Summary.Value(tag="log_perplexity", simple_value=cur_loss),
               tf.Summary.Value(tag="perplexity", simple_value=cur_perp)])
          sv.SummaryComputed(sess, summary, global_step)

      # Normalize and print out accumulated statistics.
      acc_loss /= step_count
      step_time /= FLAGS.steps_per_checkpoint
      acc_seq_err = float(acc_seq_err) / (step_count * batch_size)
      prev_seq_err = max(0.0, acc_seq_err - 0.02)  # No noise at error < 2%.
      acc_errors = float(acc_errors) / acc_total if acc_total > 0 else 1.0
      t_size = float(sum([len(x) for x in train_set])) / float(1000000)
      msg = ("step %d step-time %.2f train-size %.3f lr %.6f grad-norm %.4f"
             % (global_step + 1, step_time, t_size, learning_rate,
                acc_grad_norm / FLAGS.steps_per_checkpoint))
      data.print_out("%s len %d ppl %.6f errors %.2f sequence-errors %.2f" %
                     (msg, max_cur_length, data.safe_exp(acc_loss),
                      100 * acc_errors, 100 * acc_seq_err))

      # If errors are below the curriculum threshold, move curriculum forward.
      is_good = FLAGS.curriculum_ppx > data.safe_exp(acc_loss)
      is_good = is_good and FLAGS.curriculum_seq > acc_seq_err
      if is_good and is_chief:
        if FLAGS.quantize:
          # Quantize weights.
          data.print_out(" Quantizing parameters.")
          sess.run([quant_op])
        # Increase current length (until the next with training data).
        sess.run(model.cur_length_incr_op)
        # Forget last perplexities if we're not yet at the end.
        if max_cur_length < max_length:
          prev_acc_perp.append(1000000)

      # Lower learning rate if we're worse than the last 5 checkpoints.
      acc_perp = data.safe_exp(acc_loss)
      if acc_perp > max(prev_acc_perp[-5:]) and is_chief:
        sess.run(model.lr_decay_op)
      prev_acc_perp.append(acc_perp)

      # Save checkpoint.
      if is_chief:
        checkpoint_path = os.path.join(checkpoint_dir, "neural_gpu.ckpt")
        model.saver.save(sess, checkpoint_path,
                         global_step=model.global_step)

        # Run evaluation.
        bin_bound = 4
        for p in FLAGS.problem.split("-"):
          total_loss, total_err, tl_counter = 0.0, 0.0, 0
          for bin_id in xrange(len(data.bins)):
            # Evaluate the first bin_bound bins and a sparse sample beyond.
            if bin_id < bin_bound or bin_id % FLAGS.eval_bin_print == 1:
              err, _, loss = single_test(bin_id, model, sess, FLAGS.nprint,
                                         batch_size * 4, dev_set, p,
                                         beam_model=beam_model)
              if loss > 0.0:
                total_loss += loss
                total_err += err
                tl_counter += 1
          test_loss = total_loss / max(1, tl_counter)
          test_err = total_err / max(1, tl_counter)
          test_perp = data.safe_exp(test_loss)
          summary = tf.Summary()
          summary.value.extend(
              [tf.Summary.Value(tag="test/%s/loss" % p, simple_value=test_loss),
               tf.Summary.Value(tag="test/%s/error" % p, simple_value=test_err),
               tf.Summary.Value(tag="test/%s/perplexity" % p,
                                simple_value=test_perp)])
          sv.SummaryComputed(sess, summary, global_step)
def linearize(output, rev_fr_vocab, simple_tokenizer=None, eos_id=wmt.EOS_ID):
  """Convert output token ids into a detokenized target-language sentence.

  Ids from the first eos_id onward are dropped (WMT convention). With a simple
  tokenizer the tokens are space-joined, mapping out-of-vocabulary ids to
  "UNK"; otherwise the WMT basic detokenizer is applied.
  """
  # Cut at the first EOS symbol, if present.
  try:
    output = output[:output.index(eos_id)]
  except ValueError:
    pass
  if not (simple_tokenizer or FLAGS.simple_tokenizer):
    return wmt.basic_detokenizer([rev_fr_vocab[o] for o in output])
  # Simple-tokenizer path: plain space join with UNK for unknown ids.
  vocab_size = len(rev_fr_vocab)
  words = [rev_fr_vocab[o] if o < vocab_size else "UNK" for o in output]
  return " ".join(words)
def evaluate():
  """Evaluate an existing model.

  Runs single_test over the dev set for every problem/bin, then — if
  FLAGS.test_file_prefix points at existing .en/.fr files — translates the
  test set with length-normalized bucket search and writes the results to a
  .res file (when running as a sharded task).
  """
  batch_size = FLAGS.batch_size * FLAGS.num_gpus
  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    (model, beam_model, _, _, _,
     (_, dev_set, en_vocab_path, fr_vocab_path), _, sess) = initialize(sess)
    for p in FLAGS.problem.split("-"):
      for bin_id in xrange(len(data.bins)):
        # Sharded tasks only test small bins; quiet wmt runs stop at bin 8.
        if (FLAGS.task >= 0 and bin_id > 4) or (FLAGS.nprint == 0 and
                                                bin_id > 8 and p == "wmt"):
          break
        single_test(bin_id, model, sess, FLAGS.nprint, batch_size, dev_set, p,
                    beam_model=beam_model)
    path = FLAGS.test_file_prefix
    # xid shards the test files by task index when task >= 0.
    xid = "" if FLAGS.task < 0 else ("%.4d" % (FLAGS.task + FLAGS.decode_offset))
    en_path, fr_path = path + ".en" + xid, path + ".fr" + xid
    # Evaluate the test files if they exist.
    if path and tf.gfile.Exists(en_path) and tf.gfile.Exists(fr_path):
      data.print_out("Translating test set %s" % en_path)
      # Read lines.
      en_lines, fr_lines = [], []
      with tf.gfile.GFile(en_path, mode="r") as f:
        for line in f:
          en_lines.append(line.strip())
      with tf.gfile.GFile(fr_path, mode="r") as f:
        for line in f:
          fr_lines.append(line.strip())
      # Tokenize and convert to ids.
      en_vocab, _ = wmt.initialize_vocabulary(en_vocab_path)
      _, rev_fr_vocab = wmt.initialize_vocabulary(fr_vocab_path)
      if FLAGS.simple_tokenizer:
        en_ids = [wmt.sentence_to_token_ids(
            l, en_vocab, tokenizer=wmt.space_tokenizer,
            normalize_digits=FLAGS.normalize_digits)
                  for l in en_lines]
      else:
        en_ids = [wmt.sentence_to_token_ids(l, en_vocab) for l in en_lines]
      # Translate.
      results = []
      for idx, token_ids in enumerate(en_ids):
        if idx % 5 == 0:
          data.print_out("Translating example %d of %d." % (idx, len(en_ids)))
        # Which bucket does it belong to?
        buckets = [b for b in xrange(len(data.bins))
                   if data.bins[b] >= len(token_ids)]
        if buckets:
          result, result_cost = [], 100000000.0
          # Try every bucket that fits, keeping the lowest-cost decode.
          for bucket_id in buckets:
            if data.bins[bucket_id] > MAXLEN_F * len(token_ids) + EVAL_LEN_INCR:
              break
            # Get a 1-element batch to feed the sentence to the model.
            used_batch_size = 1  # batch_size
            inp, target = data.get_batch(
                bucket_id, used_batch_size, None, FLAGS.height,
                preset=([token_ids], [[]]))
            loss, output_logits, _, _ = model.step(
                sess, inp, target, None, beam_size=FLAGS.beam_size)
            outputs = [int(o[0]) for o in output_logits]
            # Length normalization: longer buckets get a loss discount.
            loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm)
            if FLAGS.simple_tokenizer:
              # Penalize constraint violations (tag mismatches, brackets).
              cur_out = outputs
              if wmt.EOS_ID in cur_out:
                cur_out = cur_out[:cur_out.index(wmt.EOS_ID)]
              res_tags = [rev_fr_vocab[o] for o in cur_out]
              bad_words, bad_brack = wmt.parse_constraints(token_ids, res_tags)
              loss += 1000.0 * bad_words + 100.0 * bad_brack
            # print (bucket_id, loss)
            if loss < result_cost:
              result = outputs
              result_cost = loss
          final = linearize(result, rev_fr_vocab)
          results.append("%s\t%s\n" % (final, fr_lines[idx]))
          # print result_cost
          sys.stderr.write(results[-1])
          sys.stderr.flush()
        else:
          sys.stderr.write("TOOO_LONG\t%s\n" % fr_lines[idx])
          sys.stderr.flush()
      if xid:
        decode_suffix = "beam%dln%dn" % (FLAGS.beam_size,
                                         int(100 * FLAGS.length_norm))
        with tf.gfile.GFile(path + ".res" + decode_suffix + xid, mode="w") as f:
          for line in results:
            f.write(line)
def mul(l):
  """Return the product of the elements of l as a float (1.0 for empty)."""
  product = 1.0
  for factor in l:
    product = product * factor
  return product
def interactive():
  """Interactively probe an existing model.

  Reads sentences from stdin, tokenizes them, decodes with bucket search and
  length normalization, and prints the candidate and final translations.
  """
  with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    # Initialize model.
    (model, _, _, _, _, (_, _, en_path, fr_path), _, _) = initialize(sess)
    # Load vocabularies.
    en_vocab, rev_en_vocab = wmt.initialize_vocabulary(en_path)
    _, rev_fr_vocab = wmt.initialize_vocabulary(fr_path)
    # Print out vectors and variables.
    if FLAGS.nprint > 0 and FLAGS.word_vector_file_en:
      print_vectors("embedding:0", en_path, FLAGS.word_vector_file_en)
    if FLAGS.nprint > 0 and FLAGS.word_vector_file_fr:
      print_vectors("target_embedding:0", fr_path, FLAGS.word_vector_file_fr)
    # Report the total trainable parameter count.
    total = 0
    for v in tf.trainable_variables():
      shape = v.get_shape().as_list()
      total += mul(shape)
      print(v.name, shape, mul(shape))
    print(total)
    # Start interactive loop.
    sys.stdout.write("Input to Neural GPU Translation Model.\n")
    sys.stdout.write("> ")
    sys.stdout.flush()
    # NOTE(review): the trailing `, ""` makes inpt a 2-tuple (line, "") — a
    # tuple is always truthy so the loop never ends at EOF, and the tuple
    # (not the line) is what gets passed to the tokenizer. Confirm whether
    # this was meant to be just `sys.stdin.readline()`.
    inpt = sys.stdin.readline(), ""
    while inpt:
      cures = []
      # Get token-ids for the input sentence.
      if FLAGS.simple_tokenizer:
        token_ids = wmt.sentence_to_token_ids(
            inpt, en_vocab, tokenizer=wmt.space_tokenizer,
            normalize_digits=FLAGS.normalize_digits)
      else:
        token_ids = wmt.sentence_to_token_ids(inpt, en_vocab)
      print([rev_en_vocab[t] for t in token_ids])
      # Which bucket does it belong to?
      buckets = [b for b in xrange(len(data.bins))
                 if data.bins[b] >= max(len(token_ids), len(cures))]
      if cures:
        buckets = [buckets[0]]
      if buckets:
        result, result_cost = [], 10000000.0
        for bucket_id in buckets:
          if data.bins[bucket_id] > MAXLEN_F * len(token_ids) + EVAL_LEN_INCR:
            break
          glen = 1
          for gen_idx in xrange(glen):
            # Get a 1-element batch to feed the sentence to the model.
            inp, target = data.get_batch(
                bucket_id, 1, None, FLAGS.height, preset=([token_ids], [cures]))
            loss, output_logits, _, _ = model.step(
                sess, inp, target, None, beam_size=FLAGS.beam_size,
                update_mem=False)
            # If it is a greedy decoder, outputs are argmaxes of output_logits.
            if FLAGS.beam_size > 1:
              outputs = [int(o) for o in output_logits]
            else:
              # Length normalization for the greedy path.
              loss = loss[0] - (data.bins[bucket_id] * FLAGS.length_norm)
              outputs = [int(np.argmax(logit, axis=1))
                         for logit in output_logits]
            print([rev_fr_vocab[t] for t in outputs])
            print(loss, data.bins[bucket_id])
            print(linearize(outputs, rev_fr_vocab))
            cures.append(outputs[gen_idx])
            print(cures)
            print(linearize(cures, rev_fr_vocab))
          if FLAGS.simple_tokenizer:
            # Penalize constraint violations before comparing costs.
            cur_out = outputs
            if wmt.EOS_ID in cur_out:
              cur_out = cur_out[:cur_out.index(wmt.EOS_ID)]
            res_tags = [rev_fr_vocab[o] for o in cur_out]
            bad_words, bad_brack = wmt.parse_constraints(token_ids, res_tags)
            loss += 1000.0 * bad_words + 100.0 * bad_brack
          if loss < result_cost:
            result = outputs
            result_cost = loss
        print("FINAL", result_cost)
        print([rev_fr_vocab[t] for t in result])
        print(linearize(result, rev_fr_vocab))
      else:
        print("TOOO_LONG")
      sys.stdout.write("> ")
      sys.stdout.flush()
      inpt = sys.stdin.readline(), ""
def main(_):
  """Dispatch on FLAGS.mode: 0 trains, 1 evaluates, anything else is the
  interactive prompt."""
  if FLAGS.mode == 0:
    train()
    return
  if FLAGS.mode == 1:
    evaluate()
    return
  interactive()
if __name__ == "__main__":
  # Parse flags and run main() via the TensorFlow app runner.
  tf.app.run()
|
httpserver.py | import os
import queue
import shutil
import tempfile
import threading
import http.server
import socketserver
from ..extern.RangeHTTPServer import RangeHTTPRequestHandler
__all__ = ['HTTPServer', 'RangeHTTPServer']
def run_server(tmpdir, handler_class, stop_event, queue):  # pragma: no cover
    """
    Runs an HTTP server serving files from the given tmpdir in a separate
    thread. When it's ready, it sends the server's URL over ``queue`` so the
    main thread (the HTTP client) can start making requests of it. The loop
    runs until ``stop_event`` is set.
    """
    class HTTPRequestHandler(handler_class):
        def translate_path(self, path):
            # Re-root the resolved path from the process cwd into tmpdir so
            # only files under tmpdir are ever served.
            path = handler_class.translate_path(self, path)
            path = os.path.join(
                tmpdir,
                os.path.relpath(path, os.getcwd()))
            return path

    # Port 0 lets the OS pick a free port; the real address is read back.
    server = socketserver.TCPServer(("127.0.0.1", 0), HTTPRequestHandler)
    domain, port = server.server_address
    url = "http://{0}:{1}/".format(domain, port)

    # Set a reasonable timeout so that invalid requests (which may occur during
    # testing) do not cause the entire test suite to hang indefinitely
    server.timeout = 0.1

    # Tell the parent the server is ready and where to reach it.
    queue.put(url)

    # Using server.serve_forever does not work here since it ignores the
    # timeout value set above. Having an explicit loop also allows us to kill
    # the server from the parent thread.
    while not stop_event.is_set():
        server.handle_request()

    server.server_close()
class HTTPServer:
    """Serves files from a fresh temporary directory on a background thread.

    The server's base URL is available as the ``url`` attribute once
    construction returns; call ``finalize()`` to stop the server and delete
    the temporary directory.
    """

    # Request handler used by run_server; subclasses may override.
    handler_class = http.server.SimpleHTTPRequestHandler

    def __init__(self):
        self.tmpdir = tempfile.mkdtemp()

        q = queue.Queue()
        self.stop_event = threading.Event()

        args = (self.tmpdir, self.handler_class, self.stop_event, q)
        self.thread = threading.Thread(target=run_server, args=args)
        self.thread.start()

        # Block until run_server reports the URL it bound to.
        self.url = q.get()

    def finalize(self):
        # Ask the server loop to exit, wait for the thread, then clean up.
        self.stop_event.set()
        self.thread.join()
        shutil.rmtree(self.tmpdir)
class RangeHTTPServer(HTTPServer):
    """HTTPServer variant whose handler supports HTTP Range requests."""

    handler_class = RangeHTTPRequestHandler
|
generic_shell.py | import sublime
import os
import sys
import threading
import subprocess
from time import time, sleep
from .settings import Settings
"""
Generic shell implementations, derived from Sublime Text's exec command
implementation
"""
class GenericShell(threading.Thread):
    """Runs a shell command on a background thread, wiring its stdout and
    stdin to a Sublime Text view so the view behaves like a simple terminal.
    """

    def __init__(self, cmds, view, on_complete=None, no_echo=False,
                 read_only=False, to_console=False, params=None):
        # cmds: command string to execute.
        # view: Sublime view used for terminal input/output.
        # on_complete: callback(elapsed_seconds, return_code, params) on exit.
        # no_echo: erase typed input from the view after sending it.
        # read_only: do not forward view edits to the process's stdin.
        # to_console: also echo process output to the Sublime console.
        self.params = params
        self.cmds = cmds
        self.on_complete = on_complete
        self.no_echo = no_echo
        self.read_only = read_only
        self.view = view
        self.to_console = to_console
        self.cwd = None
        threading.Thread.__init__(self)

    def set_cwd(self, path=""):
        # Working directory for the spawned process.
        self.cwd = path

    def popen(self, cmd, cwd):
        """Spawn cmd with a platform-appropriate shell and return the Popen."""
        if not os.path.exists(cwd):
            # Fall back to the home directory if cwd vanished.
            cwd = os.path.expanduser('~')
        if sys.platform == "win32":
            # NOTE(review): shell=True with a raw command string — fine for
            # trusted commands, but never pass untrusted text through here.
            return subprocess.Popen(
                cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT, cwd=cwd, shell=True
            )
        elif sys.platform == "darwin":
            # Login shell (-l) so the user's PATH customizations apply.
            return subprocess.Popen(
                ["/bin/bash", "-l", "-c", cmd], stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd,
                shell=False
            )
        elif sys.platform == "linux":
            return subprocess.Popen(
                ["/bin/bash", "-c", cmd], stdin=subprocess.PIPE,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd,
                shell=False
            )
        else:
            return subprocess.Popen(
                cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT, cwd=cwd, shell=False
            )

    def kill(self, proc):
        """Terminate the process (on Windows, kill its whole process tree)."""
        if sys.platform == "win32":
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            subprocess.Popen(
                "taskkill /F /PID %s /T" % (str(proc.pid)),
                startupinfo=startupinfo
            )
        else:
            proc.terminate()

    def read_stdout(self):
        """Pump process stdout into the view until the process exits."""
        while True:
            data = os.read(self.proc.stdout.fileno(), 512)
            if len(data) > 0:
                _, layout_height = self.view.layout_extent()
                _, viewport_height = self.view.viewport_extent()
                viewport_posx, viewport_posy = self.view.viewport_position()
                decoded_data = data.decode(
                    Settings.get("encoding"),
                    Settings.get("encoding_handle")
                ).replace("\r\n", "\n")
                # Temporarily lift read-only so output can be appended.
                self.view.set_read_only(False)
                self.view.run_command(
                    "terminality_utils",
                    {"util_type": "add", "text": decoded_data}
                )
                self.view.set_read_only(self.read_only)
                self.old_data += decoded_data
                # Auto-scroll only if the user was already near the bottom.
                if (Settings.get("autoscroll_to_bottom") and
                        viewport_posy >= (layout_height - viewport_height -
                                          Settings.get("autoscroll_snap_range"))):
                    _, layout_height = self.view.layout_extent()
                    self.view.set_viewport_position(
                        (viewport_posx, layout_height - viewport_height),
                        False
                    )
                if self.to_console:
                    print(decoded_data)
            elif self.proc.poll() is not None:
                # No data and the process has exited: stop pumping.
                break
            sleep(Settings.get("refresh_interval"))
        self.isReadable = False

    def read_stdin(self):
        """Forward text typed into the view to the process's stdin."""
        while self.proc.poll() is None:
            # If the view now holds less text than the last known output,
            # the user deleted output; restore it.
            if len(self.old_data) > self.view.size():
                self.view.run_command(
                    "terminality_utils",
                    {"util_type": "clear"}
                )
                self.view.run_command(
                    "terminality_utils",
                    {"util_type": "add", "text": self.old_data}
                )
            elif len(self.old_data) < self.view.size():
                # Anything appended past old_data is pending user input.
                self.data_in = self.view.substr(
                    sublime.Region(len(self.old_data), self.view.size())
                )
                if "\n" in self.data_in:
                    # A newline completes a line of input; send it.
                    if self.no_echo:
                        self.view.run_command(
                            "terminality_utils",
                            {"util_type": "erase", "region": [
                                len(self.old_data), self.view.size()
                            ]}
                        )
                    os.write(
                        self.proc.stdin.fileno(),
                        self.data_in.encode(Settings.get("encoding"))
                    )
                    self.old_data = self.view.substr(
                        sublime.Region(0, self.view.size())
                    )
                    self.data_in = ""
            sleep(Settings.get("refresh_interval"))
        self.isWritable = False

    def run(self):
        """Thread body: spawn the process, start I/O pumps, wait for exit."""
        start_time = time()
        self.proc = self.popen(self.cmds, self.cwd)
        self.old_data = self.view.substr(sublime.Region(0, self.view.size()))
        self.data_in = ""
        self.return_code = None
        self.isReadable = True
        self.isWritable = True
        threading.Thread(target=self.read_stdout).start()
        if not self.read_only:
            threading.Thread(target=self.read_stdin).start()
        # Wait until the view closes, or the process exits AND both pumps
        # have wound down.
        while self.view is not None and self.view.window() is not None:
            if self.proc.poll() is not None:
                self.return_code = self.proc.poll()
                if not self.isWritable and not self.isReadable:
                    self.proc.stdout.close()
                    self.proc.stdin.close()
                    break
            sleep(Settings.get("refresh_interval"))
        if self.return_code is None:
            # The view was closed while the process was still running.
            self.kill(self.proc)
        self.result = True
        if self.on_complete is not None:
            self.on_complete(
                time() - start_time,
                self.return_code,
                self.params
            )
|
utils.py | #!/usr/bin/env python
import sys
import gc
import glob
import array
import os
import random
from PIL import Image
import mss
try:
# import cupy as np
import numpy as np
except ImportError:
print("Cupy not installed.")
import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize
from skimage.io import imread
# comment these out when using WSL
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import openvino_test
import cv2
from inputs import get_gamepad
import math
import threading
class Screenshotter(object):
    """Grabs screen frames with mss and can segment them via OpenVINO."""

    def __init__(self):
        self.sct = mss.mss()
        # OpenVINO inference-engine handles, used by convert_to_segmented().
        self.ie, self.net, self.exec_net, self.output_layer_ir, self.input_layer_ir = openvino_test.start()

    def take_screenshot(self):
        """Capture the configured screen region and return it resized to
        (Sample.IMG_H, Sample.IMG_W) as an RGB array."""
        # Get raw pixels from the screen
        sct_img = self.sct.grab({"top": Screenshot.OFFSET_Y,
                                 "left": Screenshot.OFFSET_X,
                                 "width": Screenshot.SRC_W,
                                 "height": Screenshot.SRC_H})
        # Create the Image (mss returns BGRA; decode as RGB).
        temp = np.array(Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX'))
        # Perform segmentation (disabled)
        # temp = self.convert_to_segmented(temp)
        # DEBUG
        import matplotlib.pyplot as plt
        # Resize
        vec = cv2.resize(temp, (Sample.IMG_W, Sample.IMG_H), interpolation=cv2.INTER_LINEAR_EXACT)
        # Augmentations (disabled)
        # vec = cv2.rectangle(img=vec.astype(np.uint8), pt1=(int(0),int(0)), pt2=(int(480), int(90)), color=[0, 0, 0], thickness=cv2.FILLED)
        return vec

    def convert_to_segmented(self, img):
        """Run semantic segmentation on img through the OpenVINO network."""
        return openvino_test.inference(img, self.ie, self.net, self.exec_net, self.output_layer_ir, self.input_layer_ir, True)
def resize_image(img):
    """Resize img to the sample dimensions.

    Returns an array of shape (Sample.IMG_H, Sample.IMG_W, Sample.IMG_D),
    as produced by skimage.transform.resize (float values in [0, 1]).
    """
    # resize() already returns an array of exactly the requested shape, so the
    # reshape to the same shape that the original code performed was a no-op
    # and has been dropped.
    return resize(img, (Sample.IMG_H, Sample.IMG_W, Sample.IMG_D))
class Screenshot(object):
    """Geometry of the physical screen region to capture."""
    SRC_W = 1920  # capture width in pixels
    SRC_H = 1080  # capture height in pixels
    # SRC_W = 300
    # SRC_H = 300
    SRC_D = 3  # colour channels
    OFFSET_X = 320  # because of ultrawide monitor
    OFFSET_Y = 0
    # OFFSET_X = 1920
    # OFFSET_Y = 780
class Sample(object):
    """Target size of stored/processed training images."""
    IMG_W = 480  # sample width in pixels
    IMG_H = 270  # sample height in pixels
    # IMG_W = 300
    # IMG_H = 300
    IMG_D = 3  # colour channels
class XboxController(object):
    """Polls an Xbox gamepad via `inputs.get_gamepad` on a daemon thread.

    The latest value of every axis/button is kept in instance attributes,
    updated continuously by the monitor thread; read() returns a snapshot.
    """

    # Normalisation divisors for raw event values.
    # NOTE(review): MAX_TRIG_VAL assumes 8-bit trigger values; some drivers
    # report 10-bit (0-1023) triggers — confirm the actual range.
    MAX_TRIG_VAL = math.pow(2, 8)
    MAX_JOY_VAL = math.pow(2, 15)

    def __init__(self):
        # Last observed value of each control, written by the monitor thread.
        self.LeftJoystickY = 0
        self.LeftJoystickX = 0
        self.RightJoystickY = 0
        self.RightJoystickX = 0
        self.LeftTrigger = 0
        self.RightTrigger = 0
        self.LeftBumper = 0
        self.RightBumper = 0
        self.A = 0
        self.X = 0
        self.Y = 0
        self.B = 0
        self.LeftThumb = 0
        self.RightThumb = 0
        self.Back = 0
        self.Start = 0
        self.LeftDPad = 0
        self.RightDPad = 0
        self.UpDPad = 0
        self.DownDPad = 0

        # Daemon thread: dies with the process, no explicit join needed.
        self._monitor_thread = threading.Thread(target=self._monitor_controller, args=())
        self._monitor_thread.daemon = True
        self._monitor_thread.start()

    def read(self):
        """Return a 16-element snapshot of the current controller state."""
        L_X = self.LeftJoystickX
        L_Y = self.LeftJoystickY
        R_X = self.RightJoystickX
        R_Y = self.RightJoystickY
        LT = self.LeftTrigger
        RT = self.RightTrigger
        LB = self.LeftBumper
        RB = self.RightBumper
        A = self.A
        X = self.X
        Y = self.Y
        B = self.B
        LTh = self.LeftThumb
        RTh = self.RightThumb
        Back = self.Back
        Start = self.Start
        # dpad does not work
        DP_L = self.LeftDPad
        DP_R = self.RightDPad
        DP_U = self.UpDPad
        DP_D = self.DownDPad
        # return [L_X, L_Y, R_X, R_Y, RT]
        return [L_X, L_Y, R_X, R_Y, LT, RT, LB, RB, A, X, Y, B, LTh, RTh, Back, Start]
        # return [L_X, L_Y, R_X, R_Y, RT]

    def _monitor_controller(self):
        """Blockingly drain gamepad events forever, updating attributes."""
        while True:
            events = get_gamepad()
            for event in events:
                if event.code == 'ABS_Y':
                    self.LeftJoystickY = event.state / XboxController.MAX_JOY_VAL  # normalize between -1 and 1
                elif event.code == 'ABS_X':
                    self.LeftJoystickX = event.state / XboxController.MAX_JOY_VAL  # normalize between -1 and 1
                elif event.code == 'ABS_RY':
                    self.RightJoystickY = event.state / XboxController.MAX_JOY_VAL  # normalize between -1 and 1
                elif event.code == 'ABS_RX':
                    self.RightJoystickX = event.state / XboxController.MAX_JOY_VAL  # normalize between -1 and 1
                elif event.code == 'ABS_Z':
                    self.LeftTrigger = event.state / XboxController.MAX_TRIG_VAL  # normalize between 0 and 1
                elif event.code == 'ABS_RZ':
                    self.RightTrigger = event.state / XboxController.MAX_TRIG_VAL  # normalize between 0 and 1
                elif event.code == 'BTN_TL':
                    self.LeftBumper = event.state
                elif event.code == 'BTN_TR':
                    self.RightBumper = event.state
                elif event.code == 'BTN_SOUTH':
                    self.A = event.state
                elif event.code == 'BTN_NORTH':
                    self.X = event.state
                elif event.code == 'BTN_WEST':
                    self.Y = event.state
                elif event.code == 'BTN_EAST':
                    self.B = event.state
                elif event.code == 'BTN_THUMBL':
                    self.LeftThumb = event.state
                elif event.code == 'BTN_THUMBR':
                    self.RightThumb = event.state
                elif event.code == 'BTN_SELECT':
                    self.Back = event.state
                elif event.code == 'BTN_START':
                    self.Start = event.state
                elif event.code == 'BTN_TRIGGER_HAPPY1':
                    self.LeftDPad = event.state
                elif event.code == 'BTN_TRIGGER_HAPPY2':
                    self.RightDPad = event.state
                elif event.code == 'BTN_TRIGGER_HAPPY3':
                    self.UpDPad = event.state
                elif event.code == 'BTN_TRIGGER_HAPPY4':
                    self.DownDPad = event.state
class Data(object):
    """Minimal epoch-based batch iterator over pre-saved X/y numpy arrays."""

    def __init__(self):
        # The whole dataset is loaded into memory up front.
        self._X = np.load("data/X.npy")
        self._y = np.load("data/y.npy")
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._num_examples = self._X.shape[0]

    @property
    def num_examples(self):
        # Total number of examples in the dataset.
        return self._num_examples

    def next_batch(self, batch_size):
        """Return the next (X, y) slice of batch_size examples.

        When an epoch is exhausted the cursor wraps to the start; the data is
        not reshuffled between epochs.
        """
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._X[start:end], self._y[start:end]
def load_sample(sample):
    """Return (image paths, 16-channel joystick values) from sample/data.csv."""
    csv_path = sample + '/data.csv'
    image_files = np.loadtxt(csv_path, delimiter=',', dtype=str, usecols=(0,))
    joystick_values = np.loadtxt(csv_path, delimiter=',',
                                 usecols=tuple(range(1, 17)))
    return image_files, joystick_values
def load_mini_sample(sample):
    """Return (image paths, reduced 8-channel joystick values) from sample/data.csv."""
    csv_path = sample + '/data.csv'
    image_files = np.loadtxt(csv_path, delimiter=',', dtype=str, usecols=(0,))
    joystick_values = np.loadtxt(csv_path, delimiter=',',
                                 usecols=(1, 2, 3, 4, 5, 6, 9, 10))
    return image_files, joystick_values
def load_categorical_sample(sample):
    """Return (image paths, categorical labels from column 17) from sample/data.csv."""
    csv_path = sample + '/data.csv'
    image_files = np.loadtxt(csv_path, delimiter=',', dtype=str, usecols=(0,))
    joystick_values = np.loadtxt(csv_path, delimiter=',', usecols=(17,))
    return image_files, joystick_values
def load_racing_sample(sample):
    """Return (image paths, [steering, left trigger, right trigger]) from sample/data.csv."""
    csv_path = sample + '/data.csv'
    image_files = np.loadtxt(csv_path, delimiter=',', dtype=str, usecols=(0,))
    joystick_values = np.loadtxt(csv_path, delimiter=',', usecols=(1, 5, 6))
    return image_files, joystick_values
def load_steering_sample(sample):
    """Return (image paths, steering values only) from sample/data.csv."""
    csv_path = sample + '/data.csv'
    image_files = np.loadtxt(csv_path, delimiter=',', dtype=str, usecols=(0,))
    joystick_values = np.loadtxt(csv_path, delimiter=',', usecols=(1,))
    return image_files, joystick_values
def load_imgs(sample):
    """Return only the image-path column from sample/data.csv."""
    return np.loadtxt(sample + '/data.csv', delimiter=',', dtype=str,
                      usecols=(0,))
# training data viewer
def viewer(sample):
    """Replay a recorded sample: show each frame alongside a rolling plot of
    the last 30 joystick readings (channels 0, 4 and 5)."""
    image_files, joystick_values = load_sample(sample)

    plotData = []

    plt.ion()
    plt.figure('viewer', figsize=(16, 6))
    for i in range(len(image_files)):
        # joystick
        print(i, " ", joystick_values[i,:])

        # format data; keep only the most recent 30 readings in the window
        plotData.append( joystick_values[i,:] )
        if len(plotData) > 30:
            plotData.pop(0)
        x = np.asarray(plotData)

        # image (every 3rd)
        # if (i % 3 == 0):
        plt.subplot(121)
        image_file = image_files[i]
        img = mpimg.imread(image_file)
        plt.imshow(img)

        # plot
        plt.subplot(122)
        plt.plot(range(i,i+len(plotData)), x[:,0], 'r')
        # plt.hold(True)
        # plt.plot(range(i,i+len(plotData)), x[:,1], 'b')
        # plt.plot(range(i,i+len(plotData)), x[:,2], 'g')
        # plt.plot(range(i,i+len(plotData)), x[:,3], 'k')
        plt.plot(range(i,i+len(plotData)), x[:,4], 'y')
        plt.plot(range(i,i+len(plotData)), x[:,5], 'c')
        # plt.plot(range(i,i+len(plotData)), x[:,6], 'm')
        # plt.plot(range(i,i+len(plotData)), x[:,7], 'skyblue')
        # plt.plot(range(i,i+len(plotData)), x[:,8], 'springgreen')
        # plt.plot(range(i,i+len(plotData)), x[:,9], 'orange')
        # plt.plot(range(i,i+len(plotData)), x[:,10], 'maroon')
        # plt.plot(range(i,i+len(plotData)), x[:,11], 'peachpuff')
        # plt.plot(range(i,i+len(plotData)), x[:,12], 'lime')
        # plt.plot(range(i,i+len(plotData)), x[:,13], 'plum')
        # plt.plot(range(i,i+len(plotData)), x[:,14], 'navy')
        # plt.plot(range(i,i+len(plotData)), x[:,15], 'aqua')
        plt.draw()
        # plt.hold(False)
        plt.pause(0.01)  # seconds

        # NOTE(review): this increment is a no-op — the for statement rebinds
        # i on every iteration.
        i += 1
# prepare training data
def prepare(samples, augment=True):
    """Build X/y numpy arrays from recorded samples and save them under data/.

    Args:
        samples: argv-style list; only samples[0] is used, as a glob pattern
            of sample directories. NOTE(review): remaining entries are
            silently ignored — confirm this is intended.
        augment: when True, mask the top band of each image with a black
            rectangle to hide extraneous on-screen details.
    """
    print(f"Preparing data from {samples[0]}")
    y = []
    paths = [os.path.normpath(i) for i in glob.glob(samples[0])]

    # First pass: count images so X can be preallocated in one block.
    numpics = 0
    # for sample in samples:
    for sample in paths:
        print(sample)
        image_files = load_imgs(sample)
        numpics += len(image_files)
        del sample
        del image_files
        gc.collect()
    print(numpics)

    X = np.empty(shape=(numpics,Sample.IMG_H,Sample.IMG_W,3),dtype=np.uint8)
    idx = 0  # Current image write index - from 0 to numpics
    for sample in paths:
        # for sample in samples:
        print(f"Processing {sample}")
        # load sample
        # image_files, joystick_values = load_sample(os.path.normpath(sample))
        # load condensed sample (steering only)
        image_files, joystick_values = load_steering_sample(os.path.normpath(sample))
        # add joystick values to y
        print(f"Joystick values shape {joystick_values.shape}")
        y.append(joystick_values)
        # load, prepare and add images to X
        for image_file in image_files:
            image = imread(image_file)
            vec = resize_image(image)
            if augment:
                ## Augmentation
                # Mirror image
                ### if random.choice([True, False]):
                ###     vec = vec[:, ::-1, :]  # horizontally mirror image
                ###     y[-1][0] *= -1  # negate steering value
                # Crop image (by adding black rectangle to mask extraneous details)
                # print(vec.dtype, vec.shape)
                # sys.exit(1)
                vec = cv2.rectangle(img=vec.astype(np.uint8), pt1=(int(0),int(0)), pt2=(int(480), int(90)), color=[0, 0, 0], thickness=cv2.FILLED)
                # Add random jitter to steering values
                ### y[-1][0] += np.random.normal(loc=0, scale=0.01)
                # TODO Add Bias
            X[idx] = vec
            idx += 1
            del image
            gc.collect()
        # try to do some memory management
        # delete the current sample data since it has been appended to x and y
        del image_files
        del joystick_values
        gc.collect()

    print("Saving to file...")
    X = np.asarray(X)
    y = np.concatenate(y)
    np.save("data/x_fh5", X)
    np.save("data/y_fh5", y)
    print("Done!")
    print(X.shape)
    print(np.asarray(y).shape)
    return
if __name__ == '__main__':
    # CLI dispatch: viewer <sample-dir> | prepare <glob> | prepare_augment <glob>
    if sys.argv[1] == 'viewer':
        viewer(sys.argv[2])
    elif sys.argv[1] == 'prepare':
        prepare(sys.argv[2:], augment=False)
    elif sys.argv[1] == 'prepare_augment':
        prepare(sys.argv[2:])
|
rview.py | """ Library for supporting general functionality in retroactive views. """
import abc
import inspect
import numbers
import threading
import db.errors as errors
import db.rrecord as rrecord
import db.types.subscribable as rpubsub
def wrap(cls, clsname=None, no_rewrite=()):
  """
  Wraps a class so that all its inherited functions operate dynamically on the
  _value member variable. By default, the wrapped class also inherits the name
  of cls, except with a suffix of '<retro>'.

  If _value is not properly initialized before use, its value will be a
  RViewValueUninitializedException. If cls defines _value, that will also
  validly overwrite _value.

  Args:
    cls (class): the class to wrap
    clsname (str): the desired name for the new class. Defaults to None, which
      indicates to use cls's name appended with '<retro>'
    no_rewrite (str): a list of functions that should not be (re)written.
      Defaults to an empty tuple

  Returns a class that is the wrapped version of cls.
  """
  attributes = {
      '_value': errors.RViewValueUninitializedException(
          'Attempted to use a view with an uninitialized value. ' +
          'Remember to initialize values in a view. ' +
          'See the documentation of rview.RView for details.'),
      '_wrapped_cls': cls,
  }

  def _make_forwarder(method_name):
    """Build a method that forwards method_name to self._value.

    BUG FIX: the original code assigned a lambda that closed over the loop
    variable `name`; `name` was looked up when the lambda was CALLED, so
    every wrapped method delegated to whichever member the loop saw last.
    Binding the name through this factory's parameter captures it eagerly.
    """
    def forwarder(self, *args, **kwargs):
      return getattr(self._value, method_name)(*args, **kwargs)
    # Prevents over-wrapping of a wrapped function
    forwarder._rwrapped = True
    return forwarder

  modfuncs = ('__repr__', '__str__')
  modfuncs = [(name, getattr(cls, name)) for name in modfuncs]
  modfuncs += inspect.getmembers(cls, predicate=inspect.isfunction)
  for name, func in modfuncs:
    if (name not in no_rewrite) and (not getattr(func, '_rwrapped', False)):
      attributes[name] = _make_forwarder(name)
  if clsname is None:
    clsname = cls.__name__ + '<retro>'
  return type(clsname, (cls,), attributes)
# Below is a set of wrapped classes that can be used to superclass retroactive
# views.
class RView(wrap(object), rpubsub.Subscribable, abc.ABC):
  """Abstract base for retroactive views that mirror a Subscribable source."""
  # Pub-sub polling backoff: start at 0.1s, doubling up to a 10s cap.
  PUBSUB_INITIAL_TIMEOUT = 0.1
  PUBSUB_MAX_TIMEOUT = 10
  def __init__(self, obj, *args, **kwargs):
    """
    Initializes a callback thread that waits on the given Subscribable object
    for publish-subscribe updates. This thread is stored in the
    _cb_thread member variable.

    Args:
      obj (subscribable): a Subscribable object on which to await for pub-sub
        changes
      args (tuple): positional arguments to pass up the MRO chain
      kwargs (dict): keyword arguments to pass up the MRO chain

    Raises a RViewInitException if obj is not a Subscribable object.
    """
    super(RView, self).__init__(*args, **kwargs)
    if not isinstance(obj, rpubsub.Subscribable):
      raise errors.RViewInitException(
          'Parameter of type %s is not a Subscribable object' % \
          obj.__class__.__name__)

    # Signals for quiet exits
    self._pubsub_exit_event = threading.Event()  # Signal to exit
    self._pubsub_exited_event = threading.Event()  # Signal of successful exit

    # Start callback thread
    # NOTE(review): the thread is created here but .start() is not called in
    # this method — confirm a subclass or caller is responsible for starting
    # _cb_thread.
    self._cb_thread = threading.Thread(
        target=self._callback, args=(obj, None))
def __del__(self):
""" Calls free() to perform thread cleanup on the callback thread. """
super(RView, self).__del__()
self.free()
def _all_records(self):
return self._value
def _apply_changes(self, changes):
"""
Apply the changelist in changes to the current state.
Args:
changes (list(rrecord.Record)): the list of changes
"""
for record in changes:
if rrecord.Record.INSERT == record.action:
self._callback_insert(record)
elif rrecord.Record.DELETE == record.action:
self._callback_delete(record)
elif rrecord.Record.ERASE == record.action:
self._callback_erase(record.time, record.records)
else: # Change type is not part of enumeration
# TODO: handle this case
pass
def _callback(self, obj, checkpoint):
"""
Body of the callback thread. The sole parameter is the object to which this
callback is subscribing.
Args:
obj (rpubsub.Subscribable): the object that has already been verified to
be Subscribable
checkpoint (int): the initial marker for when the last set of changes
were seen by this subscriber
"""
timeout = RView.PUBSUB_INITIAL_TIMEOUT
while not self._pubsub_exit_event.is_set():
new_checkpoint, changes = obj.subscribe(checkpoint, timeout=timeout)
if new_checkpoint == checkpoint: # No changes
# Exponential backoff
timeout = min(RView.PUBSUB_MAX_TIMEOUT, 2 * timeout)
else:
# Reset exponential backoff
timeout = RView.PUBSUB_INITIAL_TIMEOUT
checkpoint = new_checkpoint
self._apply_changes(changes)
self._pubsub_exited_event.set()
obj.unsubscribe(checkpoint)
@abc.abstractmethod
def _callback_delete(self, record):
"""
Callback method for handling the case of rrecord.Record.DELETE for a single
change.
Args:
record (rrecord.Record): the deletion record to make
"""
pass
@abc.abstractmethod
def _callback_erase(self, time, records):
"""
Callback method for handling the case of rrecord.Record.ERASE for a single
change, invoking retroactive erasure.
Args:
time (int): the time at which these records were added
records (rrecord.Record): the records to retroactively erase
"""
pass
@abc.abstractmethod
def _callback_insert(self, change):
"""
Callback method for handling the case of rrecord.Record.INSERT for a single
change.
Args:
record (rrecord.Record): the insertion record to make
"""
pass
def _start(self):
"""
Starts the callback thread. Manually called by subclasses after
initialization, so that callbacks can be correctly completed.
"""
self._cb_thread.start()
def free(self):
""" Performs cleanup, such as on the callback thread. """
self._pubsub_exit_event.set()
self._pubsub_exited_event.wait()
# Some extended RView abstract subclasses
# Retroactive view wrapping integral (int-like) values.
class RViewIntegrable(RView, wrap(numbers.Integral)): pass
# Retroactive view wrapping string values.
class RViewString(RView, wrap(str)): pass
|
stage_op_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1
class StageTest(test.TestCase):
  """Tests for data_flow_ops.StagingArea.

  Each test builds a finalized graph that stages values produced on the CPU
  into a StagingArea placed on the GPU (or whatever test.gpu_device_name()
  returns), then checks get/peek/size/clear semantics, capacity limits and
  memory limits. Note the pipelining pattern: every sess.run() both puts a
  new value and gets the previously staged one, so results lag one step
  behind the fed value (hence the (i - 1) terms in the expected results).
  """

  def testSimple(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea([dtypes.float32])
        stage = stager.put([v])
        y = stager.get()
        y = math_ops.reduce_max(math_ops.matmul(y, y))

    G.finalize()

    with self.session(use_gpu=True, graph=G) as sess:
      sess.run(stage, feed_dict={x: -1})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i})
        # get() returns the value staged on the previous step: 2*(i-1),
        # and reduce_max(matmul) of a constant 128x128 gives (2(i-1))^2*128.
        self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)

  def testMultiple(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea([dtypes.float32, dtypes.float32])
        stage = stager.put([x, v])
        z, y = stager.get()
        y = math_ops.reduce_max(z * math_ops.matmul(y, y))

    G.finalize()

    with self.session(use_gpu=True, graph=G) as sess:
      sess.run(stage, feed_dict={x: -1})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i})
        # Extra factor of (i - 1) comes from the staged scalar z.
        self.assertAllClose(
            4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)

  def testDictionary(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        # Named tensors: put/get use a dict instead of a list.
        stager = data_flow_ops.StagingArea(
            [dtypes.float32, dtypes.float32],
            shapes=[[], [128, 128]],
            names=['x', 'v'])
        stage = stager.put({'x': x, 'v': v})
        ret = stager.get()
        z = ret['x']
        y = ret['v']
        y = math_ops.reduce_max(z * math_ops.matmul(y, y))

    G.finalize()

    with self.session(use_gpu=True, graph=G) as sess:
      sess.run(stage, feed_dict={x: -1})
      for i in range(10):
        _, yval = sess.run([stage, y], feed_dict={x: i})
        self.assertAllClose(
            4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)

  def testColocation(self):
    # put() should be colocated with the StagingArea's device, while get()
    # issued under a CPU device scope stays on the CPU.
    gpu_dev = test.gpu_device_name()

    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32)
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(gpu_dev):
        stager = data_flow_ops.StagingArea([dtypes.float32])
        y = stager.put([v])
        expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
        self.assertEqual(y.device, expected_name)
      with ops.device('/cpu:0'):
        x = stager.get()[0]
        self.assertEqual(x.device, '/device:CPU:0')

    G.finalize()

  def testPeek(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.int32, name='x')
        p = array_ops.placeholder(dtypes.int32, name='p')
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [
                dtypes.int32,
            ], shapes=[[]])
        stage = stager.put([x])
        peek = stager.peek(p)
        ret = stager.get()

    G.finalize()

    with self.session(use_gpu=True, graph=G) as sess:
      for i in range(10):
        sess.run(stage, feed_dict={x: i})
      # peek(i) inspects element i without removing it.
      for i in range(10):
        self.assertTrue(sess.run(peek, feed_dict={p: i}) == [i])

  def testSizeAndClear(self):
    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.float32, name='x')
        v = 2. * (array_ops.zeros([128, 128]) + x)
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [dtypes.float32, dtypes.float32],
            shapes=[[], [128, 128]],
            names=['x', 'v'])
        stage = stager.put({'x': x, 'v': v})
        ret = stager.get()
        size = stager.size()
        clear = stager.clear()

    G.finalize()

    with self.session(use_gpu=True, graph=G) as sess:
      sess.run(stage, feed_dict={x: -1})
      self.assertEqual(sess.run(size), 1)
      sess.run(stage, feed_dict={x: -1})
      self.assertEqual(sess.run(size), 2)
      sess.run(clear)
      self.assertEqual(sess.run(size), 0)

  def testCapacity(self):
    # A put() beyond `capacity` elements must block until a get() drains one.
    capacity = 3

    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.int32, name='x')
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [
                dtypes.int32,
            ], capacity=capacity, shapes=[[]])
        stage = stager.put([x])
        ret = stager.get()
        size = stager.size()

    G.finalize()

    from six.moves import queue as Queue
    import threading

    queue = Queue.Queue()
    n = 8

    with self.session(use_gpu=True, graph=G) as sess:
      # Stage data in a separate thread which will block
      # when it hits the staging area's capacity and thus
      # not fill the queue with n tokens
      def thread_run():
        for i in range(n):
          sess.run(stage, feed_dict={x: i})
          queue.put(0)

      t = threading.Thread(target=thread_run)
      t.daemon = True
      t.start()

      # Get tokens from the queue until a timeout occurs
      try:
        for i in range(n):
          queue.get(timeout=TIMEOUT)
      except Queue.Empty:
        pass

      # Should've timed out on the iteration 'capacity'
      # (i retains the loop value at which queue.get() timed out)
      if not i == capacity:
        self.fail("Expected to timeout on iteration '{}' "
                  "but instead timed out on iteration '{}' "
                  "Staging Area size is '{}' and configured "
                  "capacity is '{}'.".format(capacity, i, sess.run(size),
                                             capacity))

      # Should have capacity elements in the staging area
      self.assertTrue(sess.run(size) == capacity)

      # Clear the staging area completely
      for i in range(n):
        self.assertTrue(sess.run(ret) == [i])

      # It should now be empty
      self.assertTrue(sess.run(size) == 0)

  def testMemoryLimit(self):
    # Like testCapacity, but bounded by bytes instead of element count.
    memory_limit = 512 * 1024  # 512K
    chunk = 200 * 1024  # 200K per staged element (so 2 chunks fit the limit)
    capacity = memory_limit // chunk

    with ops.Graph().as_default() as G:
      with ops.device('/cpu:0'):
        x = array_ops.placeholder(dtypes.uint8, name='x')
      with ops.device(test.gpu_device_name()):
        stager = data_flow_ops.StagingArea(
            [
                dtypes.uint8,
            ], memory_limit=memory_limit, shapes=[[]])
        stage = stager.put([x])
        ret = stager.get()
        size = stager.size()

    G.finalize()

    from six.moves import queue as Queue
    import threading
    import numpy as np

    queue = Queue.Queue()
    n = 8

    with self.session(use_gpu=True, graph=G) as sess:
      # Stage data in a separate thread which will block
      # when it hits the staging area's capacity and thus
      # not fill the queue with n tokens
      def thread_run():
        for i in range(n):
          sess.run(stage, feed_dict={x: np.full(chunk, i, dtype=np.uint8)})
          queue.put(0)

      t = threading.Thread(target=thread_run)
      t.daemon = True
      t.start()

      # Get tokens from the queue until a timeout occurs
      try:
        for i in range(n):
          queue.get(timeout=TIMEOUT)
      except Queue.Empty:
        pass

      # Should've timed out on the iteration 'capacity'
      if not i == capacity:
        self.fail("Expected to timeout on iteration '{}' "
                  "but instead timed out on iteration '{}' "
                  "Staging Area size is '{}' and configured "
                  "capacity is '{}'.".format(capacity, i, sess.run(size),
                                             capacity))

      # Should have capacity elements in the staging area
      self.assertTrue(sess.run(size) == capacity)

      # Clear the staging area completely
      for i in range(n):
        self.assertTrue(np.all(sess.run(ret)[0] == i))

      self.assertTrue(sess.run(size) == 0)
if __name__ == '__main__':
  test.main()  # run all tests when executed as a script
|
utils.py |
import base64
import codecs
import fcntl
import hashlib
import json
import os
import pipes
import re
import shlex
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import uuid
import zipfile

try:
    from collections.abc import Iterable, Mapping
except ImportError:
    from collections import Iterable, Mapping

from io import BytesIO, StringIO
from six import string_types, PY2, PY3, text_type, binary_type
class Bunch(object):
    '''
    A minimal attribute container: keyword arguments passed to the
    constructor (or to update()) become instance attributes.

    Follows the classic Bunch pattern (Alex Martelli / Doug Hudgeon).
    '''

    def __init__(self, **kwargs):
        self.update(**kwargs)

    def update(self, **kwargs):
        '''Merge the given keyword arguments into the instance namespace.'''
        for name, value in kwargs.items():
            setattr(self, name, value)

    def get(self, key):
        '''Return the attribute named *key*, or None if it was never set.'''
        return self.__dict__.get(key)
def isplaybook(obj):
    '''
    Inspects the object and returns if it is a playbook

    Args:
        obj (object): The object to be inspected by this function

    Returns:
        boolean: True if the object is a list-like iterable (a playbook)
        and False if it is not
    '''
    # A playbook is a sequence of plays; str and Mapping are themselves
    # Iterable, so they must be excluded explicitly. The six.string_types
    # indirection was dropped: this module is Python 3 only (it uses
    # f-strings), where string_types == (str,).
    return isinstance(obj, Iterable) and not isinstance(obj, (str, Mapping))
def isinventory(obj):
    '''
    Inspects the object and returns if it is an inventory

    Args:
        obj (object): The object to be inspected by this function

    Returns:
        boolean: True if the object is an inventory dict (or a string path /
        inline inventory text) and False if it is not
    '''
    # six.string_types was dropped: this module is Python 3 only (it uses
    # f-strings), where string_types == (str,).
    return isinstance(obj, (Mapping, str))
def check_isolation_executable_installed(isolation_executable):
    '''
    Check that process isolation executable (e.g. podman, docker, bwrap) is installed.

    Runs ``<isolation_executable> --version`` and reports whether it exited
    successfully.

    Returns:
        bool: True when the executable ran and exited with status 0,
        False when it does not exist (ENOENT).

    Raises:
        RuntimeError: for any other failure launching the executable.
    '''
    cmd = [isolation_executable, '--version']
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        proc.communicate()
        return proc.returncode == 0
    except (OSError, ValueError) as e:
        if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2:  # ENOENT, no such file or directory
            # BUG FIX: chain the original exception so the real cause is
            # not lost from the traceback.
            raise RuntimeError(f'{isolation_executable} unavailable for unexpected reason.') from e
        return False
def stream_dir(directory):
    '''
    Zip the contents of *directory* and return a framed transmission payload:
    a JSON header line declaring the length of the base85 payload, a newline,
    then the base85-encoded zip data itself.

    Args:
        directory (string): directory to archive; falsy values produce an
            empty archive.

    Returns:
        bytes: ``b'{"zipfile": <len>}\\n<base85 data>'``
    '''
    buf = BytesIO()
    with zipfile.ZipFile(buf, 'w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as archive:
        if directory:
            for dirpath, dirs, files in os.walk(directory):
                relpath = os.path.relpath(dirpath, directory)
                if relpath == ".":
                    relpath = ""
                for fname in files:
                    archive.write(os.path.join(dirpath, fname), arcname=os.path.join(relpath, fname))
    # NOTE: the redundant archive.close() was removed -- the context manager
    # already finalizes the archive when the with-block exits.
    payload = base64.b85encode(buf.getvalue())
    return b'\n'.join((json.dumps({'zipfile': len(payload)}).encode('utf-8'), payload))
def unstream_dir(data, directory):
    '''
    Extract a base85-encoded zip payload into *directory*, restoring the
    unix permission bits stored in the archive.

    NOTE: caller needs to process exceptions.
    '''
    archive_bytes = base64.b85decode(data)
    with zipfile.ZipFile(BytesIO(archive_bytes), 'r') as archive:
        # zipfile.extract() alone discards unix modes, so each member's
        # permissions (kept in the high 16 bits of external_attr) are
        # re-applied explicitly after extraction.
        # https://www.burgundywall.com/post/preserving-file-perms-with-python-zipfile-module
        for member in archive.infolist():
            archive.extract(member.filename, path=directory)
            mode = member.external_attr >> 16
            os.chmod(os.path.join(directory, member.filename), mode)
def dump_artifact(obj, path, filename=None):
    '''
    Write the artifact to disk at the specified path

    Args:
        obj (string): The string object to be dumped to disk in the specified
            path. The artifact filename will be automatically created.
        path (string): The full path to the artifacts data directory.
        filename (string, optional): The name of file to write the artifact to.
            If the filename is not provided, then one will be generated.

    Returns:
        string: The full path filename for the artifact that was generated.
    '''
    p_sha1 = None
    if not os.path.exists(path):
        os.makedirs(path, mode=0o700)
    else:
        p_sha1 = hashlib.sha1()
        p_sha1.update(obj.encode(encoding='UTF-8'))

    if filename is None:
        fd, fn = tempfile.mkstemp(dir=path)
        # BUG FIX: mkstemp returns an open descriptor which was previously
        # leaked; close it here (the file is re-opened by name below).
        os.close(fd)
        # BUG FIX: the old digest comparison crashed for generated filenames
        # (the mkstemp file always exists, so it dereferenced an unset
        # digest). A fresh temp file must simply always be written.
        needs_write = True
    else:
        fn = os.path.join(path, filename)
        if os.path.exists(fn) and p_sha1 is not None:
            c_sha1 = hashlib.sha1()
            with open(fn) as f:
                c_sha1.update(f.read().encode(encoding='UTF-8'))
            # Skip the write when the on-disk content already matches.
            needs_write = p_sha1.hexdigest() != c_sha1.hexdigest()
        else:
            needs_write = True

    if needs_write:
        # Serialize concurrent writers via an advisory lock file.
        lock_fp = os.path.join(path, '.artifact_write_lock')
        lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
        fcntl.lockf(lock_fd, fcntl.LOCK_EX)
        try:
            with open(fn, 'w') as f:
                os.chmod(fn, stat.S_IRUSR)
                f.write(str(obj))
        finally:
            fcntl.lockf(lock_fd, fcntl.LOCK_UN)
            os.close(lock_fd)
            os.remove(lock_fp)
    return fn
def cleanup_artifact_dir(path, num_keep=0):
    '''
    Rotate artifact directories under *path*, deleting the oldest entries
    (by modification time) so that at most *num_keep* remain.

    A num_keep below 1 disables cleanup/rotation entirely.
    '''
    if num_keep < 1:
        return
    entries = [os.path.join(path, name) for name in os.listdir(path)]
    entries.sort(key=os.path.getmtime)
    # Oldest entries come first; remove everything beyond the keep budget.
    excess = max(len(entries) - num_keep, 0)
    for stale in entries[:excess]:
        shutil.rmtree(stale)
def dump_artifacts(kwargs):
    '''
    Introspect the kwargs and dump objects to disk.

    Mutates *kwargs* in place: a 'role' is expanded into a generated
    playbook, 'playbook'/'inventory' objects are written under the private
    data dir (and replaced by their file paths), and the env-related keys
    ('envvars', 'extravars', 'passwords', 'settings', 'ssh_key', 'cmdline')
    are written into the 'env' directory and removed from kwargs.
    '''
    # A private data dir is required; create a throwaway one if absent.
    private_data_dir = kwargs.get('private_data_dir')
    if not private_data_dir:
        private_data_dir = tempfile.mkdtemp()
        kwargs['private_data_dir'] = private_data_dir

    if not os.path.exists(private_data_dir):
        raise ValueError('private_data_dir path is either invalid or does not exist')

    if 'role' in kwargs:
        # Synthesize a one-play playbook that just runs the requested role.
        role = {'name': kwargs.pop('role')}
        if 'role_vars' in kwargs:
            role['vars'] = kwargs.pop('role_vars')

        play = [{'hosts': kwargs.pop('hosts', 'all'), 'roles': [role]}]

        if kwargs.pop('role_skip_facts', False):
            play[0]['gather_facts'] = False

        kwargs['playbook'] = play

        if 'envvars' not in kwargs:
            kwargs['envvars'] = {}

        # Make the private data dir's roles folder discoverable, appended
        # after any user-supplied roles_path.
        roles_path = kwargs.pop('roles_path', None)
        if not roles_path:
            roles_path = os.path.join(private_data_dir, 'roles')
        else:
            roles_path += ':{}'.format(os.path.join(private_data_dir, 'roles'))

        kwargs['envvars']['ANSIBLE_ROLES_PATH'] = roles_path

    # In-memory playbook -> project/main.json, replaced by its path.
    obj = kwargs.get('playbook')
    if obj and isplaybook(obj):
        path = os.path.join(private_data_dir, 'project')
        kwargs['playbook'] = dump_artifact(json.dumps(obj), path, 'main.json')

    # Inventory: dicts are serialized; strings are written verbatim unless
    # they are already a path on disk.
    obj = kwargs.get('inventory')
    if obj and isinventory(obj):
        path = os.path.join(private_data_dir, 'inventory')
        if isinstance(obj, Mapping):
            kwargs['inventory'] = dump_artifact(json.dumps(obj), path, 'hosts.json')
        elif isinstance(obj, string_types):
            if not os.path.exists(obj):
                kwargs['inventory'] = dump_artifact(obj, path, 'hosts')

    # JSON-valued env files; existing files on disk are never overwritten.
    for key in ('envvars', 'extravars', 'passwords', 'settings'):
        obj = kwargs.get(key)
        if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
            path = os.path.join(private_data_dir, 'env')
            dump_artifact(json.dumps(obj), path, key)
            kwargs.pop(key)

    # Plain-string env files, same no-overwrite rule.
    for key in ('ssh_key', 'cmdline'):
        obj = kwargs.get(key)
        if obj and not os.path.exists(os.path.join(private_data_dir, 'env', key)):
            path = os.path.join(private_data_dir, 'env')
            dump_artifact(str(kwargs[key]), path, key)
            kwargs.pop(key)
def collect_new_events(event_path, old_events):
    '''
    Collect new events for the 'events' generator property.

    Yields (event, old_events) for every complete, previously unseen
    ``<counter>-<uuid>.json`` file in *event_path*, marking each one in
    *old_events* as it goes.
    '''
    event_name_re = re.compile(r"^[0-9]+-.+json$")
    unseen = [
        name for name in os.listdir(event_path)
        if event_name_re.match(name)
        and '-partial' not in name
        and name not in old_events
    ]
    # Replay events in counter order (the numeric filename prefix).
    unseen.sort(key=lambda name: int(name.split("-", 1)[0]))

    for name in unseen:
        with codecs.open(os.path.join(event_path, name), 'r', encoding='utf-8') as handle:
            try:
                event = json.load(handle)
            except ValueError:
                # A half-written file means the producer is still busy;
                # stop here and pick it up on the next poll.
                break
            old_events[name] = True
            yield event, old_events
class OutputEventFilter(object):
    '''
    File-like object that looks for encoded job events in stdout data.

    Wraps a real file handle: write() scans the incoming stream for
    base64-encoded event payloads bracketed by \\x1b[K tokens, decodes them
    and forwards structured events to event_callback, while passing the
    (optionally suppressed) display text through to the wrapped handle.
    '''
    # Matches one encoded event: \x1b[K <base64 chunks, each followed by a
    # cursor-left escape \x1b[<n>D> \x1b[K
    EVENT_DATA_RE = re.compile(r'\x1b\[K((?:[A-Za-z0-9+/=]+\x1b\[\d+D)+)\x1b\[K')

    def __init__(self, handle, event_callback,
                 suppress_ansible_output=False, output_json=False):
        self._event_callback = event_callback
        self._counter = 0           # monotonically increasing event counter
        self._start_line = 0        # running stdout line offset across events
        self._handle = handle
        self._buffer = StringIO()   # accumulates not-yet-parsed stdout
        self._last_chunk = ''       # sliding window for token detection
        self._current_event_data = None  # event whose stdout is being collected
        self.output_json = output_json
        self.suppress_ansible_output = suppress_ansible_output

    def flush(self):
        self._handle.flush()

    def write(self, data):
        self._buffer.write(data)

        # keep a sliding window of the last chunk written so we can detect
        # event tokens and determine if we need to perform a search of the full
        # buffer
        should_search = '\x1b[K' in (self._last_chunk + data)
        self._last_chunk = data

        # Only bother searching the buffer if we recently saw a start/end
        # token (\x1b[K)
        while should_search:
            value = self._buffer.getvalue()
            match = self.EVENT_DATA_RE.search(value)
            if not match:
                break
            try:
                # Strip the interleaved cursor-movement escapes, then decode
                # the base64 JSON event payload.
                base64_data = re.sub(r'\x1b\[\d+D', '', match.group(1))
                event_data = json.loads(base64.b64decode(base64_data).decode('utf-8'))
            except ValueError:
                event_data = {}
            # Everything before the match is stdout belonging to the
            # previous event context.
            event_data = self._emit_event(value[:match.start()], event_data)
            if not self.output_json:
                stdout_actual = event_data['stdout'] if 'stdout' in event_data else None
            else:
                stdout_actual = json.dumps(event_data)
            remainder = value[match.end():]
            self._buffer = StringIO()
            self._buffer.write(remainder)

            if stdout_actual and stdout_actual != "{}":
                if not self.suppress_ansible_output:
                    sys.stdout.write(
                        stdout_actual.encode('utf-8') if PY2 else stdout_actual
                    )
                    sys.stdout.write("\n")
                    sys.stdout.flush()
                self._handle.write(stdout_actual + "\n")
                self._handle.flush()

            self._last_chunk = remainder
        else:
            # Verbose stdout outside of event data context
            if data and '\n' in data and self._current_event_data is None:
                # emit events for all complete lines we know about
                lines = self._buffer.getvalue().splitlines(True)  # keep ends
                remainder = None
                # if last line is not a complete line, then exclude it
                if '\n' not in lines[-1]:
                    remainder = lines.pop()
                # emit all complete lines
                for line in lines:
                    self._emit_event(line)
                    if not self.suppress_ansible_output:
                        sys.stdout.write(
                            line.encode('utf-8') if PY2 else line
                        )
                    self._handle.write(line)
                    self._handle.flush()
                self._buffer = StringIO()
                # put final partial line back on buffer
                if remainder:
                    self._buffer.write(remainder)

    def close(self):
        # Flush any buffered stdout as a final event, then signal EOF.
        value = self._buffer.getvalue()
        if value:
            self._emit_event(value)
            self._buffer = StringIO()
        self._event_callback(dict(event='EOF'))
        self._handle.close()

    def _emit_event(self, buffered_stdout, next_event_data=None):
        '''Attach buffered stdout to the current event (or synthesize
        'verbose' events for free-standing output), invoke the callback,
        and remember next_event_data as the new current event context.'''
        next_event_data = next_event_data or {}
        if self._current_event_data:
            event_data = self._current_event_data
            stdout_chunks = [buffered_stdout]
        elif buffered_stdout:
            event_data = dict(event='verbose')
            stdout_chunks = buffered_stdout.splitlines(True)
        else:
            event_data = dict()
            stdout_chunks = []

        for stdout_chunk in stdout_chunks:
            if event_data.get('event') == 'verbose':
                event_data['uuid'] = str(uuid.uuid4())
            self._counter += 1
            event_data['counter'] = self._counter
            event_data['stdout'] = stdout_chunk[:-2] if len(stdout_chunk) > 2 else ""
            n_lines = stdout_chunk.count('\n')
            event_data['start_line'] = self._start_line
            event_data['end_line'] = self._start_line + n_lines
            self._start_line += n_lines
            if self._event_callback:
                self._event_callback(event_data)

        if next_event_data.get('uuid', None):
            self._current_event_data = next_event_data
        else:
            self._current_event_data = None
        return event_data
def open_fifo_write(path, data):
    '''open_fifo_write opens the fifo named pipe in a new thread.
    This blocks the thread until an external process (such as ssh-agent)
    reads data from the pipe.

    BUG FIX: the writer previously relied on garbage collection to close
    the pipe; a context manager now closes it deterministically once the
    payload is written, so the reader reliably observes EOF.
    '''
    os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)

    def _write_and_close(p, d):
        with open(p, 'wb') as fifo:
            fifo.write(d)

    threading.Thread(target=_write_and_close, args=(path, data)).start()
def args2cmdline(*args):
    '''Join *args* into a single shell-safe command line string.'''
    # shlex.quote replaces pipes.quote: the pipes module is deprecated and
    # removed in Python 3.13, and pipes.quote was an alias for shlex.quote.
    return ' '.join(shlex.quote(a) for a in args)
def ensure_str(s, encoding='utf-8', errors='strict'):
    """
    Coerce *s* to `str`.

    - `str`   -> returned unchanged
    - `bytes` -> decoded to `str` using *encoding* / *errors*

    Raises:
        TypeError: for any other type.
    """
    # The six-based implementation this replaces carried a dead Python 2
    # branch; this module is Python 3 only (it uses f-strings), where
    # text_type is str and binary_type is bytes.
    if isinstance(s, str):
        return s
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    raise TypeError("not expecting type '%s'" % type(s))
def sanitize_container_name(original_name):
    """
    Docker and podman will only accept certain characters in container names.
    This takes a given name from user-specified values and replaces the
    invalid characters so it can be used in docker/podman CLI commands.

    Every character outside [a-zA-Z0-9_-] is replaced with '_'.
    """
    # six.text_type replaced by the builtin str -- this module is Python 3
    # only (it uses f-strings), where text_type is str.
    return re.sub('[^a-zA-Z0-9_-]', '_', str(original_name))
|
benchmark_server.py | import unittest
import urllib2
import json
import os
import signal
from uuid import uuid4
from toto.secret import *
from multiprocessing import Process, active_children
from toto.server import TotoServer
from time import sleep, time
def run_server(processes=1):
  # Blocking call: serve the 'web_methods' module on port 9000; intended to
  # be the target of a child Process (see TestWeb.setUpClass).
  TotoServer(method_module='web_methods', port=9000, processes=processes).run()
class TestWeb(unittest.TestCase):
  """Throughput benchmark against a forked TotoServer (Python 2 code:
  uses print statements, urllib2 and xrange)."""

  @classmethod
  def setUpClass(cls):
    # Fork the server and give it a moment to bind to port 9000.
    print 'Starting server'
    cls.service_process = Process(target=run_server, args=[1])
    cls.service_process.start()
    sleep(0.5)

  @classmethod
  def tearDownClass(cls):
    # NOTE(review): blunt cleanup -- this scans `ps` output and SIGKILLs
    # every python process whose command line mentions 'unittest'
    # (except this one) to reap the forked server and its workers.
    print 'Stopping server'
    processes = [int(l.split()[0]) for l in os.popen('ps').readlines() if 'python' in l and 'unittest' in l]
    for p in processes:
      if p == os.getpid():
        continue
      print 'killing', p
      os.kill(p, signal.SIGKILL)
    sleep(0.5)

  def test_method(self):
    # Issue 1000 sequential JSON POSTs to the 'test.ok' method and report
    # total time, average latency and requests/second.
    request = {}
    request['method'] = 'test.ok'
    request['parameters'] = {'arg1': 1, 'arg2': 'hello'}
    headers = {'content-type': 'application/json'}
    req = urllib2.Request('http://127.0.0.1:9000/', json.dumps(request), headers)
    start = time()
    for i in xrange(1000):
      f = urllib2.urlopen(req)
    total = time() - start
    print '1000 requests in %s seconds\nAverage time %s ms (%s requests/second)' % (total, total/1000.0*1000.0, 1000.0/total)
|
emulator.py | """
Commodore-64 simulator in 100% pure Python 3.x :)
This module is the GUI window logic, handling keyboard input
and screen drawing via tkinter bitmaps.
Written by Irmen de Jong (irmen@razorvine.net)
License: MIT open-source.
"""
import io
import os
import sys
import tkinter
import pkgutil
import threading
import queue
import time
from collections import deque
from PIL import Image
from .memory import ScreenAndMemory
from .basic import BasicInterpreter
from .shared import ResetMachineException, do_sys
from .python import PythonInterpreter
def create_bitmaps_from_char_rom(temp_graphics_folder, roms_directory):
    """Create 16x16 .xbm char bitmaps from the original C-64 chargen ROM.

    Writes char-XX.xbm (normal set, ROM offset 0) and char-sh-XX.xbm
    (shifted set, ROM offset 2048) into temp_graphics_folder, doubling each
    8x8 ROM glyph in both directions for the 2x-zoomed display.
    """
    # BUG FIX: the ROM file handle was previously left open; a context
    # manager now closes it deterministically.
    with open(roms_directory + "/chargen", "rb") as romfile:
        rom = romfile.read()

    def doublewidth_and_mirror(b):
        # Expand one 8-bit ROM row to 16 bits by duplicating every pixel,
        # reversing the bit order to match XBM's LSB-first pixel layout.
        result = 0
        for _ in range(8):
            bit = b & 1
            b >>= 1
            result <<= 1
            result |= bit
            result <<= 1
            result |= bit
        x, y = divmod(result, 256)
        return y, x

    def writechar(c, rom_offset, filesuffix):
        # Each output bitmap is 16x16: every ROM row is written twice
        # (vertical doubling) after being doubled horizontally.
        with open("{:s}/char{:s}-{:02x}.xbm".format(temp_graphics_folder, filesuffix, c), "wb") as outf:
            outf.write(b"#define im_width 16\n")
            outf.write(b"#define im_height 16\n")
            outf.write(b"static char im_bits[] = {\n")
            for y in range(8):
                b1, b2 = doublewidth_and_mirror(rom[c * 8 + y + rom_offset])
                outf.write(bytes("0x{:02x}, 0x{:02x}, 0x{:02x}, 0x{:02x}, ".format(b1, b2, b1, b2), "ascii"))
            outf.seek(-2, os.SEEK_CUR)  # get rid of the last space and comma
            outf.write(b"\n};\n")

    # normal chars
    for c in range(256):
        writechar(c, 0, "")
    # shifted chars
    for c in range(256):
        writechar(c, 256 * 8, "-sh")
class EmulatorWindowBase(tkinter.Tk):
    """Base tkinter window for the emulator.

    Renders the character screen, sprites and border via tkinter canvas
    bitmaps and dispatches keyboard events. Subclasses supply the display
    configuration through the class attributes below and override the key
    handlers.
    """
    temp_graphics_folder = "temp_gfx"  # cache folder for generated .xbm bitmaps
    update_rate = 1000 // 20  # 20 hz screen refresh rate (interval in ms)
    columns = 0       # text screen width in characters; set by subclass
    rows = 0          # text screen height in characters; set by subclass
    bordersize = 0    # border thickness in pixels (0 = no border drawn)
    sprites = 0       # number of hardware sprites to emulate
    smoothscrolling = True   # apply pixel-level scroll registers in repaint()
    windowgeometry = "+200+40"
    charset_normal = "charset-normal.png"    # fallback charset images used
    charset_shifted = "charset-shifted.png"  # when no chargen ROM is present
    colorpalette = []  # subclass must supply a power-of-two sized palette
    welcome_message = "Welcome to the simulator!"
    def __init__(self, screen, title, roms_directory):
        """Build the emulator window: canvas, char/sprite bitmaps, borders.

        Args:
            screen: screen/memory object providing chars, colors, sprites
                and scroll registers to render.
            title: window title string.
            roms_directory: folder that may contain a 'chargen' ROM file.

        Raises:
            ValueError: when the subclass-supplied class attributes
                (colorpalette, rows/columns, bordersize, sprites) are
                out of range.
        """
        # Validate the subclass-supplied display configuration first.
        if len(self.colorpalette) not in (2, 4, 8, 16, 32, 64, 128, 256):
            raise ValueError("colorpalette size not a valid power of 2")
        if self.columns <= 0 or self.columns > 128 or self.rows <= 0 or self.rows > 128:
            raise ValueError("row/col size invalid")
        if self.bordersize < 0 or self.bordersize > 256:
            raise ValueError("bordersize invalid")
        if self.sprites < 0 or self.sprites > 256:
            raise ValueError("sprites invalid")
        super().__init__()
        self.wm_title(title)
        self.appicon = tkinter.PhotoImage(data=pkgutil.get_data(__name__, "icon.gif"))
        self.wm_iconphoto(self, self.appicon)
        if sys.platform == "win32":
            # tell windows to use a new toolbar icon
            import ctypes
            myappid = 'net.Razorvine.Tale.story'  # arbitrary string
            ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
        self.geometry(self.windowgeometry)
        self.screen = screen
        # Characters are drawn at 2x zoom (16x16 px each); the canvas holds
        # the text grid plus the border on all sides.
        self.canvas = tkinter.Canvas(self, width=2 * self.bordersize + self.columns * 16,
                                     height=2 * self.bordersize + self.rows * 16,
                                     borderwidth=0, highlightthickness=0, background="black",
                                     xscrollincrement=1, yscrollincrement=1)
        self.buttonbar = tkinter.Frame(self)
        resetbut1 = tkinter.Button(self.buttonbar, text="reset", command=self.reset_machine)
        resetbut1.pack(side=tkinter.LEFT)
        self.buttonbar.pack(fill=tkinter.X)
        # Set once per frame by repaint(); lets other threads sync to refresh.
        self.refreshtick = threading.Event()
        # Last-seen raw sprite data, used by repaint() for change detection.
        self.spritebitmapbytes = [None] * self.sprites
        self.spritebitmaps = []
        self.roms_directory = roms_directory
        self.create_bitmaps(self.roms_directory)
        # create the character bitmaps for all character tiles, fixed on the canvas:
        self.charbitmaps = []
        for y in range(self.rows):
            for x in range(self.columns):
                cor = self.screencor((x, y))
                bm = self.canvas.create_bitmap(cor[0], cor[1], bitmap="@" + self.temp_graphics_folder + "/char-20.xbm",
                                               foreground="white", background="black", anchor=tkinter.NW,
                                               tags="charbitmap")
                self.charbitmaps.append(bm)
        # create the sprite tkinter bitmaps (created in reverse order so that
        # the list index matches the sprite number):
        for i in range(self.sprites - 1, -1, -1):
            cor = self.screencor_sprite((30 + i * 20, 140 + i * 10))
            bm = self.canvas.create_bitmap(cor[0], cor[1],
                                           bitmap="@{:s}/sprite-{:d}.xbm".format(self.temp_graphics_folder, i),
                                           foreground=self.tkcolor(i + 8), background=None, anchor=tkinter.NW,
                                           tags="spritebitmap")
            self.spritebitmaps.insert(0, bm)
        # the borders: four rectangles layered over the canvas edges
        if self.bordersize > 0:
            b1, b2, b3, b4 = self._border_positions()
            self.border1 = self.canvas.create_rectangle(*b1, outline="", fill="#000")
            self.canvas.tag_raise(self.border1)
            self.border2 = self.canvas.create_rectangle(*b2, outline="", fill="#000")
            self.canvas.tag_raise(self.border2)
            self.border3 = self.canvas.create_rectangle(*b3, outline="", fill="#000")
            self.canvas.tag_raise(self.border3)
            self.border4 = self.canvas.create_rectangle(*b4, outline="", fill="#000")
            self.canvas.tag_raise(self.border4)
        # self.bind("<KeyPress>", self.keypress)
        self.bind("<KeyRelease>", self.keyrelease)
        self.bind("<Key>", self.keypress)
        self.canvas.pack()
    def start(self):
        """Kick off the periodic screen refresh, then show the welcome text."""
        self._cyclic_repaint()
        self._welcome_message()
def _welcome_message(self):
if self.welcome_message:
topleft = self.screencor((0, 0))
introtxt = self.canvas.create_text(topleft[0] + 16 * self.columns // 2,
topleft[0] + 16 * (self.rows // 2 - 2),
fill="white", justify=tkinter.CENTER,
text=self.welcome_message)
self.after(4000, lambda: self.canvas.delete(introtxt))
def _cyclic_repaint(self):
starttime = time.perf_counter()
self.repaint()
duration = time.perf_counter() - starttime
remaining_timer_budget = (self.update_rate / 1000) - duration
if remaining_timer_budget < 0.001:
print("warning: screen refresh took too long! ", remaining_timer_budget, file=sys.stderr)
remaining_timer_budget = 0.001
self.cyclic_repaint_after = self.after(int(remaining_timer_budget * 1000), self._cyclic_repaint)
    def keypress(self, event):
        """Handle a key-down tkinter event."""
        pass  # override in subclass
    def keyrelease(self, event):
        """Handle a key-up tkinter event."""
        pass  # override in subclass
    def repaint(self):
        """Redraw one frame: border color/position, changed character tiles,
        smooth-scroll offset and sprite attributes. Only canvas items whose
        state actually changed are reconfigured, to keep refresh fast."""
        # set bordercolor, done by setting the 4 border rectangles
        # (screen color done by setting the background color of all character bitmaps,
        # this is a lot faster than using many transparent bitmaps!)
        if self.bordersize > 0:
            bordercolor = self.tkcolor(self.screen.border)
            if self.canvas.itemcget(self.border1, "fill") != bordercolor:
                self.canvas.itemconfigure(self.border1, fill=bordercolor)
                self.canvas.itemconfigure(self.border2, fill=bordercolor)
                self.canvas.itemconfigure(self.border3, fill=bordercolor)
                self.canvas.itemconfigure(self.border4, fill=bordercolor)
            # adjust borders
            bc1_new, bc2_new, bc3_new, bc4_new = self._border_positions()
            bc1 = self.canvas.coords(self.border1)
            bc2 = self.canvas.coords(self.border2)
            bc3 = self.canvas.coords(self.border3)
            bc4 = self.canvas.coords(self.border4)
            if bc1_new != bc1:
                self.canvas.coords(self.border1, bc1_new)
            if bc2_new != bc2:
                self.canvas.coords(self.border2, bc2_new)
            if bc3_new != bc3:
                self.canvas.coords(self.border3, bc3_new)
            if bc4_new != bc4:
                self.canvas.coords(self.border4, bc4_new)
        # characters: only repaint tiles the screen reports as dirty
        prefix = "char-sh" if self.screen.shifted else "char"
        dirty = self.screen.getdirty()
        screencolor = self.tkcolor(self.screen.screen)
        for index, (char, color) in dirty:
            forecol = self.tkcolor(color)
            bm = self.charbitmaps[index]
            bitmap = "@{:s}/{:s}-{:02x}.xbm".format(self.temp_graphics_folder, prefix, char)
            self.canvas.itemconfigure(bm, foreground=forecol, background=screencolor, bitmap=bitmap)
        # smooth scroll: translate the whole canvas view by the scroll registers
        if self.smoothscrolling:
            xys = self.smoothscroll(self.screen.scrollx, self.screen.scrolly)
            self.canvas.xview_moveto(0)
            self.canvas.yview_moveto(0)
            self.canvas.xview_scroll(xys[0], tkinter.UNITS)
            self.canvas.yview_scroll(xys[1], tkinter.UNITS)
        # sprites
        sprites = self.screen.getsprites()
        for snum, sprite in sprites.items():
            configure = {}
            # sprite double sizes
            current_bm = self.canvas.itemcget(self.spritebitmaps[snum], "bitmap")
            extension = "-2x" if sprite.doublex else ""
            extension += "-2y" if sprite.doubley else ""
            if sprite.doublex != ("-2x" in current_bm) or sprite.doubley != ("-2y" in current_bm):
                # size change
                configure["bitmap"] = "@{:s}/sprite-{:d}{:s}.xbm".format(self.temp_graphics_folder, snum, extension)
            # bitmapdata
            if sprite.bitmap != self.spritebitmapbytes[snum]:
                # regenerate sprite bitmap
                self.create_sprite_bitmap(snum, sprite.bitmap)
                self.spritebitmapbytes[snum] = sprite.bitmap
                # first, configure another bitmap to force the old one out
                # (tkinter caches bitmaps by filename)
                self.canvas.itemconfigure(self.spritebitmaps[snum],
                                          bitmap="@{:s}/char-00.xbm".format(self.temp_graphics_folder))
                # schedule reloading the sprite bitmap:
                configure["bitmap"] = "@{:s}/sprite-{:d}{:s}.xbm".format(self.temp_graphics_folder, snum, extension)
            # sprite enabled
            tkstate = tkinter.NORMAL if sprite.enabled else tkinter.HIDDEN
            if self.canvas.itemcget(self.spritebitmaps[snum], "state") != tkstate:
                configure["state"] = tkstate
            # sprite colors
            spritecolor = self.tkcolor(sprite.color)
            if self.canvas.itemcget(self.spritebitmaps[snum], "foreground") != spritecolor:
                configure["foreground"] = spritecolor
            # sprite positions (counteract the canvas-level smooth scroll)
            x, y = self.screencor_sprite((sprite.x, sprite.y))
            self.canvas.coords(self.spritebitmaps[snum], x - 2 * self.screen.scrollx, y - 2 * self.screen.scrolly)
            if configure:
                # reconfigure all changed properties in one go
                self.canvas.itemconfigure(self.spritebitmaps[snum], **configure)
        self.update_idletasks()
        # Let waiting threads know a full frame has been painted.
        self.refreshtick.set()
def smoothscroll(self, xs, ys):
return -xs * 2, -self.ys * 2
def create_bitmaps(self, roms_directory=""):
os.makedirs(self.temp_graphics_folder, exist_ok=True)
with open(self.temp_graphics_folder + "/readme.txt", "w") as f:
f.write("this is a temporary folder to cache pyc64 files for tkinter graphics bitmaps.\n")
if roms_directory and os.path.isfile(roms_directory + "/chargen"):
# create char bitmaps from the C64 chargen rom file.
print("creating char bitmaps from chargen rom")
create_bitmaps_from_char_rom(self.temp_graphics_folder, roms_directory)
else:
if roms_directory:
print(
"creating char bitmaps from png images in the package (consider supplying {:s}/chargen ROM file)".format(
roms_directory))
else:
print("creating char bitmaps from png images in the package")
# normal
with Image.open(io.BytesIO(pkgutil.get_data(__name__, "charset/" + self.charset_normal))) as source_chars:
for i in range(256):
filename = self.temp_graphics_folder + "/char-{:02x}.xbm".format(i)
chars = source_chars.copy()
row, col = divmod(i, source_chars.width // 16) # we assume 16x16 pixel chars (2x zoom)
ci = chars.crop((col * 16, row * 16, col * 16 + 16, row * 16 + 16))
ci = ci.convert(mode="1", dither=None)
ci.save(filename, "xbm")
# shifted
with Image.open(io.BytesIO(pkgutil.get_data(__name__, "charset/" + self.charset_shifted))) as source_chars:
for i in range(256):
filename = self.temp_graphics_folder + "/char-sh-{:02x}.xbm".format(i)
chars = source_chars.copy()
row, col = divmod(i, source_chars.width // 16) # we assume 16x16 pixel chars (2x zoom)
ci = chars.crop((col * 16, row * 16, col * 16 + 16, row * 16 + 16))
ci = ci.convert(mode="1", dither=None)
ci.save(filename, "xbm")
# monochrome sprites (including their double-size variants)
sprites = self.screen.getsprites()
for i, sprite in sprites.items():
self.create_sprite_bitmap(i, sprite.bitmap)
self.spritebitmapbytes[i] = sprite.bitmap
def _border_positions(self):
if self.smoothscrolling:
sx, sy = self.smoothscroll(self.screen.scrollx, self.screen.scrolly)
else:
sx, sy = 0, 0
return [
[sx, sy,
2 * self.bordersize + self.columns * 16 + sx, self.bordersize + sy],
[self.bordersize + self.columns * 16 + sx, self.bordersize + sy,
2 * self.bordersize + self.columns * 16 + sx, self.bordersize + self.rows * 16 + sy],
[sx, self.bordersize + self.rows * 16 + sy,
2 * self.bordersize + self.columns * 16 + sx, 2 * self.bordersize + self.rows * 16 + sy],
[sx, self.bordersize + sy,
self.bordersize + sx, self.bordersize + self.rows * 16 + sy]
]
def screencor(self, cxy):
return self.bordersize + cxy[0] * 16, self.bordersize + cxy[1] * 16
def screencor_sprite(self, cxy):
return self.bordersize + cxy[0] * 2, self.bordersize + cxy[1] * 2
def tkcolor(self, color):
return "#{:06x}".format(self.colorpalette[color & len(self.colorpalette) - 1])
    def create_sprite_bitmap(self, spritenum, bitmapbytes):
        """Write the .xbm bitmap file(s) for sprite *spritenum* from its raw bitmap bytes.

        Abstract hook: concrete emulator windows must override this.
        """
        raise NotImplementedError("implement in subclass")
    def reset_machine(self):
        """Reset the emulated machine's screen/memory state and redraw the window."""
        # NOTE(review): the False argument's exact meaning is defined by
        # ScreenAndMemory.reset (not visible here) — presumably a soft reset; confirm.
        self.screen.reset(False)
        self.repaint()
class C64EmulatorWindow(EmulatorWindowBase):
    """Emulator window configured as a Commodore 64: 40x25 characters, 8 sprites,
    the pepto color palette, and C64-style keyboard / function-key handling.

    Runs either the built-in BASIC/Python interpreters or (given ROM files and
    run_real_roms=True) the actual C64 ROMs via a real CPU emulator.

    NOTE(review): indentation of this file was reconstructed during extraction;
    a few branch nestings below are inferred and marked — verify against upstream.
    """
    columns = 40
    rows = 25
    bordersize = 52  # border thickness in (2x zoomed) canvas pixels
    sprites = 8
    colorpalette = ScreenAndMemory.colorpalette_pepto
    welcome_message = "pyc64 basic & function keys active\n\n" \
                      "use 'gopy' to enter Python mode\n\n\n\n" \
                      "(install the py64 library to be able to execute 6502 machine code)"

    def __init__(self, screen, title, roms_directory, run_real_roms):
        """screen: ScreenAndMemory instance; run_real_roms: drive the actual ROMs."""
        super().__init__(screen, title, roms_directory)
        # zero-page $FB holds the configurable screen update rate (see update_rate property)
        self.screen.memory[0x00fb] = EmulatorWindowBase.update_rate
        self.hertztick = threading.Event()  # pulsed every emulated hz tick; waited on by _microsleep
        self.interpret_thread = None
        self.interpreter = None
        self.real_cpu_running = None  # set by the real-CPU thread once running (ROM mode)
        self.run_real_roms = run_real_roms
        if run_real_roms:
            # real-ROM mode needs all three original ROM images present up front
            if not os.path.isfile(os.path.join(roms_directory, "chargen")):
                raise IOError("cannot find the 'chargen' ROM file")
            if not os.path.isfile(os.path.join(roms_directory, "basic")):
                raise IOError("cannot find the 'basic' ROM file")
            if not os.path.isfile(os.path.join(roms_directory, "kernal")):
                raise IOError("cannot find the 'kernal' ROM file")

    def start(self):
        super().start()
        # NOTE(review): nesting reconstructed — the simulated hz tick only runs when
        # not using the real ROMs (their own IRQ handles timing); confirm whether the
        # cursor blink belongs inside this branch as well.
        if not self.run_real_roms:
            self._cyclic_herztick()
        self._cyclic_blink_cursor()
        self.reset_machine()

    def _cyclic_herztick(self):
        # reschedule first so timing drift stays small, then tick screen + wake waiters
        self.after(1000 // self.screen.hz, self._cyclic_herztick)
        self.screen.hztick()
        self.hertztick.set()

    def _cyclic_blink_cursor(self):
        # keep the after-id so the blink cycle could be cancelled later
        self.cyclic_blink_after = self.after(self.screen.cursor_blink_rate, self._cyclic_blink_cursor)
        self.screen.blink_cursor()

    @property
    def update_rate(self):
        # screen refresh interval in msec, user-controllable via zero-page $FB (minimum 10)
        return max(10, self.screen.memory[0x00fb])

    # keypad -> joystick mapping via keysym names; works on "sane" platforms (e.g. Linux)
    joystick_keys_sane_platforms = {
        "Control_R": "fire",
        "KP_Insert": "fire",
        "KP_0": "fire",
        "KP_Enter": "fire",
        "Alt_R": "fire",
        "KP_Up": "up",
        "KP_8": "up",
        "KP_Down": "down",
        "KP_2": "down",
        "KP_Left": "left",
        "KP_4": "left",
        "KP_Right": "right",
        "KP_6": "right",
        "KP_Home": "leftup",
        "KP_7": "leftup",
        "KP_Prior": "rightup",
        "KP_9": "rightup",
        "KP_End": "leftdown",
        "KP_1": "leftdown",
        "KP_Next": "rightdown",
        "KP_3": "rightdown"
    }
    # OSX keypad mapping by raw keycode (keysym names are unreliable there)
    joystick_keys_osx = {
        524352: "fire",  # R alt
        270336: "fire",  # R control
        5374000: "fire",  # kp 0
        498073: "fire",  # kp Enter
        5963832: "up",  # kp 8
        5505074: "down",  # kp 2
        5636148: "left",  # kp 4
        5767222: "right",  # kp 6
        5832759: "leftup",  # kp 7
        6029369: "rightup",  # kp 9
        5439537: "leftdown",  # kp 1
        5570611: "rightdown",  # kp 3
    }
    # Windows keypad mapping by keycode (with numlock active)
    joystick_keys_windows_keycode = {
        96: "fire",  # kp 0 (numlock)
        104: "up",  # kp 8 (numlock)
        98: "down",  # kp 2 (numlock)
        100: "left",  # kp 4 (numlock)
        102: "right",  # kp 6 (numlock)
        103: "leftup",  # kp 7 (numlock)
        105: "rightup",  # kp 9 (numlock)
        97: "leftdown",  # kp 1 (numlock)
        99: "rightdown"  # kp 3 (numlock)
    }

    def keyrelease(self, event):
        """Key-up handler: releases a simulated joystick direction/fire button."""
        # first check special control keys
        if sys.platform == "darwin":
            # OSX numkeys are problematic, I try to solve this via raw keycode
            if event.keycode in self.joystick_keys_osx:
                self.screen.setjoystick(**{self.joystick_keys_osx[event.keycode]: False})
                return
        elif sys.platform == "win32":
            # Windows numkeys are also problematic, need to solve this via keysym_num OR via keycode.. (sigh)
            if event.keycode in self.joystick_keys_windows_keycode:
                self.screen.setjoystick(**{self.joystick_keys_windows_keycode[event.keycode]: False})
                return
        # sane platforms (Linux for one) play nice and just use the friendly keysym name.
        elif event.keysym in self.joystick_keys_sane_platforms:
            self.screen.setjoystick(**{self.joystick_keys_sane_platforms[event.keysym]: False})
            return

    def keypress(self, event):
        """Key-down handler: joystick emulation, direct-mode editing keys, function keys."""
        # first check special control keys
        if sys.platform == "darwin":
            # OSX numkeys are problematic, I try to solve this via raw keycode
            if event.keycode in self.joystick_keys_osx:
                self.screen.setjoystick(**{self.joystick_keys_osx[event.keycode]: True})
                return
        elif sys.platform == "win32":
            # Windows numkeys are also problematic, need to solve this via keysym_num OR via keycode.. (sigh)
            if event.keycode in self.joystick_keys_windows_keycode:
                self.screen.setjoystick(**{self.joystick_keys_windows_keycode[event.keycode]: True})
                return
        # sane platforms (Linux for one) play nice and just use the friendly keysym name.
        elif event.keysym in self.joystick_keys_sane_platforms:
            self.screen.setjoystick(**{self.joystick_keys_sane_platforms[event.keysym]: True})
            return
        # turn the event into a bit more managable key character
        char = event.char
        if not char or ord(char) > 255:
            char = event.keysym
        # print("keypress", repr(char), event.state)
        with_shift = event.state & 1
        with_control = event.state & 4
        with_alt = event.state & 8
        if char.startswith("Shift") and with_control or char.startswith("Control") and with_shift \
                or char == "??" and with_control and with_shift:
            # simulate SHIFT+COMMODORE_KEY to flip the charset
            self.screen.shifted = not self.screen.shifted
            return
        if self.interpret_thread.running_something:
            # we're running something so only the break key should do something!
            if char == '\x03' and with_control:  # ctrl+C
                self.interpret_thread.runstop()
            elif char == '\x1b':  # esc
                self.interpret_thread.runstop()
            else:
                # buffer the keypress (if it's not the pgup=RESTORE key)
                if char != 'Prior':
                    self.interpret_thread.buffer_keypress(char, event)
            return
        # Fixes for OSX Keys
        if char == 'Return':
            # reconstruct the (possibly wrapped over 2-3 screen rows) logical input line
            line = self.screen.current_line(True, True, "ascii")
            line1, line2, line3 = line[0: self.columns], line[self.columns: self.columns * 2], line[
                self.columns * 2:]
            # NOTE(review): nesting of these wrap heuristics is inferred — confirm upstream
            if line1.endswith(' '):
                line1 = ''
            if line2.endswith(' '):
                line3 = ''
            else:
                line1 = ''
            line = (line1 + line2 + line3).rstrip()
            self.screen.return_key()
            if len(line) > self.columns and not line1:
                self.screen.return_key()
            if not with_shift:
                self.execute_direct_line(line)
        if char == 'BackSpace':
            if with_shift:
                # shift+DEL acts as INSert, like on the C64 keyboard
                self.screen.insert()
            else:
                self.screen.backspace()
        if char == 'Escape':
            self.interpret_thread.runstop()
        if len(char) == 1:
            # if '1' <= char <= '8' and self.key_control_down:
            #     self.c64screen.text = ord(char)-1
            if char == '\r':  # RETURN key
                line = self.screen.current_line(True, True, "ascii")
                line1, line2, line3 = line[0: self.columns], line[self.columns: self.columns * 2], line[
                    self.columns * 2:]
                # NOTE(review): same inferred nesting as the 'Return' branch above
                if line1.endswith(' '):
                    line1 = ''
                if line2.endswith(' '):
                    line3 = ''
                else:
                    line1 = ''
                line = (line1 + line2 + line3).rstrip()
                self.screen.return_key()
                if len(line) > self.columns and not line1:
                    self.screen.return_key()
                if not with_shift:
                    self.execute_direct_line(line)
            elif char in ('\x08', '\x7f', 'Delete'):
                if with_shift:
                    self.screen.insert()
                else:
                    self.screen.backspace()
            elif char == '\x03' and with_control:  # ctrl+C
                self.interpret_thread.runstop()
            elif char == '\x1b':  # esc
                self.interpret_thread.runstop()
            else:
                self.screen.writestr(char)
            self.repaint()
        else:
            # some control character
            if char == "Up":
                self.screen.up()
                self.repaint()
            elif char == "Down":
                self.screen.down()
                self.repaint()
            elif char == "Left":
                self.screen.left()
                self.repaint()
            elif char == "Right":
                self.screen.right()
                self.repaint()
            elif char == "Home":
                # shift+HOME clears the screen, plain HOME moves the cursor to (0, 0)
                if with_shift:
                    self.screen.clear()
                else:
                    self.screen.cursormove(0, 0)
                self.repaint()
            elif char == "End":
                # move to end of current line
                x, y = self.screen.cursorpos()
                line = self.screen.current_line(False, True, "screencodes").rstrip()
                x = len(line)
                if x > self.columns:
                    # line spills onto the next screen row
                    if line[self.columns - 1] == ' ':
                        line = line[:self.columns].rstrip()
                        x = len(line)
                    else:
                        y += 1
                        x -= self.columns
                if x and x % self.columns == 0:
                    x -= 1
                self.screen.cursormove(min(x, self.columns), y)
            elif char in ("Insert", "Help"):
                self.screen.insert()
                self.repaint()
            elif char == "F7":  # directory shortcut key
                self.screen.writestr(self.interpreter.F7_dir_command + "\n")
                self.execute_direct_line(self.interpreter.F7_dir_command)
            elif char == "F5":  # load file shortcut key
                if with_shift:
                    # shift+F5 behaves as F6 (load with program name prompt)
                    self.screen.writestr(self.interpreter.F6_load_command + "\n")
                    self.execute_direct_line(self.interpreter.F6_load_command)
                else:
                    self.screen.writestr(self.interpreter.F5_load_command)
                    line = self.screen.current_line(False, False, "ascii")
                    self.screen.return_key()
                    self.execute_direct_line(line)
            elif char == "F3":  # run program shortcut key
                self.screen.writestr(self.interpreter.F3_run_command + "\n")
                self.execute_direct_line(self.interpreter.F3_run_command)
            elif char == "F1":  # list program shortcut key
                self.screen.writestr(self.interpreter.F1_list_command + "\n")
                self.execute_direct_line(self.interpreter.F1_list_command)
            elif char == "Prior":  # pageup = RESTORE (outside running program)
                if not self.interpret_thread.running_something:
                    self.screen.reset()
                    self.screen.memory[0x00fb] = EmulatorWindowBase.update_rate
                    self.interpreter.write_prompt("\n")

    def execute_direct_line(self, line):
        """Execute a direct-mode line; intercepts the interpreter-switch commands first."""
        line = line.strip()
        if line.startswith("gopy"):
            self.switch_interpreter("python")
            return
        elif line.startswith((">>> go64", "go64")):
            self.switch_interpreter("basic")
            return
        self.interpret_thread.submit_line(line)

    def switch_interpreter(self, interpreter):
        """Tear down the current interpreter + worker thread and start a fresh one.

        interpreter: "basic" or "python"; raises ValueError otherwise.
        """
        if self.interpret_thread:
            self.interpret_thread.stop()
        if self.interpreter:
            self.interpreter.stop()
        self.hertztick.set()  # release anyone blocked in a microsleep wait
        self.screen.reset()
        self.screen.memory[0x00fb] = EmulatorWindowBase.update_rate
        self.repaint()
        if interpreter == "basic":
            self.interpreter = BasicInterpreter(self.screen)
            self.interpret_thread = InterpretThread(self.interpreter, self)
            self.interpreter.interactive = self.interpret_thread
            self.interpreter.start()
            self.interpret_thread.start()
        elif interpreter == "python":
            self.interpreter = PythonInterpreter(self.screen)
            self.interpret_thread = InterpretThread(self.interpreter, self)
            self.interpreter.interactive = self.interpret_thread
            self.interpreter.start()
            self.interpret_thread.start()
        else:
            raise ValueError("invalid interpreter")

    def create_sprite_bitmap(self, spritenum, bitmapbytes):
        """Render the 24x21 monochrome sprite to .xbm files: 1x, 2x wide, 2x high, 2x both."""
        with Image.frombytes("1", (24, 21), bytes(bitmapbytes)) as si:
            si = si.resize((48, 42), 0)  # resample 0 = nearest neighbour, keeps hard pixel edges
            si.save(self.temp_graphics_folder + "/sprite-{:d}.xbm".format(spritenum), "xbm")
            dx = si.resize((96, 42), 0)
            dx.save(self.temp_graphics_folder + "/sprite-{:d}-2x.xbm".format(spritenum), "xbm")
            dy = si.resize((48, 84), 0)
            dy.save(self.temp_graphics_folder + "/sprite-{:d}-2y.xbm".format(spritenum), "xbm")
            dxy = si.resize((96, 84), 0)
            dxy.save(self.temp_graphics_folder + "/sprite-{:d}-2x-2y.xbm".format(spritenum), "xbm")

    def screencor_sprite(self, cc):
        # on the C-64, sprite upper left = (24, 50)
        # so subtract from regular origin (self.borderwidth, self.borderwidth) (scaled by 2 pixels)
        return cc[0] * 2 + self.bordersize - 48, cc[1] * 2 + self.bordersize - 100

    def smoothscroll(self, xs, ys):
        # c64 smooth scrolling in Y axis has offset of 3 pixels
        return -xs * 2, -(ys - 3) * 2

    def _border_positions(self):
        b1, b2, b3, b4 = super()._border_positions()
        # adjust borders for the 24 row and/or 38 column mode
        # left_xa = right_xa = top_ya = bottom_ya = 0
        if self.screen.rsel24:
            b1[3] += 8
            b2[1] += 8
            b4[1] += 8
            b3[1] -= 8
        if self.screen.csel38:
            b4[2] += 14
            b2[0] -= 18
        return b1, b2, b3, b4

    def reset_machine(self):
        super().reset_machine()
        self.screen.memory[0x00fb] = EmulatorWindowBase.update_rate
        if not self.run_real_roms:
            self.switch_interpreter("basic")
        if self.screen.using_roms:
            # jump through the 6502 reset vector at $FFFC
            reset = self.screen.memory.getword(0xfffc)
            print("using actual ROM reset routine at", reset)
            if self.run_real_roms:
                if self.real_cpu_running is None:
                    threading.Thread(target=self.run_rom_code, args=(reset,), daemon=True).start()
                else:
                    self.real_cpu_running.reset()
            else:
                # NOTE(review): prompt placement inferred — self.interpreter is None in
                # real-ROM mode, so the write_prompt call must live on this branch.
                do_sys(self.screen, reset, self.interpret_thread._microsleep, use_rom_routines=True)
                self.interpreter.write_prompt("\n\n\n\n\n")
class InterpretThread(threading.Thread):
    """Runs the BASIC/Python interpreter in a worker thread so the Tk GUI stays responsive.

    Direct-mode lines arrive via direct_queue (None is the stop sentinel); keys typed
    while a program runs are buffered and either replayed later or consumed via GET.
    """
    # basic interpreter runs in a worker thread so the GUI can continue handling its events normally.
    def __init__(self, interpreter, window):
        super(InterpretThread, self).__init__(name="interpreter", daemon=True)
        self.direct_queue = queue.Queue()  # direct-mode command lines; None = stop sentinel
        self.interpreter = interpreter
        self.interpret_lock = threading.Lock()  # serializes interpreter stepping vs. runstop()
        self.keybuffer_lock = threading.Lock()
        self.running_program = False
        self.executing_line = False
        self.must_stop = False
        self.window = window
        self.keybuffer = deque(maxlen=16)  # small typeahead buffer; oldest keys dropped when full
        self.step_counter = 0

    @property
    def running_something(self):
        # True while a program runs, a direct line executes, or a SLEEP instruction is pending.
        return self.running_program or self.executing_line or self.interpreter.sleep_until is not None

    def run(self):
        while not self.must_stop:
            try:
                # see if we have buffered keys to be handled
                if not self.running_something:
                    with self.keybuffer_lock:
                        keyevents = list(self.keybuffer)
                        self.keybuffer.clear()
                    for when, (char, event) in enumerate(keyevents, start=1):
                        # BUGFIX: the original passed self.window.keypress(event) — calling the
                        # handler immediately on this worker thread and scheduling its return
                        # value (None). Pass the callback and its argument separately so Tk
                        # performs the call on the GUI thread at the scheduled time.
                        self.window.after(when, self.window.keypress, event)
                # look for work
                if self.running_program:
                    with self.interpret_lock:
                        self.interpreter.program_step()
                        self.running_program = self.interpreter.running_program
                    self.step_counter += 1
                    if self.step_counter > 200:  # control program execution speed with this
                        self.step_counter = 0
                        self._microsleep()
                    if not self.running_program:
                        self.window.screen.cursor_enabled = True
                else:
                    # check if interpreter is doing a sleep instruction
                    if self.interpreter.sleep_until is not None:
                        time_left = self.interpreter.sleep_until - time.time()
                        if time_left > 0:
                            if os.name == "nt" and time_left <= 0.016:
                                self._microsleep()  # because on Windows, sleep() takes too long
                            else:
                                time.sleep(min(0.1, time_left))
                            continue
                        # sleep period is over
                        self.interpreter.sleep_until = None
                        if not self.running_program:
                            self.window.screen.cursor_enabled = True
                            self.interpreter.write_prompt("\n")
                            self.executing_line = False
                        continue
                    # check for direct line commands
                    command = self.direct_queue.get()
                    if command is None:
                        break  # stop sentinel
                    self.window.screen.cursor_enabled = False
                    self.executing_line = True
                    with self.interpret_lock:
                        self.interpreter.execute_line(command)
                        self.running_program = self.interpreter.running_program
                    self._microsleep()
                    if not self.running_program and not self.interpreter.sleep_until:
                        self.window.screen.cursor_enabled = True
                    self.executing_line = False
            except ResetMachineException:
                self.stop()
                self.window.after(1, self.window.reset_machine)

    def _microsleep(self):
        # artificial microscopic delay to yield the thread and allow screen to refresh
        self.window.hertztick.wait(.02)
        self.window.hertztick.clear()

    def stop(self):
        """Ask the worker to finish; unblocks both the queue wait and a pending microsleep."""
        self.interpreter.runstop()
        self.must_stop = True
        self.direct_queue.put(None)  # sentinel
        self.window.hertztick.set()
        time.sleep(0.1)  # give the worker a moment to notice the stop request

    def buffer_keypress(self, char, event):
        with self.keybuffer_lock:
            self.keybuffer.append((char, event))

    def get_bufferedkeyevent(self):
        # EAFP: popleft and fall back to (None, None) when the buffer is empty
        try:
            with self.keybuffer_lock:
                return self.keybuffer.popleft()
        except IndexError:
            return (None, None)

    def do_get_command(self):
        """BASIC GET support: return one buffered printable character, or '' if none."""
        char, event = self.get_bufferedkeyevent()
        if event:
            if len(char) == 1:
                return char
            else:
                pass  # @todo handle control characters? (F1 etc) INPUT would also need that
        return ''

    def do_sync_command(self):
        # block until the GUI signals that a repaint happened (or two update periods pass)
        self.window.refreshtick.wait(self.window.update_rate / 1000 * 2)
        self.window.refreshtick.clear()

    def submit_line(self, line):
        self.direct_queue.put(line)

    def runstop(self):
        """RUN/STOP (break): abort the running program or sleep, and clear buffered keys."""
        self.interpreter.runstop()
        with self.interpret_lock:
            if (self.executing_line or self.interpreter.sleep_until) and not self.running_program:
                self.window.screen.writestr("\n?break error")
            if self.interpreter.sleep_until:
                self.interpreter.sleep_until = 1  # epoch 1 -> the sleep wait expires immediately
        with self.keybuffer_lock:
            self.keybuffer.clear()
def start(run_real_roms):
    """Build the screen + emulator window and enter the Tk main loop.

    run_real_roms: when True, use the real-ROM emulator window (lazy import,
    since it needs the optional py64 CPU emulator).
    """
    roms_path = "roms"
    memory_screen = ScreenAndMemory(columns=C64EmulatorWindow.columns,
                                    rows=C64EmulatorWindow.rows,
                                    sprites=C64EmulatorWindow.sprites,
                                    rom_directory=roms_path,
                                    run_real_roms=run_real_roms)
    if run_real_roms:
        from .realemulator import RealC64EmulatorWindow
        window = RealC64EmulatorWindow(memory_screen,
                                       "Commodore-64 emulator in pure Python! - running actual roms",
                                       roms_path, [])
    else:
        window = C64EmulatorWindow(memory_screen, "Commodore-64 simulator in pure Python!",
                                   roms_path, False)
    window.start()
    window.mainloop()
if __name__ == "__main__":
    # Run directly: start in real-ROM mode (requires the ROM files in ./roms).
    start(run_real_roms=True)
|
plc.py | import threading
import time
from asyncio import Queue
from collections import deque
from pprint import pprint
from typing import List, Optional, Any
import snap7
from snap7.snap7types import S7AreaDB, S7WLReal, S7WLBit, S7WLByte
from stream2py import SourceReader
from stream2py.sources.raw_plc import PlcRawRead, PlcDataItem, get_byte
from stream2py.utility.typing_hints import ComparableType
class PlcReader(SourceReader):
    """stream2py SourceReader that polls a Siemens S7 PLC (via snap7) in a background thread.

    A poller thread reads the configured items in a loop and appends each result to an
    internal deque; read() hands them out one at a time, oldest first.

    TODO: Finish class implementation
    """

    def __init__(self, ip_address: str, *, items_to_read: List[PlcDataItem],
                 rack: int, slot: int, tcp_port: int = 102, sleep_time=1.0):
        # capture the constructor arguments for the info dict (before any locals are created)
        self._init_kwargs = {k: v for k, v in locals().items() if k not in ('self', '__class__')}
        self._ip_address = ip_address
        self._rack = rack
        self._slot = slot
        self._tcp_port = tcp_port
        self._items_to_read = items_to_read
        self._sleep_time = sleep_time
        # validate IP address early; inet_aton raises OSError for malformed addresses
        import socket
        socket.inet_aton(self._ip_address)  # validate IP Address
        self._plc_raw_reader = PlcRawRead(self._ip_address, rack=self._rack, slot=self._slot, tcp_port=self._tcp_port)
        self.bt = None  # "begin time" timestamp, set on open()
        self._start_time = None
        self._data_read_thread_exit = threading.Event()
        self._data_read_thread = None
        self.data = deque()  # filled by the poller thread, drained by read()
        self.plc_info = dict()
        self.reader_thread = None

    def _stream_thread(self):
        """Poller loop: read all configured items, buffer the result, then sleep."""
        _sleep_time = self.sleep_time_on_read_none_s
        while not self._data_read_thread_exit.is_set():
            data_item = self._plc_raw_reader.read_items(self._items_to_read)
            self.data.append(data_item)
            if _sleep_time > 0:
                time.sleep(_sleep_time)

    @property
    def sleep_time_on_read_none_s(self) -> float:
        return self._sleep_time

    def open(self) -> bool:
        """Connect to the PLC and start the poller thread. Returns True on success."""
        if self._plc_raw_reader.open():
            self.bt = self.get_timestamp()
            self._start_time = self.bt
            self.plc_info = self._plc_raw_reader.get_info()
            if self._data_read_thread is None:
                self._data_read_thread_exit.clear()  # robustness: allow open() after a close()
                self._data_read_thread = threading.Thread(target=self._stream_thread)
                self._data_read_thread.start()
            return True
        return False

    def read(self, blocking: bool = False, timeout: int = 0) -> Optional[Any]:
        """Return the oldest buffered item, or None when nothing is buffered.

        NOTE(review): blocking/timeout are currently ignored (reads are always
        non-blocking); kept for interface compatibility.

        :return: timestamp, plc info, read db items as key:value
        """
        if self.data:
            return self.data.popleft()
        return None  # BUGFIX: explicit, was an implicit fall-through

    def close(self) -> None:
        """Close and clean up source reader.

        Will be called when StreamBuffer stops or if an exception is raised during read and append loop.
        """
        self._data_read_thread_exit.set()
        # BUGFIX: wait (bounded) for the poller to exit before closing the connection,
        # so it cannot still be mid-read on a closed reader.
        if self._data_read_thread is not None:
            self._data_read_thread.join(timeout=max(1.0, self._sleep_time * 2))
        self._plc_raw_reader.close()

    @property
    def info(self) -> dict:
        _info = {'bt': self.bt}
        _info.update(**self._init_kwargs)
        _info.update(plc_info=self.plc_info)
        return _info

    def key(self, data_item: Optional[Any]) -> ComparableType:
        """Sort key for buffered items: their 'bt' timestamp."""
        # BUGFIX: annotation was `Any or None`, which just evaluates to Any at runtime
        # and is not valid typing syntax; Optional[Any] is the intended meaning.
        import operator
        return operator.itemgetter("bt")(data_item)
if __name__ == '__main__':
    # Demo: poll a few DB1 items from a PLC as fast as possible (sleep_time=0).
    # See the sample-output docstring below for the shape of info and data items.
    read_items = [
        PlcDataItem(
            key='PLC Motor Status',
            area=S7AreaDB,
            word_len=S7WLBit,
            db_number=1,
            start=0 * 8 + 0,  # bit offset
            amount=1,
            convert=snap7.util.get_bool,
            convert_args=(0, 0)),
        PlcDataItem(
            key='PLC LED Status',
            area=S7AreaDB,
            word_len=S7WLBit,
            db_number=1,
            start=0 * 8 + 1,  # bit offset
            amount=1,
            convert=snap7.util.get_bool,
            convert_args=(0, 0)),
        PlcDataItem(
            key='NetHAT Motor Speed',
            area=S7AreaDB,
            word_len=S7WLByte,
            db_number=1,
            start=3,
            amount=1,
            convert=get_byte,
            convert_args=(0,)),
        PlcDataItem(
            key='NetHAT LED Brightness',
            area=S7AreaDB,
            word_len=S7WLByte,
            db_number=1,
            start=4,
            amount=1,
            convert=get_byte,
            convert_args=(0,)),
    ]
    preader = PlcReader('192.168.0.19', items_to_read=read_items,
                        rack=0, slot=0, sleep_time=0)
    if not preader.open():
        preader.close()
        exit(-1)
    can_run: bool = True
    pprint(preader.info)
    while can_run:
        try:
            data = preader.read()
            if data is None:
                time.sleep(0.5)  # nothing buffered yet; check again shortly
            else:
                pass  # enable pprint(data) here to dump each polled item
        except KeyboardInterrupt:
            # Ctrl+C ends the demo loop cleanly
            can_run = False
    # BUGFIX: shut the poller thread down; without this the non-daemon reader
    # thread keeps the process alive after Ctrl+C.
    preader.close()
"""
Ouptut::
{'bt': 1584040986041418,
'ip_address': '192.168.0.19',
'items_to_read': [PlcDataItem(area=132, word_len=8, db_number=3, start=2, amount=1, key='temperature', convert=<function get_real at 0x7f8488038200>, convert_args=None),
PlcDataItem(area=132, word_len=1, db_number=3, start=0, amount=1, key='led1', convert=<function get_bool at 0x7f84780c2b00>, convert_args=(0, 0)),
PlcDataItem(area=132, word_len=1, db_number=3, start=1, amount=1, key='led2', convert=<function get_bool at 0x7f84780c2b00>, convert_args=(0, 0))],
'plc_info': {'cpu_info': {'ASName': b'S71500/ET200MP station_1',
'Copyright': b'Original Siemens Equipment',
'ModuleName': b'PLC_1',
'ModuleTypeName': b'CPU 1511C-1 PN',
'SerialNumber': b'S V-L9AL98812019'},
'cpu_state': 'S7CpuStatusRun',
'pdu_len': 480},
'rack': 0,
'sleep_time': 1.0,
'slot': 0,
'tcp_port': 102}
[{'key': 'temperature', 'ts': 1584040986051531, 'value': 11.0},
{'key': 'led1', 'ts': 1584040986051538, 'value': False},
{'key': 'led2', 'ts': 1584040986051540, 'value': True}]
[{'key': 'temperature', 'ts': 1584040987061010, 'value': 11.0},
{'key': 'led1', 'ts': 1584040987061022, 'value': False},
{'key': 'led2', 'ts': 1584040987061029, 'value': True}]
[{'key': 'temperature', 'ts': 1584040988080404, 'value': 11.0},
{'key': 'led1', 'ts': 1584040988080412, 'value': False},
{'key': 'led2', 'ts': 1584040988080415, 'value': True}]
[{'key': 'temperature', 'ts': 1584040989104420, 'value': 11.0},
{'key': 'led1', 'ts': 1584040989104428, 'value': False},
{'key': 'led2', 'ts': 1584040989104431, 'value': True}]
"""
|
saver.py | # burp extension to save page response content from the site map via a right click
#burp imports
from burp import IBurpExtender
from burp import IContextMenuFactory
#Java imports
from javax.swing import JMenuItem
from java.util import List,ArrayList
from java.net import URL
#python imports
import threading
import os
import sys
from binascii import hexlify
# base directory in which to save response bodies (one file per URL path)
baseDir = '/tmp/'
siteMapRetrieve = False  # try to get response data from the site map if it's not in the selected message
class BurpExtender(IBurpExtender,IContextMenuFactory):
def registerExtenderCallbacks(self,callbacks):
self.callbacks = callbacks
self.helpers = callbacks.getHelpers()
self.callbacks.setExtensionName("Item response saver")
self.callbacks.registerContextMenuFactory(self)
self._createIfNotExist(baseDir)
self.stdout = callbacks.getStdout()
self.stderr = callbacks.getStderr()
return
def createMenuItems(self, IContextMenuInvocation):
self.selectedMessages = IContextMenuInvocation.getSelectedMessages()
menuItemList = ArrayList()
menuItemList.add(JMenuItem("Save responses", actionPerformed = self.onClick))
return menuItemList
def _createIfNotExist(self, dir):
if not os.path.isdir(dir):
os.mkdir(dir)
def download(self, messages):
print 'About to save {} requests to disk...'.format(len(messages))
filenames = {}
try:
for message in messages:
srv_a = self.helpers.analyzeRequest(message)
this_url = srv_a.getUrl().toString().split(":")[0] + ":" + srv_a.getUrl().toString().split(":")[1] + "/" + srv_a.getUrl().toString().split(":")[2].split("/",1)[1]
responseInMessage = False
if 'getResponse' in dir(message):
rd = message.getResponse()
if rd:
ar = self.helpers.analyzeResponse(rd)
bo = ar.getBodyOffset()
response = self.helpers.bytesToString(rd)[bo:]
if len(response) > 2:
responseInMessage = True
else:
print 'Response content for {} could not be retrieved from selected message'.format(this_url)
if siteMapRetrieve and not responseInMessage: # response not in message, try and retrieve from site map
sm = self.callbacks.getSiteMap(this_url)
for sme in sm:
srv_b = self.helpers.analyzeRequest(sme)
entry_url = srv_b.getUrl().toString().split(":")[0] + ":" + srv_b.getUrl().toString().split(":")[1] + "/" + srv_b.getUrl().toString().split(":")[2].split("/",1)[1]
if this_url == entry_url:
rd = sme.getResponse()
if rd:
ar = self.helpers.analyzeResponse(rd)
bo = ar.getBodyOffset()
response = self.helpers.bytesToString(rd)[bo:]
if len(response) > 2:
break
bits = [a for a in ("/" + srv_a.getUrl().toString().split(":")[2].split("/",1)[1].split('?')[0]).split('/') if a]
fn = baseDir + '/'.join(bits)
od = baseDir
for di in bits[:-1]:
od = os.path.join(od, di)
self._createIfNotExist(od)
if fn in filenames:
filenames[fn] += 1
fn = '{}_{}'.format(fn, str(filenames[fn]-1))
else:
filenames[fn] = 0
open(fn, 'wb').write(response.encode('utf8'))
except Exception as e:
self.stderr.write('An error occurred: {}'.format(e))
print 'Saved {} requests to disk!'.format(len(messages))
def onClick(self, event):
requests = self.selectedMessages
t = threading.Thread(target=self.download,args=[requests])
t.daemon = True
t.start()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.